\documentclass[10pt,landscape,a4paper]{article}
\usepackage[right=10mm, left=10mm, top=10mm, bottom=10mm]{geometry}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage[english]{babel}
\usepackage[rm,light]{roboto}
\usepackage{xcolor}
\usepackage{graphicx}
\graphicspath{{./figures/}}
\usepackage{multicol}
\usepackage{colortbl}
\usepackage{array}
\setlength\parindent{0pt}
\setlength{\tabcolsep}{2pt}
\baselineskip=0pt
\setlength\columnsep{1em}
\definecolor{Gray}{gray}{0.85}
% --- Listing -----------------------------------------------------------------
\usepackage{listings}
\lstset{
frame=tb, framesep=4pt, framerule=0pt,
backgroundcolor=\color{black!5},
basicstyle=\ttfamily,
commentstyle=\ttfamily\color{black!50},
breakatwhitespace=false,
breaklines=true,
extendedchars=true,
keepspaces=true,
language=Python,
rulecolor=\color{black},
showspaces=false,
showstringspaces=false,
showtabs=false,
tabsize=2,
%
emph = { plot, scatter, imshow, bar, contourf, pie, subplots, spines,
add_gridspec, add_subplot, set_xscale, set_minor_locator,
annotate, set_minor_formatter, tick_params, fill_betweenx, text, legend,
errorbar, boxplot, hist, title, xlabel, ylabel, suptitle },
emphstyle = {\ttfamily\bfseries}
}
% --- Fonts -------------------------------------------------------------------
\usepackage{fontspec}
\usepackage[babel=true]{microtype}
\defaultfontfeatures{Ligatures = TeX, Mapping = tex-text}
\setsansfont{Roboto} [ Path = fonts/roboto/Roboto-,
Extension = .ttf,
UprightFont = Light,
ItalicFont = LightItalic,
BoldFont = Regular,
BoldItalicFont = Italic ]
\setromanfont{RobotoSlab} [ Path = fonts/roboto-slab/RobotoSlab-,
Extension = .ttf,
UprightFont = Light,
BoldFont = Bold ]
\setmonofont{RobotoMono} [ Path = fonts/roboto-mono/RobotoMono-,
Extension = .ttf,
Scale = 0.90,
UprightFont = Light,
ItalicFont = LightItalic,
BoldFont = Regular,
BoldItalicFont = Italic ]
\renewcommand{\familydefault}{\sfdefault}
% -----------------------------------------------------------------------------
\begin{document}
\thispagestyle{empty}
\section*{\LARGE \rmfamily
Matplotlib \textcolor{orange}{\mdseries for intermediate users}}
\begin{multicols*}{3}
A matplotlib figure is composed of a hierarchy of elements that forms
the actual figure. Each element can be modified. \medskip
\includegraphics[width=\linewidth]{anatomy.pdf}
\subsection*{\rmfamily Figure, axes \& spines}
% -----------------------------------------------------------------------------
\begin{tabular}{@{}m{.821\linewidth}m{.169\linewidth}}
\begin{lstlisting}[belowskip=-\baselineskip]
fig, axs = plt.subplots(3, 3)
axs[0,0].set_facecolor("#ddddff")
axs[2,2].set_facecolor("#ffffdd")
\end{lstlisting}
& \raisebox{-0.75em}{\includegraphics[width=\linewidth]{layout-subplot-color.pdf}}
\end{tabular}
% -----------------------------------------------------------------------------
\begin{tabular}{@{}m{.821\linewidth}m{.169\linewidth}}
\begin{lstlisting}[belowskip=-\baselineskip]
gs = fig.add_gridspec(3, 3)
ax = fig.add_subplot(gs[0, :])
ax.set_facecolor("#ddddff")
\end{lstlisting}
& \raisebox{-0.75em}{\includegraphics[width=\linewidth]{layout-gridspec-color.pdf}}
\end{tabular}
% -----------------------------------------------------------------------------
\begin{tabular}{@{}m{.821\linewidth}m{.169\linewidth}}
\begin{lstlisting}[belowskip=-\baselineskip]
fig, ax = plt.subplots()
ax.spines["top"].set_color("None")
ax.spines["right"].set_color("None")
\end{lstlisting}
& \raisebox{-0.75em}{\includegraphics[width=\linewidth]{layout-spines.pdf}}
\end{tabular}
% -----------------------------------------------------------------------------
\subsection*{\rmfamily Ticks \& labels}
\begin{lstlisting}[basicstyle=\ttfamily\small]
from matplotlib.ticker import MultipleLocator as ML
from matplotlib.ticker import ScalarFormatter as SF
ax.xaxis.set_minor_locator(ML(0.2))
ax.xaxis.set_minor_formatter(SF())
ax.tick_params(axis='x',which='minor',rotation=90)
\end{lstlisting}
\includegraphics[width=\linewidth]{tick-multiple-locator.pdf}
% -----------------------------------------------------------------------------
\subsection*{\rmfamily Lines \& markers}
\begin{lstlisting}
X = np.linspace(0.1, 10*np.pi, 1000)
Y = np.sin(X)
ax.plot(X, Y, "C1o:", markevery=25, mec="1.0")
\end{lstlisting}
\includegraphics[width=\linewidth]{sine-marker.pdf}
% -----------------------------------------------------------------------------
\subsection*{\rmfamily Scales \& projections}
\begin{lstlisting}
fig, ax = plt.subplots()
ax.set_xscale("log")
ax.plot(X, Y, "C1o-", markevery=25, mec="1.0")
\end{lstlisting}
\includegraphics[width=\linewidth]{sine-logscale.pdf}
\subsection*{\rmfamily Text \& ornaments}
\begin{lstlisting}[]
ax.fill_betweenx([-1,1],[0],[2*np.pi])
ax.text(0, -1, r" Period $\Phi$")
\end{lstlisting}
\includegraphics[width=\linewidth]{sine-period.pdf}
% -----------------------------------------------------------------------------
\subsection*{\rmfamily Legend}
\begin{lstlisting}[]
ax.plot(X, np.sin(X), "C0", label="Sine")
ax.plot(X, np.cos(X), "C1", label="Cosine")
ax.legend(bbox_to_anchor=(0,1,1,.1),ncol=2,
mode="expand", loc="lower left")
\end{lstlisting}
\includegraphics[width=\linewidth]{sine-legend.pdf}
% -----------------------------------------------------------------------------
\subsection*{\rmfamily Annotation}
\begin{lstlisting}[]
ax.annotate("A", (X[250],Y[250]),(X[250],-1),
ha="center", va="center",arrowprops =
{"arrowstyle" : "->", "color": "C1"})
\end{lstlisting}
\includegraphics[width=\linewidth]{sine-annotate.pdf}
% -----------------------------------------------------------------------------
\subsection*{\rmfamily Colors}
Any color can be used, but Matplotlib offers sets of colors:\\
\includegraphics[width=\linewidth]{colors-cycle.pdf} \smallskip
\includegraphics[width=\linewidth]{colors-grey.pdf}\\
%As well as nice colormaps (viridis an magma):\\
%\includegraphics[width=\linewidth]{colormap-viridis.pdf} \smallskip
%\includegraphics[width=\linewidth]{colormap-magma.pdf} \medskip
% -----------------------------------------------------------------------------
\vspace{-1em}
\subsection*{\rmfamily Size \& DPI}
Consider a square figure to be included in a two-column A4 paper with
2cm margins on each side and a column separation of 1cm. The width of
a figure is (21 - 2*2 - 1)/2 = 8cm. One inch being 2.54cm, the figure size
should be 3.15$\times$3.15 in.
\begin{lstlisting}[]
fig = plt.figure(figsize=(3.15,3.15), dpi=50)
plt.savefig("figure.pdf", dpi=600)
\end{lstlisting}
\vfill
%
{\scriptsize
Matplotlib 3.4.2 handout for intermediate users.
Copyright (c) 2021 Matplotlib Development Team.
Released under a CC-BY 4.0 International License.
Supported by NumFOCUS.
\par}
\end{multicols*}
\end{document}
| {
"alphanum_fraction": 0.5833675284,
"avg_line_length": 34.6492890995,
"ext": "tex",
"hexsha": "d04971b565e10ecbac81efee3b94201cb2d82ae3",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "2e1d87bd3d03497b335ba5f936102a9fb5e7afac",
"max_forks_repo_licenses": [
"BSD-2-Clause"
],
"max_forks_repo_name": "tamtridung/cheatsheets",
"max_forks_repo_path": "handout-intermediate.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "2e1d87bd3d03497b335ba5f936102a9fb5e7afac",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-2-Clause"
],
"max_issues_repo_name": "tamtridung/cheatsheets",
"max_issues_repo_path": "handout-intermediate.tex",
"max_line_length": 83,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "c3b4509bf76fc180621ca1e6433d42742a656759",
"max_stars_repo_licenses": [
"BSD-2-Clause"
],
"max_stars_repo_name": "brickbitbot/cheatsheets",
"max_stars_repo_path": "handout-intermediate.tex",
"max_stars_repo_stars_event_max_datetime": "2022-02-22T20:52:58.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-12-09T21:56:18.000Z",
"num_tokens": 1882,
"size": 7311
} |
\chapter{Implementation Details}
From the previous section we saw that neglecting the colour gradient affects shear measurements. The main goal of our simulation is
to create a realistic lensing effect. Galaxies were specified by different parameters and placed at a particular distance from the lens. We chose
the comparatively simple ``singular isothermal sphere'' model for the lens in the simulation. Initially we considered only a monochromatic simulation,
i.e.\ our simulation ran only once for one wavelength and the convolution was done using one PSF. We then modified our programme so that instead
of running for one wavelength the simulation runs over a range of wavelengths. We
took the infrared wavelength range 1.57$\mu m$--2.00$\mu m$ and divided this range into 20 parts. For this step we took galaxies which have different
fluxes in two different bands, ``F606W'' (blue) and ``F814W'' (red). In our simulations we refer to them as ``blue'' and ``red'' galaxies. Linear
interpolation in wavelength was then used to create galaxy samples which have
a certain percentage of red and a certain percentage of blue flux. These galaxies were then convolved with the PSF of that particular wavelength. We also ran
the simulation with the galaxies rotated by $90^{\circ}$, so that when the two outputs are combined the net intrinsic ellipticity of the galaxy
shapes can be avoided.
Our aim is to see how the PSF affects shear measurements. To do so, I used the programme ``jedisim'' written by Dell Antonio. In this programme we run
a Python script named ``jedimaster.py''. This script reads from a text file named ``config'', which contains the information about the different parameters.
``jedimaster.py'' runs six C programmes which create the simulation step by step.
The steps are given below.
\paragraph{Postage stamps}
The images were obtained from an HST UDF image. From that image, individual galaxies were converted into 600 by 600 pixel postage stamps.
In a postage stamp the galaxy is isolated on a blank background. The resolution of the images is 0.03 arcsecond per pixel. We took a total of 128 postage stamps so that our sample contains a diverse
set of galaxy shapes and orientations.
\paragraph{Making the catalog}
The C programme ``jedicatalog.c'' creates a realistic galaxy catalog. This catalog contains the information for the galaxy images to be created. A few parameters are specified for each galaxy to be created. These parameters are
discussed below.

1. Magnitude:
We choose the magnitude of each galaxy to be in the range 22$\leq$M$\leq$28.
The distribution is given by the power law
\begin{equation}
P(M+dM)\propto 10^{BM}
\end{equation}
where M is the magnitude and B is a constant, $ B = 0.33 $.
We take the zero-point magnitude to be $ M=30 $.
2. Radius:
The HDF catalogs contain a database of galaxy $r_{50}$ radii. That database was binned by the integer part of the magnitude,
and a list of radii was made for each magnitude bin. As we have already assigned a magnitude to our galaxy, this magnitude
corresponds to a particular bin, and an $r_{50}$ radius was randomly chosen from that bin.

3. Redshift:
In general different galaxies have different redshifts, but for our simulation we take all the galaxies to be at
a single redshift. All the background galaxies are at redshift $ z = 1.5 $ and the lens is at $ z = 0.3 $.

4. Position:
The centers of the postage stamps were selected from the range [301,40960] so that the whole galaxy fits within the image and edge effects can be avoided.

5. Angle:
As the universe is homogeneous and isotropic, galaxies are in general randomly oriented in the sky. The ``jedicatalog.c'' programme randomly
chooses an angle from the range [0,2$\pi$]. In general the orientation has 3 degrees of freedom, but as we are dealing with 2D projections of galaxies, we can make the orientation random in only 1 degree of freedom.

So ``jedicatalog.c'' creates a catalog which contains the name of the source postage stamp, the radius, magnitude, redshift, angle, and the name of the galaxy image to be created in the next step.
\paragraph{Transforming the galaxy according to the catalog}
``jeditransform.c'' takes the catalog and the source postage stamps. Each postage stamp is scaled down to the correct radius, the flux of the image is adjusted according to
the magnitude specified in the catalog, and each galaxy is rotated through the assigned angle. The postage stamps are then cut out so that
the final image contains all the non-zero pixels of the galaxy.
Figure 2.1 shows a sample postage stamp and the same stamp (lower panel) transformed according to these parameters.
\begin{figure}[ht]
\begin{center}
\includegraphics[width=15cm,height=15cm]{pos.jpg}
\caption{Postage stamp transformed according to the parameters in the catalog}
\label{fig:postage}
\end{center}
\end{figure}
\clearpage
\paragraph{Distorting the galaxies}
As we have seen before, in the weak-field limit for a point mass the lens equation is
\begin{equation}
\begin{split}
\vec\beta(\vec\theta) &= \vec\theta - \vec\alpha(\vec\theta) \\
&= \vec\theta - \frac{D_{ls}}{D_s} \hat{\vec\alpha}(\vec\theta)
\end{split}
\end{equation}
We consider here a lens with a symmetric mass distribution, centred at an arbitrary position and at redshift $z=0.3$. The deflection term in the lens equation becomes
\begin{equation}
\hat{\vec\alpha}(\vec\theta) = \alpha(r)\, \hat{\vec r}
\end{equation}
where $\alpha(r)$ is the radial deflection, which depends upon the mass distribution.
Two commonly used mass distributions are:
1. the Singular Isothermal Sphere (SIS) profile, and
2. the Navarro--Frenk--White (NFW) profile.
We consider only the Singular Isothermal Sphere (SIS) profile. For this profile the density is given by
\begin{equation}
\rho = \frac{\sigma^2_v}{2 \pi Gr^2}
\end{equation}
Here $\sigma_v$ is the velocity dispersion and G is the universal gravitational constant. As r$\rightarrow$0 the density $\rho \rightarrow \infty $,
so at r=0 the profile is not physical. But as long as the distribution is bounded at a finite radius it constitutes a possible physical distribution and can be used as a lens.
The deflection in pixels due to an SIS profile is given by
\begin{equation}
\frac{\alpha(r, \sigma_v) }{r} = \frac{4\pi}{r} \left(\frac{\sigma_v}{c}\right)^2 S
\end{equation}
where S is the factor that converts radians to pixels,
\begin{equation}
S = \frac{180}{\pi} \, \frac{3600}{\mathrm{resolution}}
\end{equation}
with the resolution in arcseconds per pixel.
So we have seen that the amount of distortion depends upon the distance between the lens and the galaxy. The lens position was selected
to be (6144,6144).
If we take a galaxy at position (5744,7909) then the distance from the lens to the galaxy is
\begin{equation}
\begin{split}
r_1 &= \sqrt{(6144-5744)^2 + (6144-7909)^2} \\
&= 1809.75
\end{split}
\end{equation}
Similarly, if we take another galaxy at position (690,5217), the distance from the lens to this second galaxy is
\begin{equation}
\begin{split}
r_2 &= \sqrt{(6144-690)^2 + (6144-5217)^2} \\
&= 5532.21
\end{split}
\end{equation}
From the deflection equation above we see that the deflection is inversely proportional to r, i.e.\ galaxies at a larger distance from the lens
are deflected less than galaxies nearby. So, given the values of $ r_1 $ and $ r_2 $, galaxy 1 would be distorted more than galaxy 2.
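The distances and the SIS deflection can be sketched in a few lines of Python (illustrative only: the function names, the example velocity dispersion of 600 km/s, and the constants are my own choices, not values taken from ``jedidistort.c''):
\begin{verbatim}
import numpy as np

C_KM_S = 299792.458            # speed of light in km/s
RESOLUTION = 0.03              # arcsec per pixel of the unconvolved image
PIX_PER_RAD = (180.0 / np.pi) * 3600.0 / RESOLUTION

def sis_deflection_pixels(sigma_v):
    # SIS deflection angle, 4*pi*(sigma_v/c)^2 radians, converted to pixels.
    return 4.0 * np.pi * (sigma_v / C_KM_S) ** 2 * PIX_PER_RAD

def distance_pixels(lens, gal):
    # Euclidean distance between lens and galaxy centres, in pixels.
    return np.hypot(lens[0] - gal[0], lens[1] - gal[1])

lens = (6144, 6144)
r1 = distance_pixels(lens, (5744, 7909))     # ~1809.8
r2 = distance_pixels(lens, (690, 5217))      # ~5532.2
print(r1, r2, sis_deflection_pixels(600.0))  # sigma_v = 600 km/s, made up
\end{verbatim}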
\paragraph{Embedding all the unconvolved images}
``jedidistort.c'' simulates the distortion effect for all the galaxies. It also writes two keywords, ``xembed'' and ``yembed'', into each
FITS image; these give the x and y coordinates of the lower-left pixel at which the individual image is to be embedded in the larger
image. ``jedipaste.c'' takes all of these 12000 images and embeds them onto a larger image which is 12288 by 12288 pixels.
Due to memory constraints ``jedipaste.c'' cannot embed this large number of images into a single huge image
at once. Instead it divides the larger image (12288 by 12288) into two equal rectangular parts, called ``bands'' in our programme.
Band 0 covers the x range (0,12288) and the y range (0,6144); band 1 has the same x range as band 0 but its y range is (6144,12288).
``jedipaste.c'' then takes all the images and embeds them according to their ``xembed'' and ``yembed'' coordinates,
first in band 0 and then in band 1.
\paragraph{Convolution with the PSF}
The light from the distant galaxies not only gets deflected by the intervening gravitational field of the foreground sources but is also
affected by the telescope optics. This effect is described by the PSF, which we discussed above (Section 1.4).
In general we can write the convolution of two functions f(x) and g(x) as
\begin{equation}
f(x)*g(x) = \int f(y)g(x-y)dy
\end{equation}
If there are n data points then from the above equation we see that we need to calculate $ n^2$ terms.
For an image of size 12288 by 12288 pixels this is not practical. That is why in this case we take the Fourier transforms of the image and the PSF and use the convolution theorem.
The convolution theorem states that
\begin{equation}
F(f*g) = F(f(x))\, F(g(x))
\end{equation}
i.e.\ convolution in real space becomes a pointwise product in Fourier space.
The programme ``jediconvolve.c'' uses the ``fftw3'' library to implement this idea. This is a very fast way to obtain the convolved image.
``jediconvolve.c'' reads the pixels of the image into the array pImg and the pixels of the PSF into the array pPsf.
fftw3 works with ``plans''; for example, we used fftw\_plan\_dft\_r2c\_2d. When these plans are executed we obtain the Fourier transform of the object: for example,
fftwf\_execute(pPImg) takes the Fourier transform of the image array. The same procedure is applied to the PSF array.
The image after the convolution is still in frequency space, so the inverse transform is taken to bring it back to real space.
As the image ``HST0.fits'' is very large (12288 by 12288 pixels) the programme cannot process it at once, so it divides the whole image into 6 bands of 2048 pixel rows each.
Each band is Fourier transformed, multiplied by the Fourier transform of the PSF, and the result is inverse Fourier transformed.
As there are 6 bands in total, we get 6 images.
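The same convolution-theorem idea can be sketched in Python with NumPy (an illustration only; ``jediconvolve.c'' works in C with fftw3, splits the image into bands and reads FITS files, none of which is shown here):
\begin{verbatim}
import numpy as np

def fft_convolve(image, psf):
    # Zero-pad the PSF to the image shape and roll it so its centre
    # sits at pixel (0, 0), then multiply the Fourier transforms.
    kernel = np.zeros_like(image)
    ky, kx = psf.shape
    kernel[:ky, :kx] = psf
    kernel = np.roll(kernel, (-(ky // 2), -(kx // 2)), axis=(0, 1))
    return np.fft.irfft2(np.fft.rfft2(image) * np.fft.rfft2(kernel),
                         s=image.shape)

# Toy example: a point source blurred by a small Gaussian PSF.
image = np.zeros((256, 256))
image[128, 128] = 1.0
y, x = np.mgrid[-7:8, -7:8]
psf = np.exp(-(x**2 + y**2) / (2.0 * 2.0**2))
psf /= psf.sum()
print(fft_convolve(image, psf).sum())   # total flux is conserved (~1.0)
\end{verbatim}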
\paragraph{Making the convolved image}
``jediconvolve.c'' creates 6 images. The jedipaste programme once again takes these 6 images and reads the x and y coordinates of the lower-left pixel of each.
It then divides the whole image into 2 rectangular parts, band 0 and band 1, embeds these images into the two bands, and creates one final image, ``HST\_convolved.fits''.
Figure 2.2 gives an idea of how the galaxies are embedded in the large file.
\begin{figure}[ht]
\begin{center}
\includegraphics[width=15cm,height=15cm]{con.jpg}
\caption{Convolved and distorted galaxy image}
\label{fig:convolved}
\end{center}
\end{figure}
\paragraph{Rescaling according to the resolution of WFIRST}
The resolution of the image ``jedipaste.c'' creates is 0.03 arcsecond per pixel, but WFIRST has a resolution of 0.11 arcsecond per pixel. ``jedirescale.c'' therefore scales
this image down to the larger pixel scale and trims off the border. It finds the box which each new pixel makes on the original image, integrates over the area
under that box, averages, and assigns that value to the new pixel.
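A much simplified version of this rescaling, assuming an integer rescale factor rather than the fractional 0.03/0.11 ratio that ``jedirescale.c'' actually handles, could look like the following Python sketch (the function name and array sizes are assumptions):
\begin{verbatim}
import numpy as np

def block_average(image, factor):
    # Trim the image to a multiple of the factor, then average each
    # factor x factor block into one output pixel.
    ny, nx = image.shape
    ny, nx = ny - ny % factor, nx - nx % factor
    trimmed = image[:ny, :nx]
    return trimmed.reshape(ny // factor, factor,
                           nx // factor, factor).mean(axis=(1, 3))

small = block_average(np.random.rand(1024, 1024), 4)
print(small.shape)    # (256, 256)
\end{verbatim}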
\paragraph{Adding noise to the image}
Finally, there is some random noise associated with each image. ``jedinoise.c'' adds Poisson noise of mean value 10 to the final
image ``LSST\_convolved\_noise.fits''. We tried to keep the noise as low as possible in our simulation so that it does not affect the shear measurement.
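As a minimal illustration, Poisson noise of mean 10 can be added to an image array as follows (Python sketch; whether ``jedinoise.c'' also Poisson-resamples the galaxy counts themselves is not shown here):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng()

def add_poisson_noise(image, mean=10.0):
    # Add a Poisson deviate of the given mean to every pixel.
    return image + rng.poisson(lam=mean, size=image.shape)

noisy = add_poisson_noise(np.zeros((64, 64)), mean=10.0)
print(noisy.mean())   # close to 10
\end{verbatim}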
\paragraph{Changes made to the original programme}
\paragraph{Implementing wavelength dependence}
Initially we worked only on monochromatic images. We obtained 100 images in the ``F606W'' filter and another 100 images in the ``F814W'' filter. These
images are also 0.03 arcsecond per pixel as before.
To test the colour dependence we took the wavelength range 1.57--2.00 $\mu m$ and divided this range into 20 parts.
I edited the jedisim programme so that, unlike in the first part, instead of running only once it now runs 21 times.
I wrote a programme ``color.c'' which takes the blue and red images and creates the final image by linear interpolation.
In terms of an equation, for any run n,
\begin{equation}
\text{output image} = \left(1-\frac{n}{20}\right) \times \text{blue image} + \frac{n}{20} \times \text{red image}
\end{equation}
We wrote the formula this way because the wavelength goes from blue to red. So when $n=0$, i.e.\ the wavelength is 1.57 $\mu m$, the blue image contributes most, and when $n=20$
(wavelength 2.00 $\mu m$) the red image dominates.
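The interpolation itself is a one-line operation per run; the following Python sketch mirrors the formula above (the function and variable names are mine, not those used in ``color.c''):
\begin{verbatim}
import numpy as np

def interpolate_color(blue, red, n, n_steps=20):
    # out = (1 - n/20) * blue + (n/20) * red
    w = n / n_steps
    return (1.0 - w) * blue + w * red

blue = np.random.rand(64, 64)    # stand-ins for the postage stamps
red = np.random.rand(64, 64)
assert np.allclose(interpolate_color(blue, red, 0), blue)   # pure blue
assert np.allclose(interpolate_color(blue, red, 20), red)   # pure red
\end{verbatim}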
Previously the PSF used in the convolution was monochromatic, but now we need a PSF which depends upon the wavelength. We generated 21 wavelength-dependent PSFs with the WebbPSF WFIRST tutorial notebook
($http://localhost:8888/notebooks/WebbPSF\-WFIRST\_Tutorial.ipynb$).
The PSFs are wavelength dependent, so as the wavelength increases the FWHM also increases.
The simulation follows the same steps as before, but instead of one output after ``jedirescale.c'' the programme now produces 21 outputs. I wrote a programme
``avg20.c'' which takes these 21 outputs, averages them, and creates one image ``LSST\_convolved.fits''. In the final step the noise was added using ``jedinoise.c'' as before.
The programme is also run for the $90^{\circ}$ rotated case as before.
\chapter{Results}
\subsection{Weighted Average of PSF}
One of the main purposes of this study is to observe whether the PSF size changes linearly, or follows some other particular form, as
a function of wavelength. PSFs are in general estimated using stellar spectra. Over a given wavelength range, different stars
have different spectral energy distributions. Finding the flux for a particular PSF in that range enables us to find the flux-weighted
average of the PSF. This in turn helps us to compare the size of the PSF for different wavelength regions.
I downloaded 15 stellar SED text files from the IRTF Spectral Library website
(\texttt{http://irtfweb.ifa.hawaii.edu/\textasciitilde spex/IRTF\_Spectral\_Library/}). The SEDs were taken for
F, G, M, K, and L stars.
I wrote a programme ``readf2.py'' to find the flux for each of the 21 PSF files. This programme reads from the spectral energy distribution (SED)
file. Suppose we need the flux at a particular value $ x_i $. The programme finds two nearby values $ x_{i-1}$ and $ x_{i+1}$ in the SED file and the
corresponding flux values $ y_{i-1}$ and $ y_{i+1}$, and then linearly interpolates between those two points.
So the slope m is
\begin{equation}
m = \frac{y_{i+1}-y_{i-1}}{x_{i+1}-x_{i-1}}
\end{equation}
Once we have the slope, the y-intercept of that straight line is
\begin{equation}
c = y_{i+1} - mx_{i+1}
\end{equation}
If we now substitute m and c into the straight line, then for a particular wavelength x the flux is
\begin{equation}
y = mx +c
\end{equation}
This programme finds the flux at each of the 21 wavelengths and writes them to a text file.
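The two-point interpolation can be sketched as follows (Python; this illustrates the method rather than reproducing ``readf2.py'', and the toy SED at the end is made up):
\begin{verbatim}
import numpy as np

def flux_at(wavelength, sed_wave, sed_flux):
    # Find the two SED samples bracketing the wavelength and evaluate
    # y = m*x + c between them (no edge handling, as a sketch).
    i = np.searchsorted(sed_wave, wavelength)
    x0, x1 = sed_wave[i - 1], sed_wave[i]
    y0, y1 = sed_flux[i - 1], sed_flux[i]
    m = (y1 - y0) / (x1 - x0)
    c = y1 - m * x1
    return m * wavelength + c

wave = np.linspace(1.5, 2.1, 200)
flux = np.exp(-wave)                 # stand-in for a real stellar SED
print(flux_at(1.5915, wave, flux))
\end{verbatim}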
The left side of Figure 3.1 shows the flux from the SED file and the interpolated fluxes plotted against wavelength.
The interpolated flux does not agree with the flux from the SED. This is because we took only the two nearby points, whereas
the SED varies a lot between two wavelength samples.
So I wrote another programme ``newprac.py'' which takes the midpoint of each region. For a particular value it integrates the flux
between those two midpoint values. For example, if we need to know the flux at 1.5915 (for the psf1.fits file), the programme first finds the values
at 1.58075 and 1.60225 by linear interpolation. Then it takes all the values between these points from the SED files and integrates
over this range.
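A sketch of this midpoint-integration approach (again illustrative Python rather than the actual ``newprac.py'' code; the trapezoidal rule and the stand-in SED are my own choices):
\begin{verbatim}
import numpy as np

def band_flux(lower_mid, upper_mid, sed_wave, sed_flux):
    # Integrate the SED between the two midpoints with the trapezoidal
    # rule, including interpolated values at the band edges.
    inside = (sed_wave >= lower_mid) & (sed_wave <= upper_mid)
    w = np.concatenate(([lower_mid], sed_wave[inside], [upper_mid]))
    f = np.interp(w, sed_wave, sed_flux)
    return np.trapz(f, w)

wave = np.linspace(1.5, 2.1, 500)
flux = np.exp(-wave)                       # stand-in SED
print(band_flux(1.58075, 1.60225, wave, flux))
\end{verbatim}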
After making these changes, the fluxes for the SED file and the PSF files were plotted again, which we can see on the right side of
Figure 3.1.
I then wrote a programme ``pyfits2.py'' which finds the weighted average of the 21 PSF files.
For a given star, if $f(x_i) $ denotes the data value of the $i$-th FITS file and $x_i $ denotes the corresponding flux, then the
weighted average is
\begin{equation}
\bar x = \frac{\sum_{i=1}^{21} f(x_i)x_i}{\sum_{i=1}^{21} x_i}
\end{equation}
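A possible implementation of this weighted average (illustrative Python; the function name and the stand-in PSF images and fluxes are assumptions, not the actual ``pyfits2.py'' code):
\begin{verbatim}
import numpy as np

def weighted_psf(psf_images, fluxes):
    # sum_i(psf_i * flux_i) / sum_i(flux_i), pixel by pixel.
    stack = np.asarray(psf_images, dtype=float)      # shape (21, ny, nx)
    w = np.asarray(fluxes, dtype=float)
    return (w[:, None, None] * stack).sum(axis=0) / w.sum()

psfs = [np.random.rand(32, 32) for _ in range(21)]   # stand-in PSFs
fluxes = np.linspace(1.0, 2.0, 21)                   # stand-in band fluxes
print(weighted_psf(psfs, fluxes).shape)              # (32, 32)
\end{verbatim}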
\begin{figure}[ht]
\begin{center}
\includegraphics[width=15cm,height=15cm]{flux.jpg}
\caption{Integrated flux from the SED files as a function of wavelength}
\label{fig:flux}
\end{center}
\end{figure}
\clearpage
One of the main purposes is to study how the PSF changes with wavelength.
To test this I wrote a programme ``inte.py''. This programme finds the flux ratio for two wavelength ranges, one from 1.24--1.57 $\mu m$ and
the other from 1.57--2.00 $\mu m$. Based upon the SED files it integrates over the two wavelength ranges. Then it finds the magnitude difference, which
was defined as the variable ``color'' in the programme.
The magnitude is given by
\begin{equation}
m = -2.5 \log_{10}\left(\frac{f_1}{f_2}\right)
\end{equation}
where $f_1$ is the flux for the first wavelength range and $f_2$ is the flux for the second wavelength range.
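The whole calculation can be sketched as follows (Python; the band edges are those quoted above, while the function name and the stand-in SED are assumptions):
\begin{verbatim}
import numpy as np

def color_index(sed_wave, sed_flux,
                band1=(1.24, 1.57), band2=(1.57, 2.00)):
    # m = -2.5 * log10(f1 / f2) with f1, f2 the fluxes integrated
    # over the two wavelength ranges.
    def integrate(lo, hi):
        inside = (sed_wave >= lo) & (sed_wave <= hi)
        return np.trapz(sed_flux[inside], sed_wave[inside])
    f1, f2 = integrate(*band1), integrate(*band2)
    return -2.5 * np.log10(f1 / f2)

wave = np.linspace(1.2, 2.1, 500)
flux = np.exp(-wave)                # stand-in for a stellar SED
print(color_index(wave, flux))
\end{verbatim}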
| {
"alphanum_fraction": 0.7717810332,
"avg_line_length": 48.590778098,
"ext": "tex",
"hexsha": "d65dac3cbf059860e0e9f965af5b0c0ff9ca51a1",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "7868d6b01cb58dd295971a62bce8178dd673ed8c",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "bhishanpdl/Research",
"max_forks_repo_path": "Prospectus/prospectus/sections/riffat.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "7868d6b01cb58dd295971a62bce8178dd673ed8c",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "bhishanpdl/Research",
"max_issues_repo_path": "Prospectus/prospectus/sections/riffat.tex",
"max_line_length": 241,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "7868d6b01cb58dd295971a62bce8178dd673ed8c",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "bhishanpdl/Research",
"max_stars_repo_path": "Prospectus/prospectus/sections/riffat.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 4310,
"size": 16861
} |
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Copyright (c) 2011, ETH Zurich.
% All rights reserved.
%
% This file is distributed under the terms in the attached LICENSE file.
% If you do not find this file, copies can be found by writing to:
% ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\documentclass[a4paper,11pt,twoside]{report}
\usepackage{bftn}
\usepackage{calc}
\usepackage{verbatim}
\usepackage{xspace}
\usepackage{pifont}
\usepackage{textcomp}
\usepackage{amsmath}
\title{Message Notifications}
\author{Barrelfish project}
% \date{\today} % Uncomment (if needed) - date is automatic
\tnnumber{9}
\tnkey{Notifications}
\begin{document}
\maketitle % Uncomment for final draft
\begin{versionhistory}
\vhEntry{1.0}{16.06.2010}{RI}{Initial version}
\end{versionhistory}
% \intro{Abstract} % Insert abstract here
% \intro{Acknowledgements} % Uncomment (if needed) for acknowledgements
% \tableofcontents % Uncomment (if needed) for final draft
% \listoffigures % Uncomment (if needed) for final draft
% \listoftables % Uncomment (if needed) for final draft
\chapter{Overview}
\section{Introduction}
Inter-core messaging on Barrelfish (UMP) is currently based on shared
memory circular buffers and a polling mechanism which is designed to
work efficiently given the cache-coherence protocols of a typical NUMA
multiprocessor system. Communication latency can vary by many orders
of magnitude depending on how frequently the receiving process polls
each channel. This document describes the design and implementation of
a new kernel notification primitive for Barrelfish.
The reason I believe we need an IDC notification path can be seen in
most of the traces I took of Tim's IDC and THC test program (see
email of 21/5/10).
screenshot goes here
If you look on core 0 you see 3 domains polling for incoming URPC
messages. Each polls for a while and then yields, and with 3 domains
it takes up to 10000 cycles to notice a message, and obviously the
current mechanism will scale with the number of domains on the
destination core. (always >= 2!). This could be reduced by moving
polling into the kernel, but if any domain is running we have no way
to pre-empt it until the next timer interrupt (about 18 million
cycles!).
\subsection{Polling and cache coherence}
Sending a message involves the sender modifying a single cache line
which (in the expected case) the receiver is actively polling. The
cache line starts in shared (S) mode in the cache of both sender and
receiver cores. When the sender writes to the cache line this causes a
transition to the owned (O) state and an invalidation of the copy in
the receiver's cache. On most of our NUMA systems, be they
Hypertransport, QPI or shared bus, this is effectively a system-wide
broadcast. Newer AMD Istanbul processors have a directory-based cache
coherency protocol which avoids the broadcast. The receiver then
pulls the modified cache line from the sender's cache resulting in the
cache line being in both caches in the shared (S) state.
\subsection{Message Latency}
When sender and receiver threads are the only things running on each
core this can be extremely low latency ($\sim$600 cycles). When the
destination core is shared by multiple threads, or even multiple
domains, the message latency is determined by kernel- and user-mode
scheduling policies and is typically a function of the kernel clock
interrupt rate and the number of domains (and channels) in the system.
Even in simple cases the message latency will usually increase to at
least one timer tick (at least 1ms, probably 10ms - i.e.~millions of
cycles!)
Several Barrelfish papers have talked about schemes where receiver
domains initially poll for messages, but eventually back off to a more
heavyweight blocking mechanism. In the current tree this involves
domains eventually ``handing off'' the polling of message channels to
their local monitor process via a (blocking) local IPC. The monitor
polls URPC channels for \emph{all} blocked domains and when it finds a
message it sends an IPC to the receiver process causing it to wake.
Since all these cache lines will be ``hot'' in the cache, this is not
as expensive as it might appear, but still does not allow for
preemption of a running thread before the next clock interrupt. It
also potentially captures kernel scheduling policy.
\subsection{Scalability}
Barrelfish does not currently multiplex URPC channels in any way, so
it is common to see $O(N)$ and even $O(N^2)$ URPC channels between
services which run on each core (e.g.~monitors), or between the
dispatchers of a domain which ``spans'' multiple cores. Though the
memory consumption is not a huge problem (a URPC channel is a handful
of cache lines), the number of channels can grow rapidly and this will
have an effect on polling costs and message latency.
Domains do not have any efficient way to identify the (probably) small
set of URPC channels which currently have pending messages. In Nemesis
this was achieved by each domain having a ``hint FIFO'' which
contained a list of channels with \emph{new} incoming messages
(identifying new messages required an explicit \emph{acknowledgement
count} in each channel). However, when a domain was activated it
could efficiently dispatch new messages, and if the FIFO overflowed
then the domain resorted to polling all channels.
In a many-core environment, a single hint FIFO would potentially be
an expensive bottleneck due to shared-write cache lines accessed by
many cores. Having a FIFO for each potential sending core requires
$O(N)$ space, but would avoid contention.
\subsection{Primitives}
Ideally we need primitives which allow fast URPC-style messaging when
a receiver is known to be polling, but which also allow us to control
sending timely notifications to a remote kernel, domain and thread.
\chapter{Design}
In the global kernel data page have an array of pointers to per-core
notification (PCN) pages. (by the way, why is struct global still
declared in kputchar.h! :-) Each per-core notification page is divided
into cacheline-sized slots. (i.e. 1 page has 64 slots of 64-bytes)
There is one slot for each sending core. Each slot is treated as a
shared FIFO with some agreed number of entries. Each entry can
contain a short channel ID or zero. Each kernel keeps private arrays
of head pointers and tail pointers (need only 1 byte per entry x
num cpus).
On the sending side I have a new system call:
sys$\_$notify(dest$\_$core, dest$\_$chanid);
This looks up the PCN page of the destination core in the kernel globals page.
It then indexes into the PCN page using its own core id to locate the pairwise notification FIFO.
It looks up the FIFO head pointer using the dest$\_$core id.
It then looks at that entry of the FIFO... if it is non-zero then the fifo is full.
If the entry is zero then it writes the dest$\_$chanid and increments the private head pointer.
On the receiving side, the destination core keeps a private index into
each of its incoming fifos. These tell it which entry it needs to
look at next. It could therefore poll each of the FIFOs waiting for a
non-zero channelID value...
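To make the protocol concrete, here is a small executable model of the pairwise FIFOs (Python, purely illustrative; the real implementation is C inside the CPU driver, and the core count and FIFO depth below are made up):
\begin{verbatim}
NUM_CORES = 4          # assumed; one FIFO per (sender, receiver) pair
FIFO_ENTRIES = 8       # assumed depth; a zero entry means "empty slot"

# pcn[dest][src] is the FIFO written by core 'src', drained by 'dest'.
pcn = [[[0] * FIFO_ENTRIES for _ in range(NUM_CORES)]
       for _ in range(NUM_CORES)]
head = [[0] * NUM_CORES for _ in range(NUM_CORES)]  # private to senders
tail = [[0] * NUM_CORES for _ in range(NUM_CORES)]  # private to receivers

def sys_notify(src, dest, chan_id):
    # Sender: write a non-zero channel ID into the next entry, or fail
    # if that entry has not been drained yet (FIFO full).
    fifo, h = pcn[dest][src], head[src][dest]
    if fifo[h] != 0:
        return False
    fifo[h] = chan_id
    head[src][dest] = (h + 1) % FIFO_ENTRIES
    return True

def poll(dest, src):
    # Receiver: drain pending channel IDs, zeroing each slot behind it.
    fifo, drained = pcn[dest][src], []
    while fifo[tail[dest][src]] != 0:
        t = tail[dest][src]
        drained.append(fifo[t])
        fifo[t] = 0
        tail[dest][src] = (t + 1) % FIFO_ENTRIES
    return drained

sys_notify(1, 2, chan_id=42)
print(poll(2, 1))      # [42]
\end{verbatim}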
The mechanism so far results in a single FIFO cacheline toggling
between Shared and Modified state on both sender and receiver. I
timed 10000 invocations of the above sys$\_$notify() call (with the
receiver core in a tight polling loop) with a cost of 350 cycles per
notification (for a shared L3) and 450 cycles cross-package. Note
that the extra cache traffic of this design is probably not optimal,
but it's in the noise compared to our current IDC costs when we have
$>$1 domain on a core (i.e. always!)
Obviously a tight polling loop on the receiver is not ideal... we
aren't always polling, and in any event this would scale as O(N
cores).
One solution is for sys$\_$notify() to send an IPI to dest$\_$core
whenever the FIFO goes non-empty, or at the request of the receiver
(e.g. if it wrote a special 'request IPI' value into the next empty
slot, rather than zero).
Given the number of CPUs in our current ccNUMA machines, we could
easily afford to use a separate interrupt vector for each sending
core. This would identify to the destination core which FIFO to look
at, with no polling overhead. We could use a single IPI vector, but
this would need some hierarchical shared datastructure to efficiently
identify which fifos to poll. (the PCN entry for src$\_$core ==
dest$\_$core is unused and could be treated as 512 flag-bits, cf.\ Simon's
RCK code)
Sending the IPI within sys$\_$notify() would take a few hundred extra
cycles (but may overlap with the cache coherence messages?) Taking
the IRQ and acking it on the receiver is probably between 500 and 1000
cycles depending on which ring the destination core is executing in.
(I tried Richard's HLT in Ring0 with interrupts disabled trick and it
does not work any more!).
One interesting trick might be to deliver notifications to a
hyperthread so that interrupt latency and polling costs were
interleaved with normal processing...and only interrupt the
'application hyperthread' if a reschedule is necessary.
In all of the above cases, I would imagine that notifications would
not necessarily cause the running domain to be pre-empted. However a
scheduler activation for incoming IDC would be possible.
Given that true polling URPC *could* cost only a few hundred cycles if
both domains are in a tight loop, this notification mechanism is not
something I would imagine using on each message. Instead I would
suggest having a 'PUSH' flag you can pass on urpc$\_$send(), or a
b->push() method on the flounder binding so the programmer can decide
to expedite message delivery at suitable points.
\chapter{Implementation}
I just finished doing a more complete version of the UMP Notification
mechanism. It now uses a UmpNotify capability which you get by
retyping your Dispatcher cap. The monitor's UMP binding mechanism
already propagates a notification cap between client and server. I
hand edited the bench.if stubs to allocate the caps and invoke the
notification when doing a message send.
The cap$\_$invoke handler puts the receiver's DCB pointer into the
destination core's incoming notification FIFO and sends an IPI with a
vector identifying the sender core (allowing demux without polling).
The receiving core has an notification IPI handler which drains the
notification fifo and does a cswitch to the most recently notified
domain. The domain will get an activation and poll its URPC channels
(eventually).
After thinking a bit more about the costs of notification, it seems
that 2800 cycles is quite a lot to pay in the default path to send a
notification. Quite a bit of this is the high cost of cap$\_$invoke on
x86$\_$64 (2K cycles) compared to the hacky syscall I was using last week
(~800), but in general we don't want to pay much for notification
unless it's necessary.
I added code in the domain dispatch path to publish the identity of
the currently running DCB, and ideally the time at which it will next
be preempted. The notification kernel code on the sender side can
therefore tell if it's worth sending an IPI and return immediately if
the domain is already running. This leads to the behaviour below
where, just before T=70000 core 1 sends a message to core 2, notices
it isn't the currently running domain and so sends a notification IPI.
The monitor on core2 is preempted and the receiver domain gets to run.
Activation code takes about 2000 cycles but the message gets there
pretty quickly.
This allows me to increase MAX$\_$POLLS$\_$PER$\_$TIMESLICE to 1000
without excessive penalty, which in turn allows the client and server
to remain in the polling loop and notice messages before they yield to
other domains. Net result is that the common case RPC cost is about
4x faster. The worst case is hopefully bounded by the cost of
(cap$\_$invoke + notify IPI + domain activation + ump$\_$poll)... about
4000 cycles. I ought to check this by running some ``while(1)''
domains on each core... but I'm fairly (naively?) optimistic.
\section{Performance}
\chapter{Testing and Debugging}
\end{document}
| {
"alphanum_fraction": 0.7770116807,
"avg_line_length": 47.7829457364,
"ext": "tex",
"hexsha": "9e70041dc4619649e2400e7c02392b4c8d7b47e0",
"lang": "TeX",
"max_forks_count": 55,
"max_forks_repo_forks_event_max_datetime": "2022-03-31T05:00:03.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-02-03T05:28:12.000Z",
"max_forks_repo_head_hexsha": "06a9f54721a8d96874a8939d8973178a562c342f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "lambdaxymox/barrelfish",
"max_forks_repo_path": "doc/009-notifications/Notify.tex",
"max_issues_count": 12,
"max_issues_repo_head_hexsha": "06a9f54721a8d96874a8939d8973178a562c342f",
"max_issues_repo_issues_event_max_datetime": "2020-03-18T13:30:29.000Z",
"max_issues_repo_issues_event_min_datetime": "2016-03-22T14:44:32.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "lambdaxymox/barrelfish",
"max_issues_repo_path": "doc/009-notifications/Notify.tex",
"max_line_length": 97,
"max_stars_count": 111,
"max_stars_repo_head_hexsha": "06a9f54721a8d96874a8939d8973178a562c342f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "lambdaxymox/barrelfish",
"max_stars_repo_path": "doc/009-notifications/Notify.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-01T23:57:09.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-02-03T02:57:27.000Z",
"num_tokens": 2875,
"size": 12328
} |
\documentclass[12pt,a4paper]{article}
\setlength{\topmargin}{-5mm}
\setlength{\textheight}{244mm}
\setlength{\oddsidemargin}{0mm}
\setlength{\textwidth}{165mm}
\usepackage{fancyvrb}
\DefineVerbatimEnvironment{verbatim}{Verbatim}{xleftmargin=4mm}
\usepackage{lmodern}
\usepackage[T1]{fontenc}
\usepackage{pxfonts}
\usepackage{textcomp}
\usepackage{tabu}
\title{Dijkstra Maps in Roguelikes}
\author{Jeremy Mates}
\date{March 13, 2020}
\usepackage{hyperref}
\hypersetup{pdfauthor={Jeremy Mates},pdftitle={Dijkstra Maps in Roguelikes}}
\begin{document}
\bibliographystyle{plainnat}
\maketitle
\setlength{\parindent}{0pt}
\section*{Simple Pathfinding}
Consider the following map, wherein there are three velociraptors
\texttt{R} surrounding the player \texttt{@} on an open
field\cite{xkcd135}. How can the raptors reach the player?
\begin{verbatim}
......R
.R.....
.......
...@...
.......
.......
..R....
\end{verbatim}
One solution to this problem is the so-called Dijkstra Map\cite{tipodm}.
A map this small can easily be worked out on graph paper; set the
location of the player to the value \texttt{0}. Then in the squares
adjacent to that point write the next highest number, \texttt{1}.
\begin{verbatim}
.......
.......
...1...
..101..
...1...
.......
.......
\end{verbatim}
And so forth until the map is filled. This is a Dijkstra Map (with
Manhattan distances).
\begin{verbatim}
6543456
5432345
4321234
3210123
4321234
5432345
6543456
\end{verbatim}
Note that diagonal moves were not considered\textendash only those
points North South East or West of the given square. This has
ramifications on more complicated maps that contain obstacles.
Meanwhile, each velociraptor can use this map to find the player by
walking ``downhill'' to \texttt{0}, or can flee by picking a cell with a
higher value.
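For concreteness, here is one way such a map can be computed, using a breadth-first flood fill outward from the goal (a Python sketch of the general technique, not code from any particular roguelike; it reproduces the Manhattan-distance map above):
\begin{verbatim}
from collections import deque

def dijkstra_map(level, goals):
    # Breadth-first flood fill from the goal cells using 4-way moves.
    # '#' cells are impassable; walls and unreachable cells stay None.
    rows, cols = len(level), len(level[0])
    cost = [[None] * cols for _ in range(rows)]
    queue = deque(goals)
    for r, c in goals:
        cost[r][c] = 0
    while queue:
        r, c = queue.popleft()
        for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nr, nc = r + dr, c + dc
            if (0 <= nr < rows and 0 <= nc < cols
                    and level[nr][nc] != '#' and cost[nr][nc] is None):
                cost[nr][nc] = cost[r][c] + 1
                queue.append((nr, nc))
    return cost

level = ["......R", ".R.....", ".......", "...@...",
         ".......", ".......", "..R...."]
dmap = dijkstra_map(level, goals=[(3, 3)])       # the player's cell
for row in dmap:
    print("".join(str(v) for v in row))          # matches the map above
\end{verbatim}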
\section*{Obstacles}
Many maps will be more complicated than the above and will contain
obstacles, typically walls, or lava, or there may be monsters that can
pass through walls but not across holy water\textendash specters
perhaps\textendash or anything else you can imagine. Let us for now
only consider physical walls, represented using \texttt{\symbol{35}} as
is typical for roguelikes.
\begin{verbatim}
R....
.###.
.#@..
.#.##
.#.#.
...#.
\end{verbatim}
A Dijkstra Map for this might look like
\vskip 1em%
\begin{tabu} spread 0pt{|X|X|X|X|X|} \hline
8 & 7 & 6 & 5 & 4\\\hline
9 & -1 & -1 & -1 & 3\\\hline
8 & -1 & 0 & 1 & 2\\\hline
7 & -1 & 1 & -1 & -1\\\hline
6 & -1 & 2 & -1 & ?\\\hline
5 & 4 & 3 & -1 & ?\\\hline
\end{tabu}
\vskip 1em%
where the impassable walls are represented by \texttt{-1}. One may use
\texttt{Inf} or an object for such cells, but I assume here an integer
or fixnum array. Another option would be to leave walls at the maximum
integer or \texttt{MOST-POSITIVE-FIXNUM} value and to ignore them while
calculating the costs. Note the two squares lower right that have no
path to the player; these after calculation will typically remain at the
maximum integer value the field was filled with before the pathfinding
pass. This map by the way was made with my \texttt{Game::DijkstraMap}
module\cite{gdm}.
\begin{verbatim}
#!/usr/bin/env perl
use strict;
use warnings;
use Game::DijkstraMap;
my $dm = Game::DijkstraMap->new( str2map => <<'EOM' );
.....
.###.
.#x..
.#.##
.#.#.
...#.
EOM
print $dm->to_tsv;
\end{verbatim}
Pathfinding on this map must ignore \texttt{-1} and look for values
equal to or greater than \texttt{0} when routing to a goal. Here, the
raptor should move horizontally to reach the player in as few moves
as possible.
\section*{Multiple Goals}
A Dijkstra Map may contain multiple goals; pathfinding will find the (or
a) nearest goal. Crabs \texttt{c} for example may wish to retreat to
water \texttt{\symbol{126}} when threatened by the player.
\begin{verbatim}
..~~......~.....
.~~~~......~~...
...~~..c..~~....
..........c.....
...~~...........
..~~...c........
...~~...........
.....~.........@
\end{verbatim}
\vskip 1em%
\begin{tabu} spread 0pt{|X|X|X|X|X|X|X|X|X|X|X|X|X|X|X|X|} \hline
2 & 1 & 0 & 0 & 1 & 2 & 3 & 4 & 2 & 1 & 0 & 1 & 1 & 2 & 3 & 4\\\hline
1 & 0 & 0 & 0 & 0 & 1 & 2 & 3 & 4 & 2 & 1 & 0 & 0 & 1 & 2 & 3\\\hline
2 & 1 & 1 & 0 & 0 & 1 & 2 & 3 & 2 & 1 & 0 & 0 & 1 & 2 & 3 & 4\\\hline
3 & 2 & 2 & 1 & 1 & 2 & 3 & 4 & 3 & 2 & 1 & 1 & 2 & 3 & 4 & 5\\\hline
4 & 3 & 1 & 0 & 0 & 1 & 2 & 3 & 4 & 3 & 2 & 2 & 3 & 4 & 5 & 6\\\hline
2 & 1 & 0 & 0 & 1 & 2 & 3 & 4 & 5 & 4 & 3 & 3 & 4 & 5 & 6 & 7\\\hline
3 & 2 & 1 & 0 & 0 & 1 & 2 & 3 & 4 & 5 & 4 & 4 & 5 & 6 & 7 & 8\\\hline
4 & 3 & 2 & 1 & 1 & 0 & 1 & 2 & 3 & 4 & 5 & 5 & 6 & 7 & 8 & 9\\\hline
\end{tabu}
\vskip 1em%
This map however does not consider other crabs\textendash can two occupy
the same square, or will one need to find a longer route to a free water
cell?\textendash nor the influence of the player; a monster may
realistically have multiple desires: get to water while also avoiding
the player. This can be solved in various ways; moves could be ranked by
utility value--corners might be scored poorly for a fleeing monster (see
e.g. ``Behavioral Mathematics for Game AI''\cite{gameai} for ideas)--or
by using a combination of maps.
\section*{Combining Dijkstra Maps}
Multiple maps may be added together and the combined score used to move
an entity towards or away from goals. Suppose that some
Knight\cite{donq} seeks to make justice upon a fleeing goblin. The
monster, however, has been cornered in a room so simple ``move to the
highest cost cell'' code will leave the monster trapped in a corner.
There should be, in this condition, some means for the monster to slip
past the player.
\begin{verbatim}
######
#....#
#.g.@.
#....#
######
\end{verbatim}
A monster whose state is fleeing needs to be anywhere else; this can be
represented by a map with the monster as the goal, higher values thus
being more desirable. This map can be combined with the map centered on
the player--and also any of their allies, if necessary. Since the
player map will have undesirably low values close to the player, the
resulting map should give a path of higher values for a monster to flee
along step by step.
\begin{verbatim}
###### ###### ######
#2123# #4321# #6444#
#1g123 + #321@1 = #42224
#2123# #4321# #6444#
###### ###### ######
\end{verbatim}
The best single move in this case would be for the goblin to move to
one of the corners (combined cost 6) and for the player to step
towards them, so
\begin{verbatim}
###### ######
#g...# #....#
#..... #..@..
#....# #....#
###### ######
g123 3212 3335
11345 + 21@12 = 33357
2345 3212 5557
\end{verbatim}
The goblin is now stuck on an island (cost 3), so the player
advances again,
\begin{verbatim}
###### ######
#g...# #....#
#..... #.@...
#....# #....#
###### ######
g123 2123 2246
12345 + 1@123 = 22468
2345 2123 4468
\end{verbatim}
This is not productive as even with combined maps the goblin still gets
stuck. Perhaps if we give more weight to the values of one map or the
other, say multiplying the goblin costs by 2?
\begin{verbatim}
###### ######
#g...# #....#
#..... #.@...
#....# #....#
###### ######
g123 2123 2369
11234 + 1@123 = 347AD
2223 2123 67AD
* 2
\end{verbatim}
This looks promising, though the best move is to move into the player,
which might be bad. The easiest solution would be to treat the player
(and any allies) as impassable squares on the goblin's flee map
\begin{verbatim}
###### ######
#g...# #....#
#.#... #.@...
#....# #....#
###### ######
g123 2123 2369
1#234 + 1@123 = 3#7AD
2223 2123 67AD
* 2
\end{verbatim}
Better. The goblin might here roll unlucky and move down instead of
right towards the exit, but that should create conditions that will
cause them to try to move past the player:
\begin{verbatim}
###### ######
#....# #....#
#g#... #.@...
#....# #....#
###### ######
1234 2123 458B
1#456 + 1@123 = 1#9CF
1234 2123 458B
* 2
\end{verbatim}
However the monster may move back and forth between the back wall and an
edge of the room instead of only moving towards the door if the RNG
dictates that, but at least it is not always stuck, and it may roll well
and escape. \\
Another method would be to construct a line or path to the highest value
on the monster's map instead of only looking for the single best move in
any given turn; this would avoid the possibility of the monster getting
stuck over multiple turns though may be more expensive to calculate and
store. The monster's map also need not comprise the entire field; it may
only need to be slightly larger than the player's field of vision. \\
Different or non-integer weights can be used, or the maps could be
subtracted instead of being added, though experimentation shows that
adding the maps and applying a small positive integer weight to the
monster flee map better avoids problems such as the monster islanding
itself in a corner. \\
Play testing with different approaches and map weights will likely be
necessary. Level design may also be a factor, 3x3 rooms give an obvious
path for the player to try to box a monster in with; larger rooms or
rooms with multiple exits or rooms with rounded corners should create
different weights for the monsters to flee along.
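A sketch of the combination step and of the resulting move selection (again illustrative Python; the flee weight, the tie-breaking, and the tiny example maps are arbitrary):
\begin{verbatim}
import random

def combine(player_map, flee_map, flee_weight=2):
    # Add the two maps cell by cell, weighting the flee map; walls (-1)
    # in either map stay impassable.
    return [[-1 if p < 0 or f < 0 else p + flee_weight * f
             for p, f in zip(prow, frow)]
            for prow, frow in zip(player_map, flee_map)]

def best_flee_move(combined, r, c):
    # Step to a neighbouring cell with a higher combined score; ties
    # broken at random, which is where the wandering comes from.
    best, score = None, combined[r][c]
    moves = [(1, 0), (-1, 0), (0, 1), (0, -1)]
    random.shuffle(moves)
    for dr, dc in moves:
        nr, nc = r + dr, c + dc
        if (0 <= nr < len(combined) and 0 <= nc < len(combined[0])
                and combined[nr][nc] > score):
            best, score = (nr, nc), combined[nr][nc]
    return best

player = [[2, 1, 2, 3], [1, 0, 1, 2], [2, 1, 2, 3]]   # toy player map
flee   = [[0, 1, 2, 3], [1, 2, 3, 4], [2, 3, 4, 5]]   # toy monster map
print(best_flee_move(combine(player, flee), 0, 0))    # the monster's cell
\end{verbatim}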
\section*{The Diagonal}
Costs in the above maps have been done without consideration for
diagonal moves. Consider the following map, where \texttt{x} is the
goal.
\begin{verbatim}
@#..
#...
...x
\end{verbatim}
\vskip 1em%
\begin{tabu} spread 0pt{|X|X|X|X|} \hline
? & -1 & 3 & 2\\\hline
-1 & 3 & 2 & 1\\\hline
3 & 2 & 1 & 0\\\hline
\end{tabu}
\vskip 1em%
The player here cannot pathfind to the goal as the diagonal move was not
considered by the 4-way algorithm that only consults cells North South
East and West. Various roguelikes (Angband, Dungeon Crawl Stone Soup)
permit such diagonal moves, so will need to use an 8-way algorithm when
constructing a Dijkstra Map. Other roguelikes (Brogue, POWDER) may deny
such diagonal moves so can use the 4-way algorithm. This choice also
influences level design. 4-way and 8-way roguelikes require rather
different diagonal corridors:
\begin{verbatim}
8-way @#### 4-way @.###
corridor #.### corridor #..##
##x## ##x##
\end{verbatim}
Brogue (like rogue) is complicated in that it allows some diagonal
moves. A 4-way Dijkstra Map algorithm can be used with 8-way motion
provided 4-way moves are possible to everywhere that must be reached.
Diagonal moves in such a case exist as shortcuts\textendash moving
diagonally along the above 4-way corridor (which Brogue does not permit,
nor POWDER unless one is polymorphed into a grid bug).
\section*{Diagonal Maps}
8-way maps typically assign equal costs to all adjacent squares. The
original raptor map instead with Chebyshev distances:
\begin{verbatim}
......R 3333333
.R..... 3222223
....... 3211123
...@... 3210123
....... 3211123
....... 3222223
..R.... 3333333
\end{verbatim}
This while traditional for roguelikes is not actually correct; diagonals
under euclidean geometry should instead use $\sqrt{x^2 + y^2}$ or
$\sqrt{2}$ instead of \texttt{1} for the closest diagonal. Various
roguelikes are actually non-euclidean: Brogue and Dungeon Crawl Stone
Soup apply the same cost to a move in any direction, diagonal or
otherwise. Anyways! Our original diagonal map that stumped the 4-way map
under (non-euclidean) 8-way is:
\begin{verbatim}
@#..
#...
...x
\end{verbatim}
\vskip 1em%
\begin{tabu} spread 0pt{|X|X|X|X|} \hline
3 & -1 & 2 & 2\\\hline
-1 & 2 & 1 & 1\\\hline
3 & 2 & 1 & 0\\\hline
\end{tabu}
\vskip 1em%
And the player can path to the goal.
\section*{Not Just for Animates}
Dijkstra Map can be used for other purposes. Given a map with two rooms
in it, how to connect them?
\begin{verbatim}
####################
###########XXXXXX###
###########X....X###
###########X....X###
#XXXXXX####XXXXXX###
#X....X#############
#X....X#############
#X....X#############
#XXXXXX#############
####################
\end{verbatim}
Place a goal anywhere in the wall space, and then considering only walls
\texttt{\symbol{35}} as passable construct a Dijkstra Map with the goal
as the destination, here shown partially complete.
\begin{verbatim}
XXXXXX
X....X8
DCBA98X....X7
XXXXXXA987XXXXXX6
X....X98765432345
X....X87654321234
X....X76543210123
XXXXXX 765432123
\end{verbatim}
Then pick a random door location in each room, and pathfind from each
door to the goal, drawing a corridor along the way.
\begin{verbatim}
####################
###########XXXXXX###
###########X....+.##
#####...###X....X.##
#XXXX+X.###XXXXXX.##
#X....X...#######.##
#X....X##.........##
#X....X#######..####
#XXXXXX#############
####################
\end{verbatim}
This will run into complications if rooms are adjacent or worse two
rooms block a third from reaching a goal (there should likely be one
goal per unconnected wall space in a map) though there are various
solutions to these challenges, such as pathfinding through rooms or
using additional code to find and link up adjacent rooms.
\section*{Super Dimensional Dijkstra Maps}
We need not confine ourselves to two, or even three dimensions;
Dijkstra Maps can be built in arbitrary numbers of dimensions
(memory requirements, implementation demands, and sanity
permitting). The following is a four-dimensional map with an
implementation\cite{crazylisp} that does not consider diagonal
moves legal.
\begin{verbatim}
% sbcl --noinform --load dijkstramap.lisp
* (setf *dimap-cost-max* 99)
99
* (defparameter level
(make-array '(3 3 3 3)
:initial-contents
'((((99 -1 99) (-1 99 -1) (99 -1 99))
((-1 99 99) (99 99 99) (99 99 -1))
((99 99 99) (99 99 99) (99 99 99)))
(((-1 99 99) (99 99 99) (99 99 -1))
((99 99 99) (99 0 99) (99 99 99))
((99 99 99) (99 99 99) (99 99 99)))
(((99 99 99) (99 99 99) (99 99 99))
((99 99 99) (99 99 99) (99 99 99))
((99 99 99) (99 99 99) (99 99 99))))))
LEVEL
* (dimap-calc level)
4
* level
#4A((((99 -1 4) (-1 2 -1) (4 -1 99))
((-1 2 3) (2 1 2) (3 2 -1))
((4 3 4) (3 2 3) (4 3 4)))
(((-1 2 3) (2 1 2) (3 2 -1))
((2 1 2) (1 0 1) (2 1 2))
((3 2 3) (2 1 2) (3 2 3)))
(((4 3 4) (3 2 3) (4 3 4))
((3 2 3) (2 1 2) (3 2 3))
((4 3 4) (3 2 3) (4 3 4))))
*
\end{verbatim}
Productive uses for such maps are left as an exercise to the reader.
\clearpage
\bibliography{references}
\end{document}
| {
"alphanum_fraction": 0.6479484571,
"avg_line_length": 29.3725099602,
"ext": "tex",
"hexsha": "3c9f73c17b197da7e261eaae4bc237a8912746ad",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "586d8a9a853612f8f52066253284c3befe6f030a",
"max_forks_repo_licenses": [
"0BSD"
],
"max_forks_repo_name": "thrig/ministry-of-silly-vaults",
"max_forks_repo_path": "dijkstramap.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "586d8a9a853612f8f52066253284c3befe6f030a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"0BSD"
],
"max_issues_repo_name": "thrig/ministry-of-silly-vaults",
"max_issues_repo_path": "dijkstramap.tex",
"max_line_length": 76,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "586d8a9a853612f8f52066253284c3befe6f030a",
"max_stars_repo_licenses": [
"0BSD"
],
"max_stars_repo_name": "thrig/ministry-of-silly-vaults",
"max_stars_repo_path": "dijkstramap.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-28T15:36:01.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-11-29T01:45:30.000Z",
"num_tokens": 4663,
"size": 14745
} |
\subsection{Topological groups}\label{subsec:topological_groups}
\begin{definition}\label{def:topological_group}
Let \( G \) be any \hyperref[def:group]{group} and let \( \mscrT \) be a topology on \( G \). The tuple \( (G, \cdot, \mscrT) \) is called a \term{topological group} if the group structure and topological structure agree, that is, the operations \( \cdot: G \times G \to G \) and \( (-)^{-1}: G \to G \) are continuous with respect to \( \mscrT \).
See \fullref{rem:hausdorff_topological_groups} and \fullref{def:category_of_topological_groups} for more nuances.
\end{definition}
\begin{remark}\label{rem:hausdorff_topological_groups}
It is conventional to require the topology in a topological group to be \( T_1 \) (see \fullref{def:separation_axioms}). We will not do this due to our goal of not assuming more than is necessary.
Due to \fullref{thm:topological_group_t0_iff_t3.5}, it is immaterial whether we require the topology to be \( T_0 \) or \( T_{3.5} \) or anywhere in between. It is customary to call the space \enquote{Hausdorff} (although stronger separation axioms actually hold) and require \( T_1 \) to hold (since it is simple to state).
We will explicitly mention when we want a topological group to be Hausdorff; this is usually the case when we speak of convergence.
\end{remark}
\begin{definition}\label{def:category_of_topological_groups}
The category \( \cat{TopGrp} \) of topological groups is a subcategory of both \( \cat{Top} \) and \( \cat{Grp} \). Its morphisms are the \hyperref[def:global_continuity]{continuous} group \hyperref[thm:group_homomorphism_single_condition]{homomorphisms}.
\end{definition}
\begin{proposition}\label{thm:neighborhood_translations_in_topological_groups}
Fix \( x, y \in G \) in a topological group \( G \). If \( U \) is a neighborhood of \( x \), then both \( V = yx^{-1} U \) and \( W = U x^{-1}y \) are neighborhoods of \( y \).
\end{proposition}
\begin{proof}
Since the group operations are continuous, for fixed \( x \) and \( y \), the function \( f(z) \coloneqq xy^{-1}z \) is continuous.
Note that \( f(V) = xy^{-1} \cdot yx^{-1} U = U \), and since \( f \) is a bijection, \( V = f^{-1}(U) \).
Furthermore, \( f(y) = xy^{-1}y = x \), so \( V \) is the preimage under the continuous map \( f \) of a neighborhood of \( f(y) \). Therefore, \( V \) is a neighborhood of \( y \).
The proof that \( W \) is a neighborhood of \( y \) is analogous.
\end{proof}
\begin{corollary}\label{thm:origin_neighborhoods_in_topological_groups}
In a topological group \( G \), every neighborhood is a translation of a neighborhood of the origin \( e \).
\end{corollary}
\begin{remark}\label{rem:origin_neighborhoods_in_topological_groups}
\Fullref{thm:origin_neighborhoods_in_topological_groups} provides a lot of uniformity by allowing us to consider only neighborhoods of the origin \( e \) when working with topological groups.
\end{remark}
\begin{proposition}\label{thm:topological_group_t0_iff_t3.5}
If a topological group is \( T_0 \), it is automatically \( T_{3.5} \).
\end{proposition}
\begin{proposition}\label{thm:topological_group_uniform_space}
A Hausdorff topological group \( G \) can be made into a uniform space by the families of entourages
\begin{balign*}
& V^l_A \coloneqq \{ (x, y) \in G \times G \colon x^{-1} y \in A \}, \\
& V^r_A \coloneqq \{ (x, y) \in G \times G \colon x y^{-1} \in A \},
\end{balign*}
where \( A \) is a \hyperref[def:neighborhood_set_types/symmetric]{symmetric} neighborhood of the origin \( e \).
If \( G \) is abelian, the two families of entourages coincide.
\end{proposition}
\begin{proposition}\label{thm:limits_are_topological_group_homomorphisms}
If \( \{ a_\alpha \}_{\alpha \in \mscrK} \) and \( \{ b_\alpha \}_{\alpha \in \mscrK} \) are \hyperref[def:topological_net]{nets} in a Hausdorff topological group \( X \) that converge to \( a \) and \( b \), respectively, then \( a_\alpha b_\alpha \to a b \).
\end{proposition}
\begin{proof}
Special case of \fullref{thm:linearity_of_sequence_limits}.
\end{proof}
\ifpdf
\graphicspath{{Chapter11/Figs/Raster/}{Chapter11/Figs/PDF/}{Chapter11/Figs/}}
\else
\graphicspath{{Chapter11/Figs/Vector/}{Chapter11/Figs/}}
\fi
\chapter{Conclusion}
\zw{Write concluding remarks}
\documentclass{article}
\usepackage{amssymb}
\usepackage{comment}
\usepackage{courier}
\usepackage{fancyhdr}
\usepackage{fancyvrb}
\usepackage[T1]{fontenc}
\usepackage[top=.75in, bottom=.75in, left=.75in,right=.75in]{geometry}
\usepackage{graphicx}
\usepackage{lastpage}
\usepackage{listings}
\lstset{basicstyle=\small\ttfamily}
\usepackage{mdframed}
\usepackage{parskip}
\usepackage{ragged2e}
\usepackage{soul}
\usepackage{upquote}
\usepackage{xcolor}
\usepackage[ampersand]{easylist}
% http://www.monperrus.net/martin/copy-pastable-ascii-characters-with-pdftex-pdflatex
\lstset{
upquote=true,
columns=fullflexible,
literate={*}{{\char42}}1
{-}{{\char45}}1
{^}{{\char94}}1
}
\lstset{
moredelim=**[is][\color{blue}\bf\small\ttfamily]{@}{@},
}
% http://tex.stackexchange.com/questions/40863/parskip-inserts-extra-space-after-floats-and-listings
\lstset{aboveskip=6pt plus 2pt minus 2pt, belowskip=-4pt plus 2pt minus 2pt}
\usepackage[colorlinks,urlcolor={blue}]{hyperref}
\begin{document}
\fancyfoot[L]{\color{gray} C4CS -- F'18}
\fancyfoot[R]{\color{gray} Revision 1.0}
\fancyfoot[C]{\color{gray} \thepage~/~\pageref*{LastPage}}
\pagestyle{fancyplain}
\title{\textbf{Office Hours ++ (Unit Testing and Python)\\}}
\author{\textbf{\color{red}{Due: Sunday, November 11th, 11:59PM (Hard Deadline)}}}
\date{}
\maketitle
\section*{Submission Instructions}
Submit this assignment on \href{https://gradescope.com/courses/24368}{Gradescope}.
You may find the free online tool \href{https://www.pdfescape.com}{PDFescape}
helpful to edit and fill out this PDF.
You may also print, handwrite, and scan this assignment.
\section*{Adding Features to Your Python RPN Calculator}
Building on what was covered in lecture, use the test-driven development methodology to implement \textbf{three (3)} of the features from the list below. Note that this list is not comprehensive, so if there is a different feature that you would like to build that is non-trivial (e.g. implementing a function that would just change the sign of a number would be trivial), you may choose to build that instead.
In order to receive credit, you will need to prove the test cases for your additional functions initially fail, and then later pass when the function is fully implemented.
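As a rough sketch of the expected workflow (the module name \texttt{rpn} and the function \texttt{calculate} below are only placeholders; adapt them to however your calculator is organized), the test for a feature such as the factorial operator is written first and shown to fail, and only then is the operator implemented so that the test passes:
\begin{lstlisting}
# test_rpn.py -- written first; fails until "!" is implemented
import rpn

def test_factorial():
    assert rpn.calculate("4 !") == 24

# rpn.py -- a minimal implementation that makes the test above pass
import math

def calculate(line):
    stack = []
    for token in line.split():
        if token == "!":
            stack.append(math.factorial(int(stack.pop())))
        elif token in ("+", "-", "*", "/"):
            b, a = stack.pop(), stack.pop()
            stack.append({"+": a + b, "-": a - b,
                          "*": a * b, "/": a / b}[token])
        else:
            stack.append(float(token))
    return stack.pop()
\end{lstlisting}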
\subsection*{Feature List:}
Choose \textbf{three (3)} of the following features to build. You may elect to build any number of custom features and each will count as a separate feature - you are not restricted to just one custom feature.
\begin{itemize}
\item[$\square$] \textbf{Implement additional calculator keys}
For this feature, implement the following subfeatures:
\begin{itemize}
\item[$\square$] Calculate percentages using the \texttt{\%}. You may find \href{https://blogs.msdn.microsoft.com/oldnewthing/20080110-00/?p=23853}{this article} helpful.
\end{itemize}
\begin{itemize}
\item[$\square$] Calculate exponents using \texttt{\textasciicircum}.
\end{itemize}
\begin{itemize}
\item[$\square$] Perform integer division using \texttt{//}.
\end{itemize}
\end{itemize}
\begin{itemize}
\item[$\square$] \textbf{Implement bitwise operators (and, or and not)}
\end{itemize}
\begin{itemize}
\item[$\square$] \textbf{Implement a basic math library}
For this feature, implement the following subfeatures:
\begin{itemize}
\item[$\square$] Allow for usage of constants (\texttt{pi}, \texttt{e}, ...)
\end{itemize}
\begin{itemize}
\item[$\square$] Binary functions
\end{itemize}
\begin{itemize}
\item[$\square$] Unary functions (\texttt{sin}, \texttt{cos}, etc.)
\end{itemize}
\end{itemize}
\begin{itemize}
\item[$\square$] \textbf{Degrees and Radians Mode}
This would follow from the above feature. Add a command/method which can set whether trigonometric operations use degrees or radians. For example, after entering the keyword \texttt{rad}, the operation \texttt{3.1415 sin} would output (approximately) \texttt{0}. Equivalently, if the mode is set using \texttt{deg}, the operation \texttt{360 cos} would output \texttt{1}.\newline
\end{itemize}
\newpage
\begin{itemize}
\item[$\square$] \textbf{Use the Results of a Previous Calculation}
Add the ability to use the results of your previous calculation in the next calculation. For example, if we have the first input as \texttt{2 3 +} followed by the next command \texttt{:ans 7 +}, the output should be \texttt{12}. In order to implement this, you will need to define a language - in the example we used the colon (:) to denote this special variable.\newline
\end{itemize}
\begin{itemize}
\item[$\square$] \textbf{Summation}
Implement a command that finds the sum of all of the elements on the stack and adds this result to the top of the stack.\newline
\end{itemize}
\begin{itemize}
\item[$\square$] \textbf{Different Base Number Systems}
Add a command or method which will allow the user to use your calculator for calculations in different base number systems. For example, using an option \texttt{hex} and then entering the input \texttt{A A +} should result in the output \texttt{14}.\newline
\end{itemize}
\begin{itemize}
\item[$\square$] \textbf{Factorial Operator}
Implement the factorial operator. For example, the input \texttt{4 !} should return the output \texttt{24}. \newline
\end{itemize}
\begin{itemize}
\item[$\square$] \textbf{Error Handling for Division by Zero}
Implement an error handling method that prevents division by 0 errors and saves the user's existing state. For example, if the stack initially contained \{\texttt{1}\} and a user enters \texttt{4 0 /}, output a helpful error message and preserve the initial stack before these values were entered (i.e. \{\texttt{1}\}). \newline
\end{itemize}
\begin{itemize}
\item[$\square$] \textbf{Session History}
Implement a command such that when it is called, it outputs the standard format of the previous operation. For example, if the previous input was \texttt{3 2 +}, calling this command will print \texttt{3 + 2 = 5}.\newline
\end{itemize}
\begin{itemize}
\item[$\square$] \textbf{Convert Between Decimal and Fraction}
Implement a command to convert the item on the stack from decimal to fraction and vice-versa. For example, calling this command when the item at the top of the stack is \texttt{0.75}, the output should return \texttt{3/4}.\newline
\end{itemize}
\begin{itemize}
\item[$\square$] \textbf{Repeat Operator}
Implement a repeat operator that will repeatedly carry out a provided binary operation on all items provided in the input line. For example, the input \texttt{4 2 6 * !} (where ! is the repeat operator) would result in the output \texttt{48}, regardless of whatever was previously on the stack.\newline
\end{itemize}
\begin{itemize}
\item[$\square$] \textbf{Rotate}
Implement a command that will rotate the order of all items currently on the stack. For example, if the stack currently contains \{\texttt{2 4 6}\}, after calling this operator the stack should be \{\texttt{6 4 2}\}.\newline
\end{itemize}
\begin{itemize}
\item[$\square$] \textbf{Copy}
Implement a command that will add a copy of the current top element to the stack. For example, if the stack currently contains \{\texttt{2 4 6}\}, after calling this operator the stack should be \{\texttt{2 2 4 6}\}.\newline
\end{itemize}
\newpage
\begin{itemize}
\item[$\square$] \textbf{Allow a Persistent Stack}
Make the stack for your RPN calculator persistent. For example, the input \texttt{1 2 3 +} should not produce any errors. From this, the prompt should also be customized to display information about the stack. Using the previous example, the prompt could now be customized to be \texttt{rpn calculator [1, 5]}, or something similar.
\end{itemize}
\begin{itemize}
\item[$\square$] \textbf{Add a Memory System}
Add a basic memory system to your calculator. This would be equivalent to the \texttt{M+}, \texttt{M-}, \texttt{MR} and \texttt{MC} on a regular calculator. This could be extended to add a nearly unlimited number of memory registers by defining another special character (as in the ``Use the Results of a Previous Calculation'' feature). For example, these registers could be called using the \texttt{\&} key: \texttt{\&myval+}, \texttt{\&c4cs-}, etc.
\end{itemize}
\begin{itemize}
\item[$\square$] \textbf{Read data in from an external file}
Add the ability to read in data from an external file for your calculator. This external file could be formatted in a style of your choice (csv, tsv, etc.).
\end{itemize}
\begin{itemize}
\item[$\square$] \textbf{Custom Feature!}
If there is a meaningful command not listed above and currently not already implemented by the calculator, you may choose to implement this instead. For example, you could choose to use other Python libraries (such as numbers, cmath, decimal, fractions, statistics, random, NumPy, SciPy, etc) to add new features. \newline
\end{itemize}
\newpage
\section*{Feature 1:}
\subsection*{Test Code:}
\vspace*{5cm}
\subsection*{Screenshot of Failing Test:}
\vspace*{5cm}
\subsection*{Implementation Code:}
\vspace*{5cm}
\subsection*{Screenshot of Passing Test:}
\vspace*{5cm}
\newpage
\section*{Feature 2:}
\subsection*{Test Code:}
\vspace*{5cm}
\subsection*{Screenshot of Failing Test:}
\vspace*{5cm}
\subsection*{Implementation Code:}
\vspace*{5cm}
\subsection*{Screenshot of Passing Test:}
\vspace*{5cm}
\newpage
\section*{Feature 3:}
\subsection*{Test Code:}
\vspace*{5cm}
\subsection*{Screenshot of Failing Test:}
\vspace*{5cm}
\subsection*{Implementation Code:}
\vspace*{5cm}
\subsection*{Screenshot of Passing Test:}
\vspace*{5cm}
\end{document}
\documentclass[a4paper]{book}
\usepackage[times,inconsolata,hyper]{Rd}
\usepackage{makeidx}
\usepackage[utf8]{inputenc} % @SET ENCODING@
% \usepackage{graphicx} % @USE GRAPHICX@
\makeindex{}
\begin{document}
\chapter*{}
\begin{center}
{\textbf{\huge Package `ConvenienceFunctions'}}
\par\bigskip{\large \today}
\end{center}
\inputencoding{utf8}
\ifthenelse{\boolean{Rd@use@hyper}}{\hypersetup{pdftitle = {ConvenienceFunctions: Convenience functions for R for QBS181}}}{}\ifthenelse{\boolean{Rd@use@hyper}}{\hypersetup{pdfauthor = {Carly Bobak}}}{}\begin{description}
\raggedright{}
\item[Type]\AsIs{Package}
\item[Title]\AsIs{Convenience functions for R for QBS181}
\item[Version]\AsIs{0.1.0}
\item[Author]\AsIs{Carly Bobak}
\item[Description]\AsIs{We provide general utilities for common tasks in data wrangling}
\item[License]\AsIs{MIT}
\item[Depends]\AsIs{R (>= 3.5.0)}
\item[Encoding]\AsIs{UTF-8}
\item[LazyData]\AsIs{true}
\item[Imports]\AsIs{stats,
ggplot2}
\item[RoxygenNote]\AsIs{7.1.2}
\end{description}
\Rdcontents{\R{} topics documented:}
\chapter{Input Parameters}
\label{ch:input}
\newcommand{\param}[5]{{\setlength{\parindent}{0cm} {\ttfamily \bfseries \hypertarget{#1}{#1}}\\{\it Type}: #2\\{\it Default}: #3\\{\it When it matters}: #4\\{\it Meaning}: #5}}
\newcommand{\ssparam}[4]{{\setlength{\parindent}{0cm} {\ttfamily \bfseries \hypertarget{#1}{#1}}\\{\it Type}: #2\\{\it When it matters}: #3\\{\it Meaning}: #4}}
\newcommand{\PETScParam}[2]{{\setlength{\parindent}{0cm} {\ttfamily \bfseries #1}\\{\it Meaning}: #2}}
\newcommand{\myhrule}{{\setlength{\parindent}{0cm} \hrulefill }}
\newcommand{\true}{{\ttfamily .true.}}
\newcommand{\false}{{\ttfamily .false.}}
In this chapter we first describe all the parameters which can be included in the {\ttfamily input.namelist} file.
Then we list some of the command-line flags associated with \PETSc~which can be useful.
Note that all parameters in {\ttfamily input.namelist}, both for \sfincs~and \sfincsScan,
are case-insensitive.
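To give a sense of the overall file structure, a schematic {\ttfamily input.namelist} is simply a sequence of Fortran namelist groups. The sketch below uses only parameters documented in this chapter with default-like values; it is not a physically meaningful case, and the groups and parameters your run actually needs are described in the sections that follow.
\begin{verbatim}
&general
  RHSMode = 1
/
&geometryParameters
  geometryScheme = 4   ! 3-helicity model of the W7-X standard configuration
/
&speciesParameters
  Zs = 1.0
  mHats = 1.0
  nHats = 1.0
  THats = 1.0
/
&physicsParameters
  collisionOperator = 0
/
&resolutionParameters
  Nxi = 16
  Nx = 5
/
\end{verbatim}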
\section{The {\ttfamily general} namelist}
The default values are usually best for the parameters in this namelist.
\myhrule
\param{RHSMode}
{integer}
{1}
{Always}
{Option related to the number of right-hand sides (i.e. inhomogeneous drive terms) for which the kinetic equation is solved.\\
{\ttfamily RHSMode} = 1: Solve for a single right-hand side.\\
{\ttfamily RHSMode} = 2: Solve for 3 right-hand sides to get the 3$\times$3 transport matrix. Presently implemented only for 1 species.\\
{\ttfamily RHSMode} = 3: Solve for the 2$\times$2 monoenergetic transport coefficients. When this option is chosen, \Nx~is set to 1 and only 1 species is used.
}
\myhrule
\param{outputFileName}
{string}
{``sfincsOutput.h5''}
{Always}
{Name which will be used for the HDF5 output file. If this parameter is changed from the default value, \sfincsScan~ will not work.}
\myhrule
\param{saveMatlabOutput}
{Boolean}
{\false}
{Always}
{If this switch is set to true, Matlab m-files are created which
store the system matrix, right-hand side, and solution vector. If an iterative solver is used,
the preconditioner matrix is also saved.
PETSc usually generates an error message if you ask to save Matlab output when
the size of the linear system is more than 1400 $\times$ 1400,
so usually this setting should be false except for very small test problems.
}
\myhrule
\param{MatlabOutputFilename}
{string}
{``sfincsMatrices''}
{Only when \parlink{saveMatlabOutput} == \true.}
{Start of the filenames which will be used for Matlab output.}
\myhrule
\param{saveMatricesAndVectorsInBinary}
{Boolean}
{\false}
{Always}
{If this switch is set to true, the matrix, preconditioner matrix, right-hand-side, and solution vector of the
linear system will be saved in PETSc's binary format.
These matrices and vectors are not very interesting for routine use of the code,
only for code development and debugging.
Regardless of how this parameter is set,
the physically interesting input and output quantities will be saved in a
separate \HDF~file.
}
\myhrule
\param{binaryOutputFilename}
{string}
{``sfincsBinary''}
{Only when \parlink{saveMatricesAndVectorsInBinary} == \true.}
{Start of the filenames which will be used for binary output of the system
matrices, right-hand-side vectors, and solution vectors.
These matrices and vectors are not very interesting for routine use of the code,
only for code development and debugging.
Regardless of how this parameter is set,
the physically interesting input and output quantities will be saved in a
separate \HDF~file.
}
\myhrule
\param{solveSystem}
{Boolean}
{\true}
{Always}
{If this parameter is false, the system of equations will not actually be solved.
Sometimes it can be useful to set this parameter to \false~when debugging.
}
\section{The {\ttfamily geometryParameters} namelist}
\label{sec:geometryParameters}
The parameters in this namelist define the magnetic geometry, and so you will almost certainly want to modify some of these parameters.
\myhrule
\param{geometryScheme}
{integer}
{1}
{Always}
{How the magnetic geometry is specified.\\
{\ttfamily geometryScheme}==1: Use the following 3-helicity model:\\
\begin{eqnarray}
\label{eq:Bmodel}
B(\theta,\zeta)/\bar{B}
&=& (\mbox{\parlink{B0OverBBar}}) [1 + (\mbox{\parlink{epsilon\_t}}) \cos(\theta) \\
&& + (\mbox{\parlink{epsilon\_h}}) \cos((\mbox{\parlink{helicity\_l}}) \theta - (\mbox{\parlink{helicity\_n}}) \zeta) \nonumber \\
&& + (\mbox{\parlink{epsilon\_antisymm}}) \nonumber \\
&& \times \sin((\mbox{\parlink{helicity\_antisymm\_l}}) \theta - (\mbox{\parlink{helicity\_antisymm\_n}}) \zeta)] \nonumber
\end{eqnarray}
(All the variables in this formula are discussed later in this namelist.)\\
{\ttfamily geometryScheme}==2: Use a 3-helicity model of the LHD standard configuration at {\ttfamily rN}=0.5.\\
{\ttfamily geometryScheme}==3: Use a 4-helicity model of the LHD inward-shifted configuration at {\ttfamily rN}=0.5.\\
{\ttfamily geometryScheme}==4: Use a 3-helicity model of the W7-X standard configuration at {\ttfamily rN}=0.5.\\
{\ttfamily geometryScheme}==5: Read the {\ttfamily vmec wout} file specified in \parlink{equilibriumFile} below. The file can be
either ASCII format or \netCDF~format (\sfincs~will auto-detect the format).\\
{\ttfamily geometryScheme}==11: Read the IPP {\ttfamily .bc} format Boozer-coordinate file specified in \parlink{equilibriumFile} below.
The file is assumed to be stellarator-symmetric.\\
{\ttfamily geometryScheme}==12: Read the IPP {\ttfamily .bc} format Boozer-coordinate file specified in \parlink{equilibriumFile} below.
The file is assumed to be stellarator-\emph{asymmetric}.
}
\myhrule
\param{inputRadialCoordinate}
{integer}
{3}
{When \parlink{geometryScheme} == 1, 5, 11, or 12}
{Which radial coordinate to use to specify the flux surface for a single calculation,
or to specify the range of flux surfaces for a radial scan.
(Regardless of the value of this parameter, when \parlink{geometryScheme} == 2, 3, or 4, the flux surface used will be {\ttfamily rN}=0.5.)
See section \ref{sec:radialCoordinates}
for more information about radial coordinates.\\
{\ttfamily inputRadialCoordinate}==0: Use the flux surface specified by \parlink{psiHat\_wish}
for a single run, and use the range specified by \parlink{psiHat\_min} and \parlink{psiHat\_max} for radial scans.\\
{\ttfamily inputRadialCoordinate}==1: Use the flux surface specified by \parlink{psiN\_wish}
for a single run, and use the range specified by \parlink{psiN\_min} and \parlink{psiN\_max} for radial scans.\\
{\ttfamily inputRadialCoordinate}==2: Use the flux surface specified by \parlink{rHat\_wish}
for a single run, and use the range specified by \parlink{rHat\_min} and \parlink{rHat\_max} for radial scans.\\
{\ttfamily inputRadialCoordinate}==3: Use the flux surface specified by \parlink{rN\_wish}
for a single run, and use the range specified by \parlink{rN\_min} and \parlink{rN\_max} for radial scans. \\
No matter which option you pick, the value of all 4 radial coordinates used will be saved in the output \HDF~file.
}
\myhrule
\param{inputRadialCoordinateForGradients}
{integer}
{4}
{Whenever \parlink{RHSMode}==1.}
{Which radial coordinate to use to specify the input gradients of density, temperature, and electrostatic potential,
i.e. which radial coordinate is used in the denominator of these derivatives.
See section \ref{sec:radialCoordinates}
for more information about radial coordinates.\\
{\ttfamily inputRadialCoordinateForGradients}==0: Density gradients are specified by \parlink{dnHatdpsiHats},
temperature gradients are specified by \parlink{dTHatdpsiHats}, a single $E_r$ is specified by \parlink{dPhiHatdpsiHat},
and the range of an $E_r$ scan is specified by \parlink{dPhiHatdpsiHatMin}-\parlink{dPhiHatdpsiHatMax}.\\
{\ttfamily inputRadialCoordinateForGradients}==1: Density gradients are specified by \parlink{dnHatdpsiNs},
temperature gradients are specified by \parlink{dTHatdpsiNs}, a single $E_r$ is specified by \parlink{dPhiHatdpsiN},
and the range of an $E_r$ scan is specified by \parlink{dPhiHatdpsiNMin}-\parlink{dPhiHatdpsiNMax}.\\
{\ttfamily inputRadialCoordinateForGradients}==2: Density gradients are specified by \parlink{dnHatdrHats},
temperature gradients are specified by \parlink{dTHatdrHats}, a single $E_r$ is specified by \parlink{dPhiHatdrHat},
and the range of an $E_r$ scan is specified by \parlink{dPhiHatdrHatMin}-\parlink{dPhiHatdrHatMax}.\\
{\ttfamily inputRadialCoordinateForGradients}==3: Density gradients are specified by \parlink{dnHatdrNs},
temperature gradients are specified by \parlink{dTHatdrNs}, a single $E_r$ is specified by \parlink{dPhiHatdrN},
and the range of an $E_r$ scan is specified by \parlink{dPhiHatdrNMin}-\parlink{dPhiHatdrNMax}.\\
{\ttfamily inputRadialCoordinateForGradients}==4:
Same as {\ttfamily inputRadialCoordinateForGradients}==2, except \parlink{Er} is used instead of \parlink{dPhiHatdrHat}.
Thus, density gradients are specified by \parlink{dnHatdrHats},
temperature gradients are specified by \parlink{dTHatdrHats}, a single $E_r$ is specified by \parlink{Er},
and the range of an $E_r$ scan is specified by \parlink{ErMin}-\parlink{ErMax}.\\
No matter which option you pick, the gradients with respect to all radial coordinates will be saved in the output \HDF~file.
}
\myhrule
\param{psiHat\_wish}
{real}
{-1}
{Only when \parlink{inputRadialCoordinate} == 0 and \parlink{geometryScheme} == 1, 5, 11, or 12.}
{Requested flux surface for the computation. See section \ref{sec:radialCoordinates}
for more information about radial coordinates.}
\myhrule
\param{psiN\_wish}
{real}
{0.25}
{Only when \parlink{inputRadialCoordinate} == 1 and \parlink{geometryScheme} == 1, 5, 11, or 12.}
{Requested flux surface for the computation. See section \ref{sec:radialCoordinates}
for more information about radial coordinates.}
\myhrule
\param{rHat\_wish}
{real}
{-1}
{Only when \parlink{inputRadialCoordinate} == 2 and \parlink{geometryScheme} == 1, 5, 11, or 12.}
{Requested flux surface for the computation. See section \ref{sec:radialCoordinates}
for more information about radial coordinates.}
\myhrule
\param{rN\_wish}
{real}
{0.5}
{Only when \parlink{inputRadialCoordinate} == 3 and \parlink{geometryScheme} == 1, 5, 11, or 12.}
{Requested flux surface for the computation. See section \ref{sec:radialCoordinates}
for more information about radial coordinates.}
\myhrule
\param{B0OverBBar}
{real}
{1.0}
{Only when \parlink{geometryScheme} == 1. Otherwise, {\ttfamily B0OverBBar} will be set according to the requested \parlink{geometryScheme}.}
{Magnitude of the (0,0) Boozer harmonic of the magnetic field strength (equivalent to $\left< B^3\right>/\left<B^2\right>$), normalized by $\bar{B}$.}
\myhrule
\param{GHat}
{real}
{3.7481}
{Only when \parlink{geometryScheme} == 1. Otherwise, {\ttfamily GHat} will be set according to the requested \parlink{geometryScheme}.}
{$G$ is $(c/2)\times$ the poloidal current outside the flux
surface. Equivalently, $G$ is the coefficient of $\nabla\zeta_B$ in the
covariant representation of $\vect{B}$ in terms of Boozer coordinates $(\theta_B,\zeta_B)$:
\begin{equation}
\label{eq:covariant}
\vect{B}(\psi,\theta_B,\zeta_B) = \beta(\psi,\theta_B,\zeta_B)\nabla\psi + I(\psi)\nabla\theta_B + G(\psi)\nabla\zeta_B.
\end{equation}
{\ttfamily GHat} is $G$ normalized by $\bar{B}\bar{R}$.}
\myhrule
\param{IHat}
{real}
{0.0}
{Only when \parlink{geometryScheme} == 1. Otherwise, {\ttfamily IHat} will be set according to the requested \parlink{geometryScheme}.}
{$I$ is $(c/2)\times$ the toroidal current inside the flux
surface. Equivalently, $I$ is the coefficient of $\nabla\theta_B$ in the
covariant representation of $\vect{B}$ in terms of Boozer coordinates $(\theta_B,\zeta_B)$ in (\ref{eq:covariant}).
{\ttfamily IHat} is $I$ normalized by $\bar{B}\bar{R}$.}
\myhrule
\param{iota}
{real}
{0.4542}
{Only when \parlink{geometryScheme} == 1. Otherwise, {\ttfamily iota} will be set according to the requested \parlink{geometryScheme}.}
{Rotational transform (rationalized), equivalent to $1/q$ where $q$ is the safety factor.}
\myhrule
\param{epsilon\_t}
{real}
{-0.07053}
{Only when \parlink{geometryScheme} == 1.}
{Toroidal variation in $B$, as defined by (\ref{eq:Bmodel}).}
\myhrule
\param{epsilon\_h}
{real}
{0.05067}
{Only when \parlink{geometryScheme} == 1.}
{Helical variation in $B$, as defined by (\ref{eq:Bmodel}).}
\myhrule
\param{epsilon\_antisymm}
{real}
{0.0}
{Only when \parlink{geometryScheme} == 1.}
{Stellarator-antisymmetric variation in $B$, as defined by (\ref{eq:Bmodel}).}
\myhrule
\param{helicity\_l}
{integer}
{2}
{Only when \parlink{geometryScheme} == 1.}
{Poloidal mode number of the helical variation in $B$, as defined by (\ref{eq:Bmodel}).}
\myhrule
\param{helicity\_n}
{integer}
{10}
{Only when \parlink{geometryScheme} == 1.}
{Toroidal mode number of the helical variation in $B$, as defined by (\ref{eq:Bmodel}).}
\myhrule
\param{helicity\_antisymm\_l}
{integer}
{1}
{Only when \parlink{geometryScheme} == 1.}
{Poloidal mode number of the stellarator-antisymmetric variation in $B$, as defined by (\ref{eq:Bmodel}).}
\myhrule
\param{helicity\_antisymm\_n}
{integer}
{0}
{Only when \parlink{geometryScheme} == 1.}
{Toroidal mode number of the stellarator-antisymmetric variation in $B$, as defined by (\ref{eq:Bmodel}).
Note that you can create an up-down asymmetric tokamak by setting \parlink{helicity\_antisymm\_n}=0, \parlink{epsilon\_h}=0,
and \parlink{epsilon\_antisymm}$>$0.}
\myhrule
\param{psiAHat}
{real}
{0.15596}
{Only when \parlink{geometryScheme} == 1. Otherwise, {\ttfamily psiAHat} will be set according to the requested \parlink{geometryScheme}.}
{{\ttfamily psiAHat} = $\psi_a / (\bar{B} \bar{R}^2)$
where $2 \pi \psi_a$ is the toroidal flux at the last closed flux surface.}
\myhrule
\param{aHat}
{real}
{0.5585}
{Only when \parlink{geometryScheme} == 1. Otherwise, {\ttfamily aHat} will be set according to the requested \parlink{geometryScheme}.}
{The effective minor radius at the last closed flux surface, in units of $\bar{R}$.
The code only uses {\ttfamily aHat} for converting between the various radial coordinates in input and output quantities.
}
\myhrule
\param{equilibriumFile}
{string}
{``''}
{Only when \parlink{geometryScheme} == 5, 11, or 12.}
{Filename from which to load the magnetic equilibrium, either in {\ttfamily vmec wout} ASCII or \netCDF~format, or IPP {\ttfamily .bc} format.}
\myhrule
\param{VMECRadialOption}
{integer}
{0}
{Only when \parlink{geometryScheme} == 5.}
{Controls whether the nearest available flux surface in the {\ttfamily vmec wout} file is used, or whether radial interpolation is applied to the \vmec~data
to obtain the magnetic field components on the exact surface requested.\\
{\ttfamily VMECRadialOption}=0: Use the exact {\ttfamily XXX\_wish} flux surface requested, by interpolating from the \vmec~radial grid.\\
{\ttfamily VMECRadialOption}=1: Use a surface that may be slightly different from {\ttfamily XXX\_wish} to get the nearest available flux surface from \vmec's HALF grid.
The components of $\vect{B}$ in \vmec~are stored on the half grid, so interpolation is then unnecessary.\\
{\ttfamily VMECRadialOption}=2: Use a surface that may be slightly different from {\ttfamily XXX\_wish} to get the nearest available flux surface from \vmec's FULL grid.
I'm not sure why you would want this, but the feature is implemented for completeness.
}
\myhrule
\param{min\_Bmn\_to\_load}
{real}
{0.0}
{Only when \parlink{geometryScheme} == 5, 11, or 12.}
{Filters the magnetic field read from an input file. Only Fourier modes $(m,n)$ for which $B_{m,n}$ is at least {\ttfamily min\_Bmn\_to\_load} will be included.}
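As an illustration (the file name below is only a placeholder), a {\ttfamily geometryParameters} group that reads a \vmec~equilibrium and selects the flux surface by {\ttfamily rN} might look like:
\begin{verbatim}
&geometryParameters
  geometryScheme = 5          ! read a vmec wout file
  equilibriumFile = "wout_example.nc"
  inputRadialCoordinate = 3   ! select the flux surface by rN
  rN_wish = 0.5
  VMECRadialOption = 1        ! use the nearest surface on vmec's half grid
  min_Bmn_to_load = 0.0
/
\end{verbatim}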
\section{The {\ttfamily speciesParameters}~namelist}
This namelist defines which species are included in the calculation, along with the density and temperature and gradients thereof.
You will definitely want to set the parameters in this namelist.
Note that only one of the four parameters
\parlink{dnHatdpsiHats},
\parlink{dnHatdpsiNs},
\parlink{dnHatdrHats}, or
\parlink{dnHatdrNs}
will be used, depending on the value of \parlink{inputRadialCoordinateForGradients}
in the {\ttfamily \hyperref[sec:geometryParameters]{geometryParameters}} namelist.
Similarly, only one of the four parameters
\parlink{dTHatdpsiHats},
\parlink{dTHatdpsiNs},
\parlink{dTHatdrHats}, or
\parlink{dTHatdrNs}
will be used.
\myhrule
\param{Zs}
{1D array of reals}
{1.0}
{Always}
{Charges of each species, in units of the proton charge $e$}
\myhrule
\param{mHats}
{1D array of reals}
{1.0}
{Always}
{Masses of each species, in units of the reference mass $\bar{m}$}
\myhrule
\param{nHats}
{1D array of reals}
{1.0}
{Whenever \parlink{RHSMode} == 1}
{Densities of each species, in units of the reference density $\bar{n}$}
\myhrule
\param{THats}
{1D array of reals}
{1.0}
{Whenever \parlink{RHSMode} == 1}
{Temperatures of each species, in units of the reference temperature $\bar{T}$}
\myhrule
\param{dnHatdpsiHats}
{1D array of reals}
{0.0}
{Whenever \parlink{RHSMode} == 1 and \parlink{inputRadialCoordinateForGradients} == 0}
{Radial density gradients of each species, with respect to the radial coordinate $\hat{\psi}$, normalized by the reference density $\bar{n}$.}
\myhrule
\param{dTHatdpsiHats}
{1D array of reals}
{0.0}
{Whenever \parlink{RHSMode} == 1 and \parlink{inputRadialCoordinateForGradients} == 0}
{Radial temperature gradients of each species, with respect to the radial coordinate $\hat{\psi}$, normalized by the reference temperature $\bar{T}$.}
\myhrule
\param{dnHatdpsiNs}
{1D array of reals}
{0.0}
{Whenever \parlink{RHSMode} == 1 and \parlink{inputRadialCoordinateForGradients} == 1}
{Radial density gradients of each species, with respect to the radial coordinate $\psi_N$, normalized by the reference density $\bar{n}$.}
\myhrule
\param{dTHatdpsiNs}
{1D array of reals}
{0.0}
{Whenever \parlink{RHSMode} == 1 and \parlink{inputRadialCoordinateForGradients} == 1}
{Radial temperature gradients of each species, with respect to the radial coordinate $\psi_N$, normalized by the reference temperature $\bar{T}$.}
\myhrule
\param{dnHatdrHats}
{1D array of reals}
{0.0}
{Whenever \parlink{RHSMode} == 1 and \parlink{inputRadialCoordinateForGradients} == 2}
{Radial density gradients of each species, with respect to the radial coordinate $\hat{r}$, normalized by the reference density $\bar{n}$.}
\myhrule
\param{dTHatdrHats}
{1D array of reals}
{0.0}
{Whenever \parlink{RHSMode} == 1 and \parlink{inputRadialCoordinateForGradients} == 2}
{Radial temperature gradients of each species, with respect to the radial coordinate $\hat{r}$, normalized by the reference temperature $\bar{T}$.}
\myhrule
\param{dnHatdrNs}
{1D array of reals}
{0.0}
{Whenever \parlink{RHSMode} == 1 and \parlink{inputRadialCoordinateForGradients} == 3}
{Radial density gradients of each species, with respect to the radial coordinate $r_N$, normalized by the reference density $\bar{n}$.}
\myhrule
\param{dTHatdrNs}
{1D array of reals}
{0.0}
{Whenever \parlink{RHSMode} == 1 and \parlink{inputRadialCoordinateForGradients} == 3}
{Radial temperature gradients of each species, with respect to the radial coordinate $r_N$, normalized by the reference temperature $\bar{T}$.}
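For example (the numerical values are purely illustrative), a two-species calculation with kinetic electrons and hydrogen ions, using \parlink{inputRadialCoordinateForGradients} == 2, could be set up as:
\begin{verbatim}
&speciesParameters
  Zs = -1.0 1.0           ! electrons, then hydrogen ions
  mHats = 0.000545 1.0    ! electron/proton mass ratio, then 1
  nHats = 1.0 1.0
  THats = 1.0 1.0
  dnHatdrHats = -0.5 -0.5
  dTHatdrHats = -2.0 -2.0
/
\end{verbatim}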
\section{The {\ttfamily physicsParameters}~namelist}
\label{sec:physicsParameters}
The parameters in this namelist determine which terms are included or excluded in the kinetic equation.
You will want to be aware of most of these parameters.
\myhrule
\param{Delta}
{real}
{4.5694e-3}
{Whenever \parlink{RHSMode} == 1.}
{Roughly speaking, {\ttfamily Delta} is $\rho_*$ at the reference parameters. The precise definition is
\begin{eqnarray}
\mbox{\ttfamily Delta}
&=& \frac{c \bar{m} \bar{v}}{e \bar{B} \bar{R}} \;\;\; \mbox{(Gaussian units)} \\
&=& \frac{\bar{m} \bar{v}}{e \bar{B} \bar{R}} \;\;\; \mbox{(SI units)}, \nonumber
\end{eqnarray}
where $c$ is the speed of light,
$e$ is the proton charge,
and quantities with a bar are the normalization reference parameters discussed in section \ref{sec:normalizations}.
The default value {\ttfamily Delta} = 4.5694e-3 corresponds to $\bar{B}$ = 1 Tesla, $\bar{R}$ = 1 meter,
$\bar{m}$ = proton mass, and $\bar{T}$ = 1 keV.}
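As a rough consistency check, if the reference speed is defined as $\bar{v} = \sqrt{2\bar{T}/\bar{m}}$, the reference values just quoted give
\begin{equation}
\bar{v} \approx \sqrt{\frac{2 \times 1.602\times 10^{-16}\,\mathrm{J}}{1.673\times 10^{-27}\,\mathrm{kg}}}
\approx 4.4\times 10^{5}\,\mathrm{m/s},
\qquad
\frac{\bar{m}\bar{v}}{e\bar{B}\bar{R}}
\approx \frac{(1.673\times 10^{-27})(4.4\times 10^{5})}{(1.602\times 10^{-19})(1)(1)}
\approx 4.6\times 10^{-3},
\end{equation}
in agreement with the default value of {\ttfamily Delta}.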
\myhrule
\param{alpha}
{real}
{1.0}
{Whenever \parlink{RHSMode} == 1 and $E_r$ is nonzero.}
{{\ttfamily alpha} $= e \bar{\Phi}/ \bar{T}$ (both Gaussian and SI units) where $e$ is the proton charge,
and $\bar{\Phi}$ and $\bar{T}$ are the normalization reference parameters discussed in section \ref{sec:normalizations}.
The default value {\ttfamily alpha} = 1.0 corresponds to $\bar{T}$ = 1 keV and $\bar{\Phi}$ = 1 kV.
The default value {\ttfamily alpha} = 1.0 also corresponds to $\bar{T}$ = 1 eV and $\bar{\Phi}$ = 1 V.
}
\myhrule
\param{nu\_n}
{real}
{8.330e-3}
{Whenever \parlink{RHSMode} == 1}
{Dimensionless collisionality at the reference parameters:
\begin{equation}
\mbox{\ttfamily nu\_n} = \bar{\nu} \frac{\bar{R}}{\bar{v}},
\end{equation}
where $\bar{R}$ and $\bar{v}$ are the normalization reference parameters discussed in section \ref{sec:normalizations},
and $\bar{\nu}$ is the dimensional collision frequency at the reference parameters. This frequency is defined as
\begin{eqnarray}
\bar{\nu}
&=& \frac{4\sqrt{2\pi} \bar{n} e^4 \ln\Lambda}{3 (4\pi\epsilon_0)^2 \sqrt{\bar{m}} \bar{T}^{3/2}} \;\;\; \mbox{(SI units}) \\
&=& \frac{4\sqrt{2\pi} \bar{n} e^4 \ln\Lambda}{3 \sqrt{\bar{m}} \bar{T}^{3/2}} \;\;\; \mbox{(Gaussian units}) \nonumber
\end{eqnarray}
where $e$ is the proton charge, $\bar{n}$, $\bar{m}$, and $\bar{T}$ are the normalization reference parameters discussed in section \ref{sec:normalizations},
and $\ln\Lambda$ is the Coulomb logarithm.
The default value {\ttfamily nu\_n} = 8.330e-3 corresponds to $\bar{R}$ = 1 meter,
$\bar{m}$ = proton mass, $\bar{n}$ = $10^{20}$ m$^{-3}$, $\bar{T}$ = 1 keV, and $\ln\Lambda = 17$.
}
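Analogously to the consistency check under \parlink{Delta} (again taking $\bar{v} = \sqrt{2\bar{T}/\bar{m}} \approx 4.4\times 10^{5}$ m/s), inserting these reference values into the SI formula above gives $\bar{\nu} \approx 3.6\times 10^{3}$ s$^{-1}$, so that {\ttfamily nu\_n} $= \bar{\nu}\bar{R}/\bar{v} \approx 8.3\times 10^{-3}$, consistent with the default.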
\myhrule
\param{nuPrime}
{real}
{1.0}
{Only when \parlink{RHSMode} == 3.}
{Dimensionless collisionality used in place of \parlink{nHats}, \parlink{THats}, \parlink{mHats}, \parlink{Zs}, and \parlink{nu\_n} for computing monoenergetic transport coefficients.
See section \ref{sec:monoenergetic} for more details.}
\myhrule
\param{EStar}
{real}
{0.0}
{Only when \parlink{RHSMode} == 3.}
{Normalized radial electric field used in place of {\ttfamily dPhiHatdXXX} for computing monoenergetic transport coefficients.
See section \ref{sec:monoenergetic} for more details.}
\myhrule
\param{EParallelHat}
{real}
{0.0}
{Whenever \parlink{RHSMode} == 1}
{Inductive parallel electric field:
\begin{equation}
\mbox{\ttfamily EParallelHat} = \left< \vect{E}\cdot\vect{B}\right> \frac{\bar{R}}{\bar{\Phi}\bar{B}}
\end{equation}
(in both Gaussian and SI units) where $\left< \ldots \right>$ denotes a flux surface average,
$\vect{E}$ and $\vect{B}$ are the electric and magnetic field vectors, and
quantities with a bar are the normalization reference parameters discussed in section \ref{sec:normalizations}.}
\myhrule
\param{dPhiHatdpsiHat}
{real}
{0.0}
{Whenever \parlink{RHSMode} == 1 and \parlink{inputRadialCoordinateForGradients} == 0}
{The derivative of the electrostatic potential with respect to the radial coordinate $\hat{\psi}$,
i.e. the radial electric field up to a constant.
Notice that exactly 1 of the 5 variables \parlink{dPhiHatdpsiHat}, \parlink{dPhiHatdpsiN}, \parlink{dPhiHatdrHat}, \parlink{dPhiHatdrN}, or \parlink{Er}
will be used, depending on\\
\parlink{inputRadialCoordinateForGradients}.
}
\myhrule
\param{dPhiHatdpsiN}
{real}
{0.0}
{Whenever \parlink{RHSMode} == 1 and \parlink{inputRadialCoordinateForGradients} == 1}
{The derivative of the electrostatic potential with respect to the radial coordinate $\psi_N$,
i.e. the radial electric field up to a constant.
Notice that exactly 1 of the 5 variables \parlink{dPhiHatdpsiHat}, \parlink{dPhiHatdpsiN}, \parlink{dPhiHatdrHat}, \parlink{dPhiHatdrN}, or \parlink{Er}
will be used, depending on\\
\parlink{inputRadialCoordinateForGradients}.
}
\myhrule
\param{dPhiHatdrHat}
{real}
{0.0}
{Whenever \parlink{RHSMode} == 1 and \parlink{inputRadialCoordinateForGradients} == 2}
{The derivative of the electrostatic potential with respect to the radial coordinate $\hat{r}$,
i.e. the radial electric field up to a constant.
Notice that exactly 1 of the 5 variables \parlink{dPhiHatdpsiHat}, \parlink{dPhiHatdpsiN}, \parlink{dPhiHatdrHat}, \parlink{dPhiHatdrN}, or \parlink{Er}
will be used, depending on\\
\parlink{inputRadialCoordinateForGradients}.
}
\myhrule
\param{dPhiHatdrN}
{real}
{0.0}
{Whenever \parlink{RHSMode} == 1 and \parlink{inputRadialCoordinateForGradients} == 3}
{The derivative of the electrostatic potential with respect to the radial coordinate $r_N$,
i.e. the radial electric field up to a constant.
Notice that exactly 1 of the 5 variables \parlink{dPhiHatdpsiHat}, \parlink{dPhiHatdpsiN}, \parlink{dPhiHatdrHat}, \parlink{dPhiHatdrN}, or \parlink{Er}
will be used, depending on\\
\parlink{inputRadialCoordinateForGradients}.
}
\myhrule
\param{Er}
{real}
{0.0}
{Whenever \parlink{RHSMode} == 1 and \parlink{inputRadialCoordinateForGradients} == 4}
{The derivative of the normalized electrostatic potential $\hat{\Phi}$ with respect to the radial coordinate $\hat{r}$, multiplied by $-1$,
i.e. {\ttfamily Er}$=-$\parlink{dPhiHatdrHat}.
Notice that exactly 1 of the 5 variables \parlink{dPhiHatdpsiHat}, \parlink{dPhiHatdpsiN}, \parlink{dPhiHatdrHat}, \parlink{dPhiHatdrN}, or \parlink{Er}
will be used, depending on\\
\parlink{inputRadialCoordinateForGradients}.
}
\myhrule
\param{collisionOperator}
{integer}
{0}
{Always}
{Which collision operator to use:\\
{\setlength{\parindent}{0cm}
{\ttfamily collisionOperator} = 0: Full linearized Fokker-Planck operator.\\
{\ttfamily collisionOperator} = 1: Pitch-angle scattering operator (with no momentum-conserving field term).
}}
\myhrule
\param{constraintScheme}
{integer}
{-1}
{Always}
{Controls a small number of extra rows and columns of the system matrix
which (1) eliminate the null space of the matrix, and (2) ensure that a steady-state solution
to the kinetic equation exists even when phase-space volume and/or energy are not conserved.
These issues are detailed in section III of Ref \cite{sfincsPaper}.
\\
{\setlength{\parindent}{0cm}
{\ttfamily constraintScheme} = -1: Automatic. If \parlink{collisionOperator}==0 then {\ttfamily constraintScheme} will be set to 1,
otherwise {\ttfamily constraintScheme} will be set to 2.\\
{\ttfamily constraintScheme} = 0: No constraints.\\
{\ttfamily constraintScheme} = 1: 2 constraints per species: $\left<n_1\right>=0$ and $\left<p_1\right>=0$.
The particle and heat sources have the form $S = (a_2 x^2 + a_0) e^{-x^2}$.
The $a_2$ and $a_0$ coefficients are determined so that one source term provides particles but not energy,
whereas the other source term provides energy but not particles. \\
{\ttfamily constraintScheme} = 2: \Nx~constraints per species: $\left< f(L=0)\right>=0$ at each $x$.\\
{\ttfamily constraintScheme} = 3: Same as {\ttfamily constraintScheme} = 1,
except the particle and heat sources have the form $S = (a_4 x^4 + a_0) e^{-x^2}$.\\
{\ttfamily constraintScheme} = 4: Same as {\ttfamily constraintScheme} = 1,
except the particle and heat sources have the form $S = (a_4 x^4 + a_2 x^2) e^{-x^2}$.\\
You should set {\ttfamily constraintScheme} to -1 unless you know what you are doing.
}}
\myhrule
\param{includeXDotTerm}
{Boolean}
{\true}
{Whenever \parlink{RHSMode} $<3$ and the radial electric field is nonzero.}
{Whether or not to include the term in the kinetic equation corresponding to a change
in speed proportional to the radial electric field. This term is given by $\dot{x}$ in
equation (17) of \cite{sfincsPaper}:}
\begin{equation}
-(\vect{v}_{ms} \cdot\nabla r) \frac{Z_s e}{2 T_s x_s} \frac{d\Phi_0}{dr} \frac{\partial f_{s1}}{\partial x_s}
\end{equation}
\myhrule
\param{includeElectricFieldTermInXiDot}
{Boolean}
{\true}
{Whenever \parlink{RHSMode} $<3$ and the radial electric field is nonzero.}
{Whether or not to include the term in the kinetic equation corresponding to a change
in pitch angle $\xi$ proportional to the radial electric field. This term is given by the last line of
equation (17) of \cite{sfincsPaper}:}
\begin{equation}
\frac{(1-\xi^2)\xi}{2B^3} \frac{d\Phi_0}{dr}(\vect{B}\times\nabla r\cdot\nabla B)
\frac{\partial f_{s1}}{\partial \xi}
\end{equation}
\myhrule
\param{useDKESExBDrift}
{Boolean}
{\false}
{Whenever \parlink{RHSMode} $<3$ and the radial electric field is nonzero.}
{If true, the $\vect{E}\times\vect{B}$ drift term
multiplying $\partial f/\partial \theta$ and $\partial f/\partial \zeta$
is taken to be $\vect{E}\times\vect{B}\cdot \nabla (\theta \;\mbox{or} \;\zeta) / \left< B^2 \right>$
instead of
$\vect{E}\times\vect{B}\cdot \nabla (\theta \;\mbox{or} \;\zeta) /B^2$.
}
\myhrule
\param{include\_fDivVE\_term}
{Boolean}
{\false}
{Never}
{Obsolete}
\myhrule
\param{includePhi1}
{Boolean}
{\false}
{Whenever \parlink{RHSMode} == 1.}
{If false, no terms involving $\Phi_1 = \Phi-\left<\Phi\right>$ are included in the kinetic equation,
and the quasineutrality equation is not solved. If true, then terms involving $\Phi_1$ are included
in the kinetic equation, and the quasineutrality equation is solved at each point on the flux surface.
In this latter case, many more quantities are computed and saved in the output file, such as radial fluxes
associated with the radial $\vect{E}\times\vect{B}$ drift.}
\myhrule
\param{includeRadialExBDrive}
{Boolean}
{\false}
{Whenever \parlink{RHSMode} == 1.}
{If true, the radial $\vect{E}\times\vect{B}$ term $(\vect{v}_{E} \cdot\nabla\psi)f_{Ms} [(1/n_s)(dn_s/d\psi) + (x_s^2-3/2)(1/T_s)(dT_s/d\psi)]$
will be included in the kinetic equation. This is one of the terms considered in Ref \cite{Regana2013} which
should be unimportant for the main ions but which may be important for impurities.
Use of this option requires \parlink{includePhi1}=\true, since the radial $\vect{E}\times\vect{B}$ drift
arises due to $\Phi_1$.
}
\myhrule
\param{nonlinear}
{Boolean}
{\false}
{Whenever \parlink{RHSMode} == 1.}
{If true, the term
$-(Z_s e/m_s) (\nabla_{||} \Phi_1) (\partial f_{s1}/\partial v_{||})_\mu$ will be included
in the kinetic equation. This is one of the terms considered in Ref \cite{Regana2013} which
should be unimportant for the main ions but which may be important for impurities.
This term is nonlinear in the unknowns $f_{s1}$ and $\Phi_1$.
Newton's method will be used to solve the nonlinear system, meaning that the usual linear solve in \sfincs~must
be iterated several times.
Running with {\ttfamily nonlinear}=\true requires \parlink{includePhi1}=\true
}
\myhrule
\param{includeTemperatureEquilibrationTerm}
{Boolean}
{\false}
{Whenever \parlink{RHSMode} == 1.}
{When true, the term $C_{ab}[ f_{Ma}, f_{Mb}]$ is included in the kinetic equation,
i.e. collisions between the leading-order Maxwellians of different species. This term is nonzero
when the temperature is not the same for all species. The resulting contribution to the non-Maxwellian
distribution function is isotropic and so does not directly give any parallel or radial transport.}
\myhrule
\param{magneticDriftScheme}
{integer}
{0}
{Whenever \parlink{RHSMode} == 1.}
{This variable controls the poloidal and toroidal magnetic drifts, and does not affect the radial magnetic drift.\\
{\ttfamily magneticDriftScheme} = 0: No poloidal or toroidal magnetic drift.\\
{\ttfamily magneticDriftScheme} = 1: Use the magnetic drift $v_m = (v_{||} / \Omega_c) \nabla \times (v_{||} \vect{b})$.\\
{\ttfamily magneticDriftScheme} = 2: Use the grad-B and curvature drift, plus the parallel velocity correction $v_{\perp}^2/(2 \Omega_c) \vect{b} \vect{b} \cdot \nabla \times \vect{b}$.
}
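For example (a sketch with illustrative values only, not a recommendation), a {\ttfamily physicsParameters} group using the default reference normalizations, the full Fokker-Planck operator, and a radial electric field specified through \parlink{Er} (which requires \parlink{inputRadialCoordinateForGradients} == 4) might read:
\begin{verbatim}
&physicsParameters
  Delta = 4.5694e-3
  alpha = 1.0
  nu_n = 8.330e-3
  Er = -5.0
  collisionOperator = 0
  includePhi1 = .false.
/
\end{verbatim}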
\section{The {\ttfamily resolutionParameters}~namelist}
\label{sec:resolutionParameters}
In this namelist, there are 3 parameters you definitely need to be aware of and adjust: \parlink{NFourier}, \Nxi, and \Nx.
See chapter \ref{ch:resolution} for details. You may or may not
need to adjust {\ttfamily solverTolerance}. The other parameters in this namelist almost never need to be adjusted.
\myhrule
\param{NFourier}
{integer}
{10}
{Always}
{Number of Fourier modes in the poloidal and toroidal angles used to represent the distribution function.
Memory and time requirements DO depend strongly on this parameter.
The value of this parameter required for convergence depends strongly on
the collisionality, with higher values required at low collisionality.
}
\myhrule
\param{mmax}
{integer}
{32}
{Always}
{Determines the maximum poloidal mode number $m$ which can be included: $m \le${\ttfamily mmax}. Memory and time requirements do not depend strongly on this parameter.
}
\myhrule
\param{nmax}
{integer}
{32}
{Always}
{Determines the maximum toroidal mode number $n$ which can be included: $|n|/${\ttfamily Nperiod}$\le${\ttfamily nmax}. Memory and time requirements do not depend strongly on this parameter.
To force a calculation to be axisymmetric, set {\ttfamily nmax} = 0.
}
\myhrule
\param{Nxi}
{integer}
{16}
{Always}
{Number of Legendre polynomials used to represent the pitch-angle dependence of the distribution function.
Memory and time requirements DO depend strongly on this parameter.
The value of this parameter required for convergence depends strongly on
the collisionality. At high collisionality, this parameter can be as low
as 5. At low collisionality, this parameter may need to be many 10s or
even $>$ 200 for convergence.}
\myhrule
\param{Nx}
{integer}
{5}
{Always}
{Number of grid points in energy used to represent the distribution function.
Memory and time requirements DO depend strongly on this parameter.
This parameter almost always needs to be at least 5.
Usually a value in the range 5-8 is plenty for convergence, though in exceptional circumstances
you may need to go up to 10-15.
}
\myhrule
\param{solverTolerance}
{real}
{1e-6}
{Always}
{
Tolerance used to define convergence of the Krylov solver.
This parameter does not affect memory requirements but it does affect the
time required for solution somewhat.
Occasionally you may want to ease this tolerance to 1e-5 so fewer iterations of the Krylov solver are needed.
}
\myhrule
\param{NL}
{integer}
{4}
{Whenever \parlink{collisionOperator} == 0.}
{Number of Legendre polynomials used to represent the Rosenbluth
potentials. This number can basically always be 4,
since results barely change when \parlink{NL} is increased above this value.
Memory and time requirements do NOT depend strongly on this parameter.
}
\myhrule
\param{NxPotentialsPerVth}
{real}
{40.0}
{Only when \parlink{collisionOperator} == 0 and \parlink{xGridScheme} $<$ 5.
Since \parlink{xGridScheme} = 5 is recommended, this parameter is basically obsolete.}
{
Number of grid points in energy used to represent the Rosenbluth potentials
for the original implementation of the Fokker-Planck operator described in \cite{speedGrids}.
Memory and time requirements do NOT depend strongly on this parameter.}
\myhrule
\param{xMax}
{real}
{5.0}
{Only when \parlink{collisionOperator} == 0 and \parlink{xGridScheme} $<$ 5.
Since \parlink{xGridScheme} = 5 is recommended, this parameter is basically obsolete.}
{
Maximum normalized speed for the Rosenbluth potential grid
for the original implementation of the Fokker-Planck operator described in \cite{speedGrids}.
Memory and time requirements do NOT depend strongly on this parameter.}
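As a rough illustration only (appropriate values are strongly problem-dependent; see chapter \ref{ch:resolution}, and always perform a convergence scan), a low-collisionality case might use something like:
\begin{verbatim}
&resolutionParameters
  NFourier = 150
  mmax = 32
  nmax = 32
  Nxi = 70
  Nx = 6
  solverTolerance = 1e-6
/
\end{verbatim}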
\section{The {\ttfamily otherNumericalParameters}~namelist}
\label{sec:otherNumericalParameters}
The parameters in this namelist are advanced, and the default values are best for routine use of the code.
\myhrule
\param{FourierOption}
{integer}
{2}
{Whenever \parlink{nmax}$>$0, i.e. whenever the calculation is nonaxisymmetric.}
{Option for choosing which spatial Fourier modes will be included.\\
{\ttfamily FourierOption} = 0: Include all Fourier modes satisfying $|n|/${\ttfamily Nperiod}$\le $\parlink{nmax} and $|m| \le$\parlink{mmax}.
In this case, the input value of \parlink{NFourier} will be ignored, and \parlink{NFourier} will be set to
\parlink{mmax}$\times(2\times$\parlink{nmax}$+1)+$\parlink{nmax}$+1$.\\
{\ttfamily FourierOption} = 1: Include the first \parlink{NFourier} modes ranked by $m^2+[n/(${\ttfamily Nperiod}$\times$\parlink{FourierFactor}$)]^2$.\\
{\ttfamily FourierOption} = 2: Include the first \parlink{NFourier} modes ranked by the spectrum
of $B^\alpha$ for various powers $\alpha$.
}
\myhrule
\param{FourierFactor}
{real}
{1.0}
{Whenever \parlink{FourierOption}=1 and \parlink{nmax}$>0$.}
{Scales the aspect ratio of the ellipse in $(m,n)$ space for the spatial Fourier modes to include.
The modes are ranked by $m^2+[n/(${\ttfamily Nperiod}$\times$\parlink{FourierFactor}$)]^2$.}
\myhrule
\param{xGridScheme}
{integer}
{5}
{Whenever \parlink{RHSMode} is 1 or 2.}
{Discretization scheme for the speed coordinate $x$.\\
{\ttfamily xGridScheme} = 1: New orthogonal polynomials with no point at $x=0$. Original treatment of Rosenbluth potentials.\\
{\ttfamily xGridScheme} = 2: New orthogonal polynomials with a point at $x=0$. Original treatment of Rosenbluth potentials.\\
{\ttfamily xGridScheme} = 3: Uniform finite differences on [0, \parlink{xMax}], forcing $f=0$ at \parlink{xMax}. 2-point stencil for interpolating to other grids.\\
{\ttfamily xGridScheme} = 4: Uniform finite differences on [0, \parlink{xMax}], forcing $f=0$ at \parlink{xMax}. 4-point stencil for interpolating to other grids.\\
{\ttfamily xGridScheme} = 5: New orthogonal polynomials with no point at $x=0$. New treatment of Rosenbluth potentials.\\
{\ttfamily xGridScheme} = 6: New orthogonal polynomials with a point at $x=0$. New treatment of Rosenbluth potentials.\\
{\ttfamily xGridScheme} = 7: Chebyshev grid on [0, \parlink{xMax}], forcing $f=0$ at \parlink{xMax}. Original treatment of Rosenbluth potentials.\\
{\ttfamily xGridScheme} = 8: Chebyshev grid on [0, \parlink{xMax}], with no boundary condition imposed at \parlink{xMax}. Original treatment of Rosenbluth potentials.\\
The recommended value for this parameter is the default, 5.
When {\ttfamily xGridScheme} = 5 or 6, then the following quantities do not matter:
\parlink{NxPotentialsPerVth}, \parlink{xMax}, and \parlink{xPotentialsGridScheme}.}
\myhrule
\param{xGrid\_k}
{integer}
{0}
{Whenever \parlink{RHSMode} is 1 or 2 and \parlink{xGridScheme} = 1, 2, 5, or 6.}
{For \parlink{xGridScheme} = 1, 2, 5, or 6, the distribution function will be represented in terms of polynomials $P_n(x)$
that are orthogonal under the weight $\int_0^\infty dx\; x^k \exp(-x^2) P_n(x) P_m(x) \propto \delta_{n,m}$
where $k$ is an exponent set by the parameter {\ttfamily xGrid\_k} here.
A good value to use is 0, 1, or 2.}
\myhrule
\param{xPotentialsGridScheme}
{integer}
{2}
{Whenever \parlink{RHSMode} is 1 or 2 and \parlink{xGridScheme} is $<$5.
Since the recommended setting for \parlink{xGridScheme} is 5, this parameter is rarely relevant.}
{When an explicit grid is used for the Rosenbluth potentials, which grid and interpolation scheme to use.\\
{\ttfamily xPotentialsGridScheme} = 1: Uniform grid. 5-point stencil for derivatives. 2-point stencil for interpolating to other grids.\\
{\ttfamily xPotentialsGridScheme} = 2: Uniform grid. 5-point stencil for derivatives. 4-point stencil for interpolating to other grids.\\
{\ttfamily xPotentialsGridScheme} = 3: Use same grid as for distribution function, so no interpolation needed for the self-collision operator.
You must set \parlink{xGridScheme} = 3 or 4 to use this setting. Use 2-point stencil for interpolating to other species' grids.\\
{\ttfamily xPotentialsGridScheme} = 4: Same as option 3, except use a 4-point stencil for interpolating to other species' grids.\\
The recommended setting is {\ttfamily xPotentialsGridScheme} = 2.}
\myhrule
\param{whichParallelSolverToFactorPreconditioner}
{integer}
{1}
{Always}
{Which software package is used to $LU$-factorize the preconditioner matrix.\\
{\ttfamily whichParallelSolverToFactorPreconditioner} = 1: Use \mumps~if it is available, otherwise use \superludist.\\
{\ttfamily whichParallelSolverToFactorPreconditioner} = 2: Force use of \superludist~even if \mumps~is available.
}
\myhrule
\param{PETSCPreallocationStrategy}
{integer}
{1}
{Always}
{This setting changes the estimated number of nonzeros (nnz) used for allocating memory for the system matrix and preconditioner.\\
{\ttfamily PETSCPreallocationStrategy} = 0: Old method with a high estimated nnz. This method involves simpler code but uses far more memory than necessary.\\
{\ttfamily PETSCPreallocationStrategy} = 1: New method with lower, more precise estimated nnz. This method should use much less memory.\\
Use {\ttfamily PETSCPreallocationStrategy} = 1 unless you know what you are doing.}
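For illustration, a namelist using the recommended settings discussed above could look as follows (since these are also the default values, this namelist could equally well be omitted):\\
{\ttfamily
\&otherNumericalParameters\\
xGridScheme = 5\\
xGrid\_k = 0\\
whichParallelSolverToFactorPreconditioner = 1\\
PETSCPreallocationStrategy = 1\\
/\\
}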
\section{The {\ttfamily preconditionerOptions}~namelist}
This namelist controls how elements are removed from the ``real'' matrix in order to obtain
the preconditioner matrix. The default values are usually best, but if you find that there are more than 100 iterations
of GMRES/KSP, it may be worth adjusting these settings. As long as KSP converges, these parameters
should have no impact (to several digits) on the physical outputs such as parallel flows and radial fluxes.
Therefore, do not worry about (for example) ``dropping coupling between species'' in the first
parameter below, since full inter-species coupling will be retained in the real equations that are being solved.
\myhrule
\param{preconditioner\_species}
{integer}
{1}
{Whenever there are 2 or more species.}
{\\
{\ttfamily preconditioner\_species} = 0: Keep all coupling between species.\\
{\ttfamily preconditioner\_species} = 1: Drop all coupling between species.\\
The default value of 1 is recommended, except perhaps at high collisionality where 0 may be preferable.}
\myhrule
\param{preconditioner\_x}
{integer}
{1}
{Whenever \parlink{RHSMode} = 1 or 2.}
{\\
{\ttfamily preconditioner\_x} = 0: Keep full $x$ coupling.\\
{\ttfamily preconditioner\_x} = 1: Drop everything off-diagonal in $x$.\\
{\ttfamily preconditioner\_x} = 2: Keep only upper-triangular part in $x$.\\
{\ttfamily preconditioner\_x} = 3: Keep only the tridiagonal terms in $x$.\\
{\ttfamily preconditioner\_x} = 4: Keep only the diagonal and superdiagonal in $x$.\\
The default value of 1 is strongly recommended, except perhaps at high collisionality where 0 may be preferable.}
\myhrule
\param{preconditioner\_x\_min\_L}
{integer}
{0}
{Whenever \parlink{RHSMode} = 1 or 2 and \parlink{preconditioner\_x} $>$ 0.}
{The $x$ structure of the matrix will only be simplified when the Legendre index $L$ is $\ge$ this value.
Set {\ttfamily preconditioner\_x\_min\_L} = 0 to simplify the matrix for every $L$.
Recommended values are 0, 1, or 2.}
\myhrule
\param{preconditioner\_Fourier}
{integer}
{0}
{Always}
{\\
{\ttfamily preconditioner\_Fourier} = 0: Keep full coupling in the toroidal and poloidal angles.\\
{\ttfamily preconditioner\_Fourier} = 1: Drop some coupling in the toroidal and poloidal angles.\\
The default value of 0 is strongly recommended.}
\myhrule
\param{preconditioner\_Fourier\_min\_L}
{integer}
{0}
{Whenever \parlink{preconditioner\_Fourier} $>$ 0.}
{The Fourier structure of the matrix will only be simplified when the Legendre index $L$ is $\ge$ this value.
Set {\ttfamily preconditioner\_Fourier\_min\_L} = 0 to simplify the matrix for every $L$.
}
\myhrule
\param{preconditioner\_xi}
{integer}
{1}
{Always}
{\\
{\ttfamily preconditioner\_xi} = 0: Keep full $\xi$ coupling.\\
{\ttfamily preconditioner\_xi} = 1: Drop terms that are $\pm 2$ rows from the diagonal in $\xi$,
so the preconditioner matrix becomes tridiagonal in $\xi$.
(Normally the preconditioner matrix is pentadiagonal in $\xi$.)\\
Either a setting of 0 or 1 can be good for this parameter.}
\myhrule
\param{reusePreconditioner}
{Boolean}
{\true}
{Only when \parlink{nonlinear} = \true}
{If true, the nonlinear term will not be included in the preconditioner matrix, meaning the preconditioner matrix is the same
at every iteration, and so the preconditioner matrix only needs to be $LU$-factorized once. If false, the preconditioner matrix
for the Jacobian will be different at each iteration of the Newton solve, so the preconditioner needs to be $LU$-factorized at
each iteration. The nonlinear term also introduces a lot of nonzeros into the preconditioner matrix, so setting
{\ttfamily reusePreconditioner =}\true~not only dramatically reduces the time required for a nonlinear calculation, but also the memory required.}
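For example, if KSP requires many iterations in a high-collisionality case, one possible adjustment (following the recommendations above, with all other parameters left at their defaults) is to retain more coupling in the preconditioner:\\
{\ttfamily
\&preconditionerOptions\\
preconditioner\_species = 0\\
preconditioner\_x = 0\\
/\\
}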
\section{The {\ttfamily export\_f}~namelist}
This namelist controls whether and how the distribution function is saved in {\ttfamily sfincsOutput.h5}.
For each of the 4 coordinates $(\theta, \zeta, x, \xi)$, the distribution function can be given with the same discretization
used for solving the kinetic equation, or you can interpolate to a different grid/discretization.
For all available settings, the distribution function will be reported on a tensor product grid in the 4 coordinates.
\myhrule
\param{export\_full\_f}
{Boolean}
{\false}
{Always}
{Whether or not to save the full distribution function (the sum of the leading-order Maxwellian and the departure from it)
in the output file.}
\myhrule
\param{export\_delta\_f}
{Boolean}
{\false}
{Always}
{Whether or not to save the departure from a Maxwellian distribution function in the output file.}
\myhrule
\param{export\_f\_theta\_option}
{integer}
{2}
{Whenever \parlink{export\_full\_f} or \parlink{export\_delta\_f} is \true}
{Controls which grid in $\theta$ is used for exporting the distribution function.\\
{\ttfamily export\_f\_theta\_option} = 0: Report the distribution function on the original $\theta$ grid (with \Ntheta~points) used for solving the kinetic equation.\\
{\ttfamily export\_f\_theta\_option} = 1: Interpolate to a different grid, specified by \parlink{export\_f\_theta}. Linear interpolation will be used.
No sorting of the requested values is performed.\\
{\ttfamily export\_f\_theta\_option} = 2: Do not interpolate. Use the
values of the $\theta$ grid that are closest to the values requested
in \parlink{export\_f\_theta}. Values of $\theta$ will be in
increasing order. If multiple requested values are close to the same
grid point, the number of points returned will be less than the number
of points requested. \\
For all of these options, you can examine \parlink{export\_f\_theta} in {\ttfamily sfincsOutput.h5} to see the grid that was actually used.
}
\myhrule
\param{export\_f\_zeta\_option}
{integer}
{2}
{Whenever \parlink{export\_full\_f} or \parlink{export\_delta\_f} is \true}
{Controls which grid in $\zeta$ is used for exporting the distribution function.\\
{\ttfamily export\_f\_zeta\_option} = 0: Report the distribution function on the original $\zeta$ grid (with \Nzeta~points) used for solving the kinetic equation.\\
{\ttfamily export\_f\_zeta\_option} = 1: Interpolate to a different grid, specified by \parlink{export\_f\_zeta}. Linear interpolation will be used.
No sorting of the requested values is performed.\\
{\ttfamily export\_f\_zeta\_option} = 2: Do not interpolate. Use the
values of the $\zeta$ grid that are closest to the values requested
in \parlink{export\_f\_zeta}. Values of $\zeta$ will be in
increasing order. If multiple requested values are close to the same
grid point, the number of points returned will be less than the number
of points requested. \\
For all of these options, you can examine \parlink{export\_f\_zeta} in {\ttfamily sfincsOutput.h5} to see the grid that was actually used.
}
\myhrule
\param{export\_f\_theta}
{1D array of reals}
{0.0}
{Whenever \parlink{export\_full\_f} or \parlink{export\_delta\_f} is \true, and \parlink{export\_f\_theta\_option} $>0$.}
{Values of $\theta$ on which you want to save the distribution function. modulo$(\ldots, 2\pi)$ will be applied. See \parlink{export\_f\_theta\_option} for details.}
\myhrule
\param{export\_f\_zeta}
{1D array of reals}
{0.0}
{Whenever \parlink{export\_full\_f} or \parlink{export\_delta\_f} is \true, and \parlink{export\_f\_zeta\_option} $>0$.}
{Values of $\zeta$ on which you want to save the distribution function. modulo$(\ldots, 2\pi/\mbox{\ttfamily NPeriods})$ will be applied. See \parlink{export\_f\_zeta\_option} for details.}
\myhrule
\param{export\_f\_xi\_option}
{integer}
{1}
{Whenever \parlink{export\_full\_f} or \parlink{export\_delta\_f} is \true}
{Controls which discretization in $\xi$ is used for exporting the distribution function.\\
{\ttfamily export\_f\_xi\_option} = 0: Report the distribution function as amplitudes of \Nxi~Legendre polynomials, as used internally by \sfincs~for solving the kinetic equation.\\
{\ttfamily export\_f\_xi\_option} = 1: Report the distribution function on the values of $\xi$ specified by \parlink{export\_f\_xi}. No sorting of the requested values is performed.
}
\myhrule
\param{export\_f\_xi}
{1D array of reals}
{0.0}
{Whenever \parlink{export\_full\_f} or \parlink{export\_delta\_f} is \true, and \parlink{export\_f\_xi\_option} = 1.}
{Values of $\xi$ on which you want to save the distribution function. Values must lie in the range $[-1,1]$.}
\myhrule
\param{export\_f\_x\_option}
{integer}
{0}
{Whenever \parlink{export\_full\_f} or \parlink{export\_delta\_f} is \true}
{Controls which grid in $x = v/\sqrt{2T/m}$ is used for exporting the distribution function.\\
{\ttfamily export\_f\_x\_option} = 0: Report the distribution function on the original $x$ grid (with \Nx~points) used for solving the kinetic equation.\\
{\ttfamily export\_f\_x\_option} = 1: Interpolate to a different grid, specified by \parlink{export\_f\_x}. Polynomial spectral interpolation will be used.
No sorting of the requested values is performed.\\
{\ttfamily export\_f\_x\_option} = 2: Do not interpolate. Use the values of the internal $x$ grid that are closest to the values requested in \parlink{export\_f\_x}.
Values of $x$ will be in increasing order. If multiple requested values are close to the same grid point, the number of
points returned will be less than the number of points requested.\\
For all of these options, you can examine \parlink{export\_f\_x} in {\ttfamily sfincsOutput.h5} to see the grid that was actually used.
}
\myhrule
\param{export\_f\_x}
{1D array of reals}
{1.0}
{Whenever \parlink{export\_full\_f} or \parlink{export\_delta\_f} is \true, and \parlink{export\_f\_x\_option} $>0$.}
{Values of $x$ on which you want to save the distribution function. Values must be $\ge 0$.}
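As an illustration, the following namelist (the requested values are arbitrary examples) saves the departure from the Maxwellian on the internal $x$ grid, at the internal $\theta$ and $\zeta$ grid points closest to the requested values, and at three specified values of $\xi$:\\
{\ttfamily
\&export\_f\\
export\_delta\_f = .true.\\
export\_f\_theta\_option = 2\\
export\_f\_theta = 0.0 3.14159\\
export\_f\_zeta\_option = 2\\
export\_f\_zeta = 0.0\\
export\_f\_xi\_option = 1\\
export\_f\_xi = -1.0 0.0 1.0\\
export\_f\_x\_option = 0\\
/\\
}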
\section{Directives for \sfincsScan}
\label{sec:sfincsScanParams}
The parameters for \sfincsScan~begin with the marker {\ttfamily !ss} and so are not read by the Fortran part of \sfincs.
These parameters matter only when \sfincsScan~is called and are all ignored when \sfincs~is executed directly.
These parameters can appear anywhere in the {\ttfamily input.namelist} file, in any namelist or outside of any namelist.
Note that \sfincsScan~parameters do not have defaults, unlike Fortran namelist parameters.\\
\myhrule
\ssparam{scanType}
{integer}
{Any time \sfincsScan~is called.}
{Which type of scan will be run when \sfincsScan~is called. \\
{\ttfamily scanType} = 1: Resolution convergence scan. (Scan the parameters in the {\ttfamily resolutionParameters} namelist.)\\
{\ttfamily scanType} = 2: Scan of $E_r$.\\
{\ttfamily scanType} = 3: Scan any one input parameter that takes a numeric value.\\
{\ttfamily scanType} = 4: Scan radius, taking the density and temperature profiles from the {\ttfamily profiles} file.
In this type of scan, the same radial electric field is used at every radius.
See \path{sfincs/fortran/utils/profiles.XXX} for examples.\\
{\ttfamily scanType} = 5: Scan radius, and at each radius, scan $E_r$. Density and temperature profiles are again
taken from the {\ttfamily profiles} file; see \path{sfincs/fortran/utils/profiles.XXX} for examples.
In this type of scan, \sfincsScan~creates a subdirectory for each value of minor radius, and a
{\ttfamily scanType} = 2 scan is run in each of these subdirectories.
\\
{\ttfamily scanType} = 21: Read in a list of requested runs from a
file {\ttfamily runspec.dat}. See
\path{sfincs/fortran/utils/sfincsScan_21} for an example file. If the
file has a different name than {\ttfamily runspec.dat}, for instance
{\ttfamily thefilename.dat}, this name can be
specified by adding the line\\
{\ttfamily
!ss runSpecFile = thefilename.dat\\
}
}
\subsection{Parameters related only to {\ttfamily scanType} = 1 (resolution convergence scans).}
\label{sec:scanType1Parameters}
The resolution parameters discussed in section \ref{sec:resolutionParameters}
each have 3 associated \sfincsScan~parameters
which are used for convergence scans (\parlink{scanType} = 1): {\ttfamily ...MinFactor}, {\ttfamily ...MaxFactor}, and {\ttfamily ...NumRuns}.
The first two of these set the range by which the associated resolution parameter is scaled
in a convergence scan. The {\ttfamily ...NumRuns} parameter sets the number of values tried in a
convergence scan. The code attempts to space the values evenly in a logarithmic
sense, as in Matlab's {\ttfamily logspace} function. For example, the following settings\\
{\ttfamily
Nxi = 20\\
!ss NxiMinFactor = 0.5\\
!ss NxiMaxFactor = 2.0\\
!ss NxiNumRuns = 3\\
}
would mean the values \Nxi = 10, 20, and 40 would be tried in a convergence scan.
If you don't want to scan a variable in a convergence scan, set the associated
{\ttfamily ...NumRuns} parameter to 0, or do not include this parameter in the input file.
For each resolution parameter (\Ntheta, \Nzeta, \Nxi, etc.), the value itself is read by
Fortran and so should not be preceded by {\ttfamily !ss}. However, the {\ttfamily ...MinFactor}, {\ttfamily ...MaxFactor}, and {\ttfamily ...NumRuns}
quantities are read by \sfincsScan~and so must be preceded by {\ttfamily !ss}.
\myhrule
\ssparam{NthetaMaxFactor}
{real}
{Only when \sfincsScan~is run with \parlink{scanType} = 1.}
{Maximum factor by which \Ntheta~will be multiplied in a convergence scan.}
\myhrule
\ssparam{NthetaMinFactor}
{real}
{Only when \sfincsScan~is run with \parlink{scanType} = 1.}
{Minimum factor by which \Ntheta~will be multiplied in a convergence scan.}
\myhrule
\ssparam{NthetaNumRuns}
{integer}
{Only when \sfincsScan~is run with \parlink{scanType} = 1.}
{Maximum number of values of \Ntheta~which will be used in a convergence scan. Only odd integers can be used
for \Ntheta, so the actual number of \Ntheta~values used in the scan may be less than {\ttfamily NthetaNumRuns}.}
\myhrule
\ssparam{NzetaMaxFactor}
{real}
{Only when \sfincsScan~is run with \parlink{scanType} = 1.}
{Maximum factor by which \Nzeta~will be multiplied in a convergence scan.}
\myhrule
\ssparam{NzetaMinFactor}
{real}
{Only when \sfincsScan~is run with \parlink{scanType} = 1.}
{Minimum factor by which \Nzeta~will be multiplied in a convergence scan.}
\myhrule
\ssparam{NzetaNumRuns}
{integer}
{Only when \sfincsScan~is run with \parlink{scanType} = 1.}
{Maximum number of values of \Nzeta~which will be used in a convergence scan. Only odd integers can be used
for \Nzeta, so the actual number of \Nzeta~values used in the scan may be less than {\ttfamily NzetaNumRuns}.}
\myhrule
\ssparam{NxiMaxFactor}
{real}
{Only when \sfincsScan~is run with \parlink{scanType} = 1.}
{Maximum factor by which \Nxi~will be multiplied in a convergence scan.}
\myhrule
\ssparam{NxiMinFactor}
{real}
{Only when \sfincsScan~is run with \parlink{scanType} = 1.}
{Minimum factor by which \Nxi~will be multiplied in a convergence scan.}
\myhrule
\ssparam{NxiNumRuns}
{integer}
{Only when \sfincsScan~is run with \parlink{scanType} = 1.}
{Maximum number of values of \Nxi~which will be used in a convergence scan. Only integers can be used
for \Nxi, so the actual number of \Nxi~values used in the scan may be less than {\ttfamily NxiNumRuns}.}
\myhrule
\ssparam{NxMaxFactor}
{real}
{Only when \sfincsScan~is run with \parlink{scanType} = 1.}
{Maximum factor by which \Nx~will be multiplied in a convergence scan.}
\myhrule
\ssparam{NxMinFactor}
{real}
{Only when \sfincsScan~is run with \parlink{scanType} = 1.}
{Minimum factor by which \Nx~will be multiplied in a convergence scan.}
\myhrule
\ssparam{NxNumRuns}
{integer}
{Only when \sfincsScan~is run with \parlink{scanType} = 1.}
{Maximum number of values of \Nx~which will be used in a convergence scan. Only integers can be used
for \Nx, so the actual number of \Nx~values used in the scan may be less than {\ttfamily NxNumRuns}.}
\myhrule
\ssparam{solverToleranceMaxFactor}
{real}
{Only when \sfincsScan~is run with \parlink{scanType} = 1.}
{Maximum factor by which \parlink{solverTolerance} will be multiplied in a convergence scan.}
\myhrule
\ssparam{solverToleranceMinFactor}
{real}
{Only when \sfincsScan~is run with \parlink{scanType} = 1.}
{Minimum factor by which \parlink{solverTolerance} will be multiplied in a convergence scan.}
\myhrule
\ssparam{solverToleranceNumRuns}
{integer}
{Only when \sfincsScan~is run with \parlink{scanType} = 1.}
{Number of values of \parlink{solverTolerance} which will be used in a convergence scan.}
\myhrule
\ssparam{NLMaxFactor}
{real}
{Only when \parlink{collisionOperator} = 0 and \sfincsScan~is run with \parlink{scanType} = 1.}
{Maximum factor by which \NL~will be multiplied in a convergence scan.}
\myhrule
\ssparam{NLMinFactor}
{real}
{Only when \parlink{collisionOperator} = 0 and \sfincsScan~is run with \parlink{scanType} = 1.}
{Minimum factor by which \NL~will be multiplied in a convergence scan.}
\myhrule
\ssparam{NLNumRuns}
{integer}
{Only when \parlink{collisionOperator} = 0 and \sfincsScan~is run with \parlink{scanType} = 1.}
{Maximum number of values of \NL~which will be used in a convergence scan. Only integers can be used
for \NL, so the actual number of \NL~values used in the scan may be less than {\ttfamily NLNumRuns}.}
\myhrule
\ssparam{NxPotentialsPerVthMaxFactor}
{real}
{Only when \parlink{collisionOperator} = 0, \parlink{xGridScheme} $<$ 5, and \sfincsScan~is run with \parlink{scanType} = 1.
Since the recommended value of \parlink{xGridScheme} is 5, this parameter is basically obsolete.}
{Maximum factor by which \parlink{NxPotentialsPerVth} will be multiplied in a convergence scan.}
\myhrule
\ssparam{NxPotentialsPerVthMinFactor}
{real}
{Only when \parlink{collisionOperator} = 0, \parlink{xGridScheme} $<$ 5, and \sfincsScan~is run with \parlink{scanType} = 1.
Since the recommended value of \parlink{xGridScheme} is 5, this parameter is basically obsolete.}
{Minimum factor by which \parlink{NxPotentialsPerVth} will be multiplied in a convergence scan.}
\myhrule
\ssparam{NxPotentialsPerVthNumRuns}
{integer}
{Only when \parlink{collisionOperator} = 0, \parlink{xGridScheme} $<$ 5, and \sfincsScan~is run with \parlink{scanType} = 1.
Since the recommended value of \parlink{xGridScheme} is 5, this parameter is basically obsolete.}
{Number of values of \parlink{NxPotentialsPerVth} which will be used in a convergence scan.}
\myhrule
\ssparam{xMaxMaxFactor}
{real}
{Only when \parlink{collisionOperator} = 0, \parlink{xGridScheme} $<$ 5, and \sfincsScan~is run with \parlink{scanType} = 1.
Since the recommended value of \parlink{xGridScheme} is 5, this parameter is basically obsolete.}
{Maximum factor by which \parlink{xMax} will be multiplied in a convergence scan.}
\myhrule
\ssparam{xMaxMinFactor}
{real}
{Only when \parlink{collisionOperator} = 0, \parlink{xGridScheme} $<$ 5, and \sfincsScan~is run with \parlink{scanType} = 1.
Since the recommended value of \parlink{xGridScheme} is 5, this parameter is basically obsolete.}
{Minimum factor by which \parlink{xMax} will be multiplied in a convergence scan.}
\myhrule
\ssparam{xMaxNumRuns}
{integer}
{Only when \parlink{collisionOperator} = 0, \parlink{xGridScheme} $<$ 5, and \sfincsScan~is run with \parlink{scanType} = 1.
Since the recommended value of \parlink{xGridScheme} is 5, this parameter is basically obsolete.}
{Number of values of \parlink{xMax} which will be used in a convergence scan.}
\subsection{Parameters related only to {\ttfamily scanType} = 2 (scans of radial electric field).}
In this scan of the radial electric field, the values of electric field used
will always be uniformly (linearly) spaced.
Notice that exactly 1 of the 5 variables \parlink{dPhiHatdpsiHatMax}, \parlink{dPhiHatdpsiNMax}, \parlink{dPhiHatdrHatMax}, \parlink{dPhiHatdrNMax}, or \parlink{ErMax}
will be used, depending on\\
\parlink{inputRadialCoordinateForGradients}.
Similarly, exactly 1 of the 5 variables
\parlink{dPhiHatdpsiHatMin}, \parlink{dPhiHatdpsiNMin}, \parlink{dPhiHatdrHatMin}, \parlink{dPhiHatdrNMin}, or \parlink{ErMin}
will be used.
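For example, if \parlink{inputRadialCoordinateForGradients} = 4, a scan over 5 values of $E_r$ might be requested with directives along the following lines (the values shown are purely illustrative):\\
{\ttfamily
!ss scanType = 2\\
!ss NErs = 5\\
!ss ErMin = -10.0\\
!ss ErMax = 10.0\\
}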
\myhrule
\ssparam{NErs}
{integer}
{Only when \sfincsScan~is run with \parlink{scanType} = 2.}
{Number of values of radial electric field to consider in a scan.}
\myhrule
\ssparam{dPhiHatdpsiHatMax}
{real}
{Only when \parlink{inputRadialCoordinateForGradients} = 0 and \sfincsScan~is run with \parlink{scanType} = 2.}
{Maximum value of \parlink{dPhiHatdpsiHat} to use in the scan.}
\myhrule
\ssparam{dPhiHatdpsiHatMin}
{real}
{Only when \parlink{inputRadialCoordinateForGradients} = 0 and \sfincsScan~is run with \parlink{scanType} = 2.}
{Minimum value of \parlink{dPhiHatdpsiHat} to use in the scan.}
\myhrule
\ssparam{dPhiHatdpsiNMax}
{real}
{Only when \parlink{inputRadialCoordinateForGradients} = 1 and \sfincsScan~is run with \parlink{scanType} = 2.}
{Maximum value of \parlink{dPhiHatdpsiN} to use in the scan.}
\myhrule
\ssparam{dPhiHatdpsiNMin}
{real}
{Only when \parlink{inputRadialCoordinateForGradients} = 1 and \sfincsScan~is run with \parlink{scanType} = 2.}
{Minimum value of \parlink{dPhiHatdpsiN} to use in the scan.}
\myhrule
\ssparam{dPhiHatdrHatMax}
{real}
{Only when \parlink{inputRadialCoordinateForGradients} = 2 and \sfincsScan~is run with \parlink{scanType} = 2.}
{Maximum value of \parlink{dPhiHatdrHat} to use in the scan.}
\myhrule
\ssparam{dPhiHatdrHatMin}
{real}
{Only when \parlink{inputRadialCoordinateForGradients} = 2 and \sfincsScan~is run with \parlink{scanType} = 2.}
{Minimum value of \parlink{dPhiHatdrHat} to use in the scan.}
\myhrule
\ssparam{dPhiHatdrNMax}
{real}
{Only when \parlink{inputRadialCoordinateForGradients} = 3 and \sfincsScan~is run with \parlink{scanType} = 2.}
{Maximum value of \parlink{dPhiHatdrN} to use in the scan.}
\myhrule
\ssparam{dPhiHatdrNMin}
{real}
{Only when \parlink{inputRadialCoordinateForGradients} = 3 and \sfincsScan~is run with \parlink{scanType} = 2.}
{Minimum value of \parlink{dPhiHatdrN} to use in the scan.}
\myhrule
\ssparam{ErMax}
{real}
{Only when \parlink{inputRadialCoordinateForGradients} = 4 and \sfincsScan~is run with \parlink{scanType} = 2.}
{Maximum value of \parlink{Er} to use in the scan.}
\myhrule
\ssparam{ErMin}
{real}
{Only when \parlink{inputRadialCoordinateForGradients} = 4 and \sfincsScan~is run with \parlink{scanType} = 2.}
{Minimum value of \parlink{Er} to use in the scan.}
\subsection{Parameters related only to {\ttfamily scanType} = 3 (scans of an arbitrary input parameter).}
\ssparam{scanVariable}
{string. Must be one of the Fortran namelist parameters that take an integer or real value. Case-insensitive.}
{Only when \sfincsScan~is run with \parlink{scanType} = 3.}
{Name of the variable to scan in a \parlink{scanType} = 3 scan.}
\myhrule
\ssparam{scanVariableMax}
{real}
{Only when \sfincsScan~is run with \parlink{scanType} = 3.}
{Maximum value of \parlink{scanVariable} to use in a \parlink{scanType} = 3 scan.}
\myhrule
\ssparam{scanVariableMin}
{real}
{Only when \sfincsScan~is run with \parlink{scanType} = 3.}
{Minimum value of \parlink{scanVariable} to use in a \parlink{scanType} = 3 scan.}
\myhrule
\ssparam{scanVariableN}
{integer}
{Only when \sfincsScan~is run with \parlink{scanType} = 3.}
{Number of values of \parlink{scanVariable} to use in a \parlink{scanType} = 3 scan.}
\myhrule
\ssparam{scanVariableScale}
{string. Must be `linear', `lin', `logarithmic', or `log'.}
{Only when \sfincsScan~is run with \parlink{scanType} = 3.}
{Whether to space the values of \parlink{scanVariable} in a linear or logarithmic manner.
The settings `linear' and `lin' have identical behavior.
The settings `logarithmic' and `log' have identical behavior.}
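For example, a logarithmic scan of \parlink{solverTolerance} over three values might be requested with directives along the following lines (the values shown are purely illustrative):\\
{\ttfamily
!ss scanType = 3\\
!ss scanVariable = solverTolerance\\
!ss scanVariableMin = 1e-7\\
!ss scanVariableMax = 1e-5\\
!ss scanVariableN = 3\\
!ss scanVariableScale = log\\
}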
\myhrule
\subsection{Parameters related only to {\ttfamily scanType} = 4 or 5 (radial scans).}
Notice that exactly 1 of the 4 variables \parlink{psiHat\_max}, \parlink{psiN\_max}, \parlink{rHat\_max}, and \parlink{rN\_max}
will be used, depending on
\parlink{inputRadialCoordinate}.
Similarly, exactly 1 of the 4 variables
\parlink{psiHat\_min}, \parlink{psiN\_min}, \parlink{rHat\_min}, and \parlink{rN\_min}
will be used.
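For example, with \parlink{inputRadialCoordinate} = 3, a radial scan over at most 8 surfaces between $r_N=0.1$ and $r_N=0.9$, with profiles specified on a grid in the {\ttfamily profiles} file, might be requested as follows (the values shown are purely illustrative):\\
{\ttfamily
!ss scanType = 4\\
!ss profilesScheme = 1\\
!ss Nradius = 8\\
!ss rN\_min = 0.1\\
!ss rN\_max = 0.9\\
}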
\myhrule
\ssparam{profilesScheme}
{integer}
{Only when \sfincsScan~is run with \parlink{scanType} = 4 or 5.}
{How to specify the profiles of density, temperature, and (when \parlink{scanType} = 5)
the range of radial electric field to consider.\\
{\ttfamily profilesScheme} = 1: Read a `{\ttfamily profiles}' file which contains the input profiles on a grid in one of the 4 available radial coordinates.\\
{\ttfamily profilesScheme} = 2: Read a `{\ttfamily profiles}' file which contains the input profiles expressed as polynomials in one of the 4 available radial coordinates.
}
\myhrule
\ssparam{Nradius}
{integer}
{Only when \sfincsScan~is run with \parlink{scanType} = 4 or 5.}
{Maximum number of values of minor radius to consider in the scan.
Depending on \parlink{geometryScheme} and \parlink{VMECRadialOption},
it may be that only surfaces available in the magnetic equilibrium file will be
used, in which case fewer than {\ttfamily Nradius} radii may be used.}
\myhrule
\ssparam{psiHat\_max}
{real}
{Only when \parlink{inputRadialCoordinate} = 0 and \sfincsScan~is run with \parlink{scanType} = 4 or 5.}
{Maximum value of \parlink{psiHat} to use in the scan.}
\myhrule
\ssparam{psiHat\_min}
{real}
{Only when \parlink{inputRadialCoordinate} = 0 and \sfincsScan~is run with \parlink{scanType} = 4 or 5.}
{Minimum value of \parlink{psiHat} to use in the scan.}
\myhrule
\ssparam{psiN\_max}
{real}
{Only when \parlink{inputRadialCoordinate} = 1 and \sfincsScan~is run with \parlink{scanType} = 4 or 5.}
{Maximum value of \parlink{psiN} to use in the scan.}
\myhrule
\ssparam{psiN\_min}
{real}
{Only when \parlink{inputRadialCoordinate} = 1 and \sfincsScan~is run with \parlink{scanType} = 4 or 5.}
{Minimum value of \parlink{psiN} to use in the scan.}
\myhrule
\ssparam{rHat\_max}
{real}
{Only when \parlink{inputRadialCoordinate} = 2 and \sfincsScan~is run with \parlink{scanType} = 4 or 5.}
{Maximum value of \parlink{rHat} to use in the scan.}
\myhrule
\ssparam{rHat\_min}
{real}
{Only when \parlink{inputRadialCoordinate} = 2 and \sfincsScan~is run with \parlink{scanType} = 4 or 5.}
{Minimum value of \parlink{rHat} to use in the scan.}
\myhrule
\ssparam{rN\_max}
{real}
{Only when \parlink{inputRadialCoordinate} = 3 and \sfincsScan~is run with \parlink{scanType} = 4 or 5.}
{Maximum value of \parlink{rN} to use in the scan.}
\myhrule
\ssparam{rN\_min}
{real}
{Only when \parlink{inputRadialCoordinate} = 3 and \sfincsScan~is run with \parlink{scanType} = 4 or 5.}
{Minimum value of \parlink{rN} to use in the scan.}
\section{\PETSc~commands}
Command-line flags can be used to modify the behavior of any \PETSc~application, including \sfincs.
There are hundreds of \PETSc~options, and a list can be obtained by running with the command-line flag
{\ttfamily -help}. Here we list some of the more useful options.\\
\PETScParam{-help}
{Dumps a list of available command-line options to stdout.}
\myhrule
\PETScParam{-ksp\_view}
{Dumps detailed information to stdout related to the linear solver.}
\myhrule
\PETScParam{-ksp\_gmres\_restart {\normalfont \ttfamily$<$integer$>$}}
{The number of iterations after which GMRES restarts. The default is 2000. The convergence of GMRES slows every time a restart occurs, but restarts also free up memory.
The memory required by GMRES is typically quite small compared to the memory required for the $LU$ factorization.}
\myhrule
\PETScParam{-pc\_factor\_mat\_solver\_package {\normalfont \ttfamily$<$packagename$>$}}
{Which sparse direct solver package is used to $LU$-factorize the preconditioner matrix.
%Options are given by the items in quotation marks
%{\href{http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/Mat/MatSolverPackage.html#MatSolverPackage}{here.}}
This command-line flag overrides the related namelist parameter\\
\parlink{whichParallelSolverToFactorPreconditioner}.
See section \ref{sec:solvers} for further information about the available packages.
}
\section{\mumps~commands}
\label{sec:mumpsControlParameters}
The \mumps~solver package has many control parameters which are documented in the manual,
available \href{http://mumps-solver.org/}{here}.
In \sfincs, as in any \PETSc~application, these control parameters can be set
using the command-line flags {\ttfamily -mat\_mumps\_cntl\_X YYYY} (for floating point parameters)
and {\ttfamily -mat\_mumps\_icntl\_X YYYY} (for integer control parameters).
Here, {\ttfamily X} is the numeric index of the control parameter,
and {\ttfamily YYYY} is the desired setting.
Here we list some of the more useful options.\\
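For example, to suppress the \mumps~diagnostic output and allow a larger memory margin during factorization, flags such as the following could be appended to the usual command for launching \sfincs~(the launcher, process count, and executable name shown here are only an example and depend on your installation):\\
{\ttfamily
mpiexec -n 64 ./sfincs -mat\_mumps\_icntl\_4 0 -mat\_mumps\_icntl\_14 100\\
}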
\myhrule
\PETScParam{-mat\_mumps\_icntl\_4 {\normalfont \ttfamily$<$integer$>$}}
{How much diagnostic information will be printed by \mumps. Default is 3, causing extensive diagnostic information to be printed
to standard output about the memory required for factorizing the preconditioner.
Set this parameter to 0 to suppress this output from \mumps.}
\myhrule
\PETScParam{-mat\_mumps\_icntl\_14 {\normalfont \ttfamily$<$integer$>$}}
{Percentage margin allowed for increase of certain arrays during the $LU$ factorization.
The default value set by \sfincs~is 50 (higher than the original default value in \mumps).
If \sfincs~exits with the \mumps~error {\ttfamily INFO(1)=-9}, then further increasing this parameter may help.}
\myhrule
\PETScParam{-mat\_mumps\_icntl\_22 1}
{Turns on the out-of-core solve capability,
which reduces the memory required at the cost of speed.
See section \ref{sec:parallelization} for further details.}
\myhrule
\PETScParam{-mat\_mumps\_icntl\_28 2}
{Uses one of the parallelized libraries ParMETIS or PT-SCOTCH for analyzing the matrix,
instead of the default serial algorithm.}
% !TEX root = hott_intro.tex
\section{The fundamental theorem of identity types}\label{chap:fundamental}
\sectionmark{The fundamental theorem}
\index{fundamental theorem of identity types|(}
\index{characterization of identity type!fundamental theorem of identity types|(}
For many types it is useful to have a characterization of their identity types. For example, we have used a characterization of the identity types of the fibers of a map in order to conclude that any equivalence is a contractible map. The fundamental theorem of identity types is our main tool to carry out such characterizations, and with the fundamental theorem it becomes a routine task to characterize an identity type whenever that is of interest.
In our first application of the fundamental theorem of identity types we show that any equivalence is an embedding. Embeddings are maps that induce equivalences on identity types, i.e., they are the homotopical analogue of injective maps. In our second application we characterize the identity types of coproducts.
Throughout the rest of this book we will encounter many more occasions to characterize identity types. For example, we will show in \cref{thm:eq_nat} that the identity type of the natural numbers is equivalent to its observational equality, and we will show in \cref{thm:eq-circle} that the loop space of the circle is equivalent to $\Z$.
In order to prove the fundamental theorem of identity types, we first prove the basic fact that a family of maps is a family of equivalences if and only if it induces an equivalence on total spaces.
\subsection{Families of equivalences}
\index{family of equivalences|(}
\begin{defn}
Consider a family of maps
\begin{equation*}
f : \prd{x:A}B(x)\to C(x).
\end{equation*}
We define the map\index{total(f)@{$\tot{f}$}}
\begin{equation*}
\tot{f}:\sm{x:A}B(x)\to\sm{x:A}C(x)
\end{equation*}
by $\lam{(x,y)}(x,f(x,y))$.
\end{defn}
\begin{lem}\label{lem:fib_total}
For any family of maps $f:\prd{x:A}B(x)\to C(x)$ and any $t:\sm{x:A}C(x)$,
there is an equivalence\index{fiber!of total(f)@{of $\tot{f}$}}\index{total(f)@{$\tot{f}$}!fiber}
\begin{equation*}
\eqv{\fib{\tot{f}}{t}}{\fib{f(\proj 1(t))}{\proj 2(t)}}.
\end{equation*}
\end{lem}
\begin{proof}
For any $p:\fib{\tot{f}}{t}$ we define $\varphi(t,p):\fib{f(\proj 1(t))}{\proj 2(t)}$ by $\Sigma$-induction on $p$. Therefore it suffices to define $\varphi(t,(s,\alpha)):\fib{f(\proj 1(t))}{\proj 2(t)}$ for any $s:\sm{x:A}B(x)$ and $\alpha:\tot{f}(s)=t$. Now we proceed by path induction on $\alpha$, so it suffices to define $\varphi(\tot{f}(s),(s,\refl{})):\fib{f(\proj 1(\tot{f}(s)))}{\proj 2(\tot{f}(s))}$. Finally, we use $\Sigma$-induction on $s$ once more, so it suffices to define
\begin{equation*}
\varphi((x,f(x,y)),((x,y),\refl{})):\fib{f(x)}{f(x,y)}.
\end{equation*}
Now we take as our definition
\begin{equation*}
\varphi((x,f(x,y)),((x,y),\refl{}))\defeq(y,\refl{}).
\end{equation*}
For the proof that this map is an equivalence we construct a map
\begin{equation*}
\psi(t) : \fib{f(\proj 1(t))}{\proj 2(t)}\to\fib{\tot{f}}{t}
\end{equation*}
equipped with homotopies $G(t):\varphi(t)\circ\psi(t)\htpy\idfunc$ and $H(t):\psi(t)\circ\varphi(t)\htpy\idfunc$. In each of these definitions we use $\Sigma$-induction and path induction all the way through, until an obvious choice of definition becomes apparent. We define $\psi(t)$, $G(t)$, and $H(t)$ as follows:
\begin{align*}
\psi((x,f(x,y)),(y,\refl{})) & \defeq ((x,y),\refl{}) \\
G((x,f(x,y)),(y,\refl{})) & \defeq \refl{} \\
H((x,f(x,y)),((x,y),\refl{})) & \defeq \refl{}.\qedhere
\end{align*}
\end{proof}
\begin{thm}\label{thm:fib_equiv}
Let $f:\prd{x:A}B(x)\to C(x)$ be a family of maps. The following are equivalent:
\index{is an equivalence!total(f) of family of equivalences@{$\tot{f}$ of family of equivalences}}
\index{total(f)@{$\tot{f}$}!of family of equivalences is an equivalence}\index{is family of equivalences!if total(f) is an equivalence@{iff $\tot{f}$ is an equivalence}}
\begin{enumerate}
\item For each $x:A$, the map $f(x)$ is an equivalence. In this case we say that $f$ is a \define{family of equivalences}.
\item The map $\tot{f}:\sm{x:A}B(x)\to\sm{x:A}C(x)$ is an equivalence.
\end{enumerate}
\end{thm}
\begin{proof}
By \cref{thm:equiv_contr,thm:contr_equiv} it suffices to show that $f(x)$ is a contractible map for each $x:A$, if and only if $\tot{f}$ is a contractible map. Thus, we will show that $\fib{f(x)}{c}$ is contractible if and only if $\fib{\tot{f}}{x,c}$ is contractible, for each $x:A$ and $c:C(x)$. However, by \cref{lem:fib_total} these types are equivalent, so the result follows by \cref{ex:contr_equiv}.
\end{proof}
Now consider the situation where we have a map $f:A\to B$, and a family $C$ over $B$. Then we have the map
\begin{equation*}
\lam{(x,z)}(f(x),z):\sm{x:A}C(f(x))\to\sm{y:B}C(y).
\end{equation*}
We claim that this map is an equivalence when $f$ is an equivalence. The technique to prove this claim is the same as the technique we used in \cref{thm:fib_equiv}: first we note that the fibers are equivalent to the fibers of $f$, and then we use the fact that a map is an equivalence if and only if its fibers are contractible to finish the proof.
The converse of the following lemma does not hold. Why not?
\begin{lem}\label{lem:total-equiv-base-equiv}
Consider an equivalence $f:A\simeq B$, and let $C$ be a type family over $B$. Then the map
\begin{equation*}
\sigma_f(C) \defeq\lam{(x,z)}(f(x),z):\sm{x:A}C(f(x))\to\sm{y:B}C(y)
\end{equation*}
is an equivalence.
\end{lem}
\begin{proof}
We claim that for each $t:\sm{y:B}C(y)$ there is an equivalence
\begin{equation*}
\fib{\sigma_f(C)}{t}\simeq \fib{f}{\proj 1(t)}.
\end{equation*}
We obtain such an equivalence by constructing the following functions and homotopies:
\begin{align*}
\varphi(t) & : \fib{\sigma_f(C)}{t}\to\fib{f}{\proj 1 (t)} & \varphi((f(x),z),((x,z),\refl{})) & \defeq (x,\refl{}) \\
\psi(t) & : \fib{f}{\proj 1(t)} \to\fib{\sigma_f(C)}{t} & \psi((f(x),z),(x,\refl{})) & \defeq ((x,z),\refl{}) \\
G(t) & : \varphi(t)\circ\psi(t)\htpy\idfunc & G((f(x),z),(x,\refl{})) & \defeq \refl{} \\
H(t) & : \psi(t)\circ\varphi(t)\htpy\idfunc & H((f(x),z),((x,z),\refl{})) & \defeq \refl{}.
\end{align*}
Now the claim follows: since $f$ is an equivalence, it is a contractible map, so each fiber $\fib{f}{\proj 1(t)}$ is contractible; by the equivalences above, each fiber $\fib{\sigma_f(C)}{t}$ is then also contractible, and therefore $\sigma_f(C)$ is an equivalence.
\end{proof}
We now combine \cref{thm:fib_equiv,lem:total-equiv-base-equiv}.
\begin{defn}
Consider a map $f:A\to B$ and a family of maps
\begin{equation*}
g:\prd{x:A}C(x)\to D(f(x)),
\end{equation*}
where $C$ is a type family over $A$, and $D$ is a type family over $B$. In this situation we also say that $g$ is a \define{family of maps over $f$}. Then we define\index{total f(g)@{$\tot[f]{g}$}}
\begin{equation*}
\tot[f]{g}:\sm{x:A}C(x)\to\sm{y:B}D(y)
\end{equation*}
by $\tot[f]{g}(x,z)\defeq (f(x),g(x,z))$.
\end{defn}
\begin{thm}\label{thm:equiv-toto}
Suppose that $g$ is a family of maps over $f$, and suppose that $f$ is an equivalence. Then the following are equivalent:
\begin{enumerate}
\item The family of maps $g$ over $f$ is a family of equivalences.
\item The map $\tot[f]{g}$ is an equivalence.
\end{enumerate}
\end{thm}
\begin{proof}
Note that we have a commuting triangle
\begin{equation*}
\begin{tikzcd}[column sep=0]
\sm{x:A}C(x) \arrow[rr,"{\tot[f]{g}}"] \arrow[dr,swap,"\tot{g}"]& & \sm{y:B}D(y) \\
& \sm{x:A}D(f(x)) \arrow[ur,swap,"{\lam{(x,z)}(f(x),z)}"]
\end{tikzcd}
\end{equation*}
By the assumption that $f$ is an equivalence, it follows that the map $\sm{x:A}D(f(x))\to \sm{y:B}D(y)$ is an equivalence. Therefore it follows that $\tot[f]{g}$ is an equivalence if and only if $\tot{g}$ is an equivalence. Now the claim follows, since $\tot{g}$ is an equivalence if and only if $g$ is a family of equivalences.
\end{proof}
\index{family of equivalences|)}
\subsection{The fundamental theorem}
\index{identity system|(}
Many types come equipped with a reflexive relation that possesses a similar
structure as the identity type. The observational equality on the natural
numbers is such an example. We have seen that it is a reflexive, symmetric, and
transitive relation, and moreover it is contained in any other reflexive
relation. Thus, it is natural to ask whether observational equality on the natural numbers is equivalent to the identity type.
The fundamental theorem of identity types (\cref{thm:id_fundamental}) is a general theorem that can be used to answer such questions. It describes a necessary and sufficient condition on a type family $B$ over a type $A$ equipped with a point $a:A$, for there to be a family of equivalences $\prd{x:A}(a=x)\simeq B(x)$. In other words, it tells us when a family $B$ is a characterization of the identity type of $A$.
Before we state the fundamental theorem of identity types we introduce the notion of \emph{identity systems}. These are families $B$ over a type $A$ that satisfy an induction principle similar to the path induction principle, where the `computation rule' is stated with an identification.
\begin{defn}
Let $A$ be a type equipped with a term $a:A$. A \define{(unary) identity system} on $A$ at $a$ consists of a type family $B$ over $A$ equipped with $b:B(a)$, such that for any family of types $P(x,y)$ indexed by $x:A$ and $y:B(x)$,
the function
\begin{equation*}
h\mapsto h(a,b):\Big(\prd{x:A}\prd{y:B(x)}P(x,y)\Big)\to P(a,b)
\end{equation*}
has a section.
\end{defn}
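For example, the family $B(x)\defeq (a=x)$ equipped with $b\defeq\refl{a}$ is an identity system on $A$ at $a$, since for this family the function
\begin{equation*}
h\mapsto h(a,\refl{a}):\Big(\prd{x:A}\prd{y:a=x}P(x,y)\Big)\to P(a,\refl{a})
\end{equation*}
has a section by the path induction principle.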
The most important implication in the fundamental theorem is that (ii) implies (i). Occasionally we will also use the third equivalent statement. We note that the fundamental theorem also appears as Theorem 5.8.4 in \cite{hottbook}.
\begin{thm}\label{thm:id_fundamental}
Let $A$ be a type with $a:A$, and let $B$ be a type family over $A$ with $b:B(a)$.
Then the following are logically equivalent for any family of maps
\begin{equation*}
f:\prd{x:A}(a=x)\to B(x).
\end{equation*}
\begin{enumerate}
\item The family of maps $f$ is a family of equivalences.
\item The total space\index{is contractible!total space of an identity system}
\begin{equation*}
\sm{x:A}B(x)
\end{equation*}
is contractible.
\item The family $B$ is an identity system.
\end{enumerate}
In particular the canonical family of maps
\begin{equation*}
\pathind_a(b):\prd{x:A} (a=x)\to B(x)
\end{equation*}
is a family of equivalences if and only if $\sm{x:A}B(x)$ is contractible.
\end{thm}
\begin{proof}
First we show that (i) and (ii) are equivalent.
By \cref{thm:fib_equiv} it follows that the family of maps $f$ is a family of equivalences if and only if it induces an equivalence
\begin{equation*}
\eqv{\Big(\sm{x:A}a=x\Big)}{\Big(\sm{x:A}B(x)\Big)}
\end{equation*}
on total spaces. We have that $\sm{x:A}a=x$ is contractible. Now it follows by \cref{ex:contr_equiv}, applied in the case
\begin{equation*}
\begin{tikzcd}[column sep=3em]
\sm{x:A}a=x \arrow[rr,"\tot{f}"] \arrow[dr,swap,"\eqvsym"] & & \sm{x:A}B(x) \arrow[dl] \\
& \unit & \phantom{\sm{x:A}a=x}
\end{tikzcd}
\end{equation*}
that $\tot{f}$ is an equivalence if and only if $\sm{x:A}B(x)$ is contractible.
Now we show that (ii) and (iii) are equivalent. Note that we have the following commuting triangle
\begin{equation*}
\begin{tikzcd}[column sep=0]
\prd{t:\sm{x:A}B(x)}P(t) \arrow[rr,"\evpair"] \arrow[dr,swap,"{\evpt(a,b)}"] & & \prd{x:A}\prd{y:B(x)}P(x,y) \arrow[dl,"{\lam{h}h(a,b)}"] \\
\phantom{\prd{x:A}\prd{y:B(x)}P(x,y)} & P(a,b)
\end{tikzcd}
\end{equation*}
In this diagram the top map has a section. Therefore it follows by \cref{ex:3_for_2} that the left map has a section if and only if the right map has a section. Notice that the left map has a section for all $P$ if and only if $\sm{x:A}B(x)$ satisfies singleton induction, which is by \cref{thm:contractible} equivalent to $\sm{x:A}B(x)$ being contractible.
\end{proof}
\index{identity system|)}
\subsection{Embeddings}
\index{embedding|(}
As an application of the fundamental theorem we show that equivalences are embeddings. The notion of embedding is the homotopical analogue of the set theoretic notion of injective map.
\begin{defn}
An \define{embedding} is a map $f:A\to B$\index{is an embedding} satisfying the property that\index{is an equivalence!action on paths of an embedding}
\begin{equation*}
\apfunc{f}:(\id{x}{y})\to(\id{f(x)}{f(y)})
\end{equation*}
is an equivalence for every $x,y:A$. We write $\isemb(f)$\index{is-emb(f)@{$\isemb(f)$}} for the type of witnesses that $f$ is an embedding.
\end{defn}
Another way of phrasing the following statement is that equivalent types have equivalent identity types.
\begin{thm}
\label{cor:emb_equiv}
Any equivalence is an embedding.\index{is an embedding!equivalence}\index{equivalence!is an embedding}
\end{thm}
\begin{proof}
Let $e:\eqv{A}{B}$ be an equivalence, and let $x:A$. Our goal is to show that
\begin{equation*}
\apfunc{e} : (\id{x}{y})\to (\id{e(x)}{e(y)})
\end{equation*}
is an equivalence for every $y:A$. By \cref{thm:id_fundamental} it suffices to show that
\begin{equation*}
\sm{y:A}e(x)=e(y)
\end{equation*}
is contractible for every $y:A$. Now observe that there is an equivalence
\begin{samepage}
\begin{align*}
\sm{y:A}e(x)=e(y) & \eqvsym \sm{y:A}e(y)=e(x) \\
& \jdeq \fib{e}{e(x)}
\end{align*}
\end{samepage}
by \cref{thm:fib_equiv}, since for each $y:A$ the map
\begin{equation*}
\invfunc : (e(x)=e(y))\to (e(y)= e(x))
\end{equation*}
is an equivalence by \cref{ex:equiv_grpd_ops}.
The fiber $\fib{e}{e(x)}$ is contractible by \cref{thm:contr_equiv}, so it follows by \cref{ex:contr_equiv} that the type $\sm{y:A}e(x)=e(y)$ is indeed contractible.
\end{proof}
\index{embedding|)}
\subsection{Disjointness of coproducts}
\index{disjointness of coproducts|(}
\index{characterization of identity type!coproduct|(}
\index{identity type!coproduct|(}
\index{coproduct!identity type|(}
\index{coproduct!disjointness|(}
To give a second application of the fundamental theorem of identity types, we characterize the identity types of coproducts. Our goal in this section is to prove the following theorem.
\begin{thm}\label{thm:id-coprod-compute}
Let $A$ and $B$ be types. Then there are equivalences
\begin{align*}
(\inl(x)=\inl(x')) & \eqvsym (x = x')\\
(\inl(x)=\inr(y')) & \eqvsym \emptyt \\
(\inr(y)=\inl(x')) & \eqvsym \emptyt \\
(\inr(y)=\inr(y')) & \eqvsym (y=y')
\end{align*}
for any $x,x':A$ and $y,y':B$.
\end{thm}
In order to prove \cref{thm:id-coprod-compute}, we first define
a binary relation $\Eqcoprod_{A,B}$ on the coproduct $A+B$.
\begin{defn}
Let $A$ and $B$ be types. We define
\begin{equation*}
\Eqcoprod_{A,B} : (A+B)\to (A+B)\to\UU
\end{equation*}
by double induction on the coproduct, postulating
\begin{align*}
\Eqcoprod_{A,B}(\inl(x),\inl(x')) & \defeq (x=x') \\
\Eqcoprod_{A,B}(\inl(x),\inr(y')) & \defeq \emptyt \\
\Eqcoprod_{A,B}(\inr(y),\inl(x')) & \defeq \emptyt \\
\Eqcoprod_{A,B}(\inr(y),\inr(y')) & \defeq (y=y')
\end{align*}
The relation $\Eqcoprod_{A,B}$ is also called the \define{observational equality of coproducts}\index{observational equality!of coproducts}.
\end{defn}
\begin{lem}
The observational equality relation $\Eqcoprod_{A,B}$ on $A+B$ is reflexive, and therefore there is a map
\begin{equation*}
\Eqcoprodeq:\prd{s,t:A+B} (s=t)\to \Eqcoprod_{A,B}(s,t)
\end{equation*}
\end{lem}
\begin{constr}
The reflexivity term $\rho$ is constructed by induction on $t:A+B$, using
\begin{align*}
\rho(\inl(x))\defeq \refl{\inl(x)} & : \Eqcoprod_{A,B}(\inl(x),\inl(x)) \\
\rho(\inr(y))\defeq \refl{\inr(y)} & : \Eqcoprod_{A,B}(\inr(y),\inr(y)).\qedhere
\end{align*}
\end{constr}
To show that $\Eqcoprodeq$ is a family of equivalences, we will use the fundamental theorem, \cref{thm:id_fundamental}. Moreover, we will use the functoriality of coproducts (established in \cref{ex:coproduct_functor}), and the fact that any total space over a coproduct is again a coproduct:
\begin{align*}
\sm{t:A+B}P(t) & \eqvsym \Big(\sm{x:A}P(\inl(x))\Big)+\Big(\sm{y:B}P(\inr(y))\Big)
\end{align*}
All of these equivalences are straightforward to construct, so we leave them as an exercise to the reader.
\begin{lem}\label{lem:is-contr-total-eq-coprod}
For any $s:A+B$ the total space
\begin{equation*}
\sm{t:A+B}\Eqcoprod_{A,B}(s,t)
\end{equation*}
is contractible.
\end{lem}
\begin{proof}
We will do the proof by induction on $s$. The two cases are similar, so we only show that the total space
\begin{equation*}
\sm{t:A+B}\Eqcoprod_{A,B}(\inl(x),t)
\end{equation*}
is contractible. Note that we have equivalences
\begin{samepage}
\begin{align*}
& \sm{t:A+B}\Eqcoprod_{A,B}(\inl(x),t) \\
& \eqvsym \Big(\sm{x':A}\Eqcoprod_{A,B}(\inl(x),\inl(x'))\Big)+\Big(\sm{y':B}\Eqcoprod_{A,B}(\inl(x),\inr(y'))\Big) \\
& \eqvsym \Big(\sm{x':A}x=x'\Big)+\Big(\sm{y':B}\emptyt\Big) \\
& \eqvsym \Big(\sm{x':A}x=x'\Big)+\emptyt \\
& \eqvsym \sm{x':A}x=x'.
\end{align*}%
\end{samepage}%
In the last two equivalences we used \cref{ex:unit-laws-coprod}. This shows that the total space is contractible, since the latter type is contractible by \cref{thm:total_path}.
\end{proof}
\begin{proof}[Proof of \cref{thm:id-coprod-compute}]
The proof is now concluded with an application of \cref{thm:id_fundamental}, using \cref{lem:is-contr-total-eq-coprod}.
\end{proof}
\index{disjointness of coproducts|)}
\index{characterization of identity type!coproduct|)}
\index{identity type!coproduct|)}
\index{coproduct!identity type|)}
\index{coproduct!disjointness|)}
\begin{exercises}
\exercise
\begin{subexenum}
\item \label{ex:is-emb-empty}Show that the map $\emptyt\to A$ is an embedding for every type $A$.\index{is an embedding!0 to A@{$\emptyt\to A$}}
\item \label{ex:is-emb-inl-inr}Show that $\inl:A\to A+B$ and $\inr:B\to A+B$ are embeddings for any two types $A$ and $B$.
\index{is an embedding!inl (for coproducts)@{$\inl$ (for coproducts)}}
\index{is an embedding!inr (for coproducts)@{$\inr$ (for coproducts)}}
\index{inl@{$\inl$}!is an embedding}
\index{inr@{$\inr$}!is an embedding}
\end{subexenum}
\exercise Consider an equivalence $e:A\simeq B$. Construct an equivalence
\begin{equation*}
(e(x)=y)\simeq(x=e^{-1}(y))
\end{equation*}
for every $x:A$ and $y:B$.
\exercise Show that\index{embedding!closed under homotopies}
\begin{equation*}
(f\htpy g)\to (\isemb(f)\leftrightarrow\isemb(g))
\end{equation*}
for any $f,g:A\to B$.
\exercise \label{ex:emb_triangle}Consider a commuting triangle
\begin{equation*}
\begin{tikzcd}[column sep=tiny]
A \arrow[rr,"h"] \arrow[dr,swap,"f"] & & B \arrow[dl,"g"] \\
& X
\end{tikzcd}
\end{equation*}
with $H:f\htpy g\circ h$.
\begin{subexenum}
\item Suppose that $g$ is an embedding. Show that $f$ is an embedding if and only if $h$ is an embedding.\index{is an embedding!composite of embeddings}\index{is an embedding!right factor of embedding if left factor is an embedding}
\item Suppose that $h$ is an equivalence. Show that $f$ is an embedding if and only if $g$ is an embedding.\index{is an embedding!left factor of embedding if right factor is an equivalence}
\end{subexenum}
\exercise \label{ex:is-equiv-is-equiv-functor-coprod}Consider two maps $f:A\to A'$ and $g:B \to B'$.
\begin{subexenum}
\item Show that if the map
\begin{equation*}
f+g:(A+B)\to (A'+B')
\end{equation*}
is an equivalence, then so are both $f$ and $g$ (this is the converse of \cref{ex:coproduct_functor_equivalence}).
\item \label{ex:is-emb-coprod}Show that $f+g$ is an embedding if and only if both $f$ and $g$ are embeddings.
\end{subexenum}
\exercise \label{ex:htpy_total}
\begin{subexenum}
\item Let $f,g:\prd{x:A}B(x)\to C(x)$ be two families of maps. Show that
\begin{equation*}
\Big(\prd{x:A}f(x)\htpy g(x)\Big)\to \Big(\tot{f}\htpy \tot{g}\Big).
\end{equation*}
\item Let $f:\prd{x:A}B(x)\to C(x)$ and let $g:\prd{x:A}C(x)\to D(x)$. Show that
\begin{equation*}
\tot{\lam{x}g(x)\circ f(x)}\htpy \tot{g}\circ\tot{f}.
\end{equation*}
\item For any family $B$ over $A$, show that
\begin{equation*}
\tot{\lam{x}\idfunc[B(x)]}\htpy\idfunc.
\end{equation*}
\end{subexenum}
\exercise \label{ex:id_fundamental_retr}Let $a:A$, and let $B$ be a type family over $A$.
\begin{subexenum}
\item Use \cref{ex:htpy_total,ex:contr_retr} to show that if each $B(x)$ is a retract of $\id{a}{x}$, then $B(x)$ is equivalent to $\id{a}{x}$ for every $x:A$.
\index{fundamental theorem of identity types!formulation with retractions}
\item Conclude that for any family of maps
\index{fundamental theorem of identity types!formulation with sections}
\begin{equation*}
f : \prd{x:A} (a=x) \to B(x),
\end{equation*}
if each $f(x)$ has a section, then $f$ is a family of equivalences.
\end{subexenum}
\exercise Use \cref{ex:id_fundamental_retr} to show that for any map $f:A\to B$, if
\begin{equation*}
\apfunc{f} : (x=y) \to (f(x)=f(y))
\end{equation*}
has a section for each $x,y:A$, then $f$ is an embedding.\index{is an embedding!if the action on paths have sections}
\exercise \label{ex:path-split}We say that a map $f:A\to B$ is \define{path-split}\index{path-split} if $f$ has a section, and for each $x,y:A$ the map
\begin{equation*}
\apfunc{f}(x,y):(x=y)\to (f(x)=f(y))
\end{equation*}
also has a section. We write $\pathsplit(f)$\index{path-split(f)@{$\pathsplit(f)$}} for the type
\begin{equation*}
\sections(f)\times\prd{x,y:A}\sections(\apfunc{f}(x,y)).
\end{equation*}
Show that for any map $f:A\to B$ the following are equivalent:
\begin{enumerate}
\item The map $f$ is an equivalence.
\item The map $f$ is path-split.
\end{enumerate}
\exercise \label{ex:fiber_trans}Consider a triangle
\begin{equation*}
\begin{tikzcd}[column sep=small]
A \arrow[rr,"h"] \arrow[dr,swap,"f"] & & B \arrow[dl,"g"] \\
& X
\end{tikzcd}
\end{equation*}
with a homotopy $H:f\htpy g\circ h$ witnessing that the triangle commutes.
\begin{subexenum}
\item Construct a family of maps
\begin{equation*}
\fibtriangle(h,H):\prd{x:X}\fib{f}{x}\to\fib{g}{x},
\end{equation*}
for which the square
\begin{equation*}
\begin{tikzcd}[column sep=8em]
\sm{x:X}\fib{f}{x} \arrow[r,"\tot{\fibtriangle(h,H)}"] \arrow[d] & \sm{x:X}\fib{g}{x} \arrow[d] \\
A \arrow[r,swap,"h"] & B
\end{tikzcd}
\end{equation*}
commutes, where the vertical maps are as constructed in \cref{ex:fib_replacement}.
\item Show that $h$ is an equivalence if and only if $\fibtriangle(h,H)$ is a family of equivalences.
\end{subexenum}
\end{exercises}
\index{fundamental theorem of identity types|)}
\index{characterization of identity type!fundamental theorem of identity types|)}
\endinput
\begin{comment}
\exercise \label{ex:eqv_sigma_mv}Consider a map
\begin{equation*}
f:A \to \sm{y:B}C(y).
\end{equation*}
\begin{subexenum}
\item Construct a family of maps
\begin{equation*}
f':\prd{y:B} \fib{\proj 1\circ f}{y}\to C(y).
\end{equation*}
\item Construct an equivalence
\begin{equation*}
\eqv{\fib{f'(b)}{c}}{\fib{f}{(b,c)}}
\end{equation*}
for every $(b,c):\sm{y:B}C(y)$.
\item Conclude that the following are equivalent:
\begin{enumerate}
\item $f$ is an equivalence.
\item $f'$ is a family of equivalences.
\end{enumerate}
\end{subexenum}
\exercise \label{ex:coh_intro}Consider a type $A$ with base point $a:A$, and let $B$ be a type family on $A$ that implies the identity type, i.e., there is a term
\begin{equation*}
\alpha : \prd{x:A} B(x)\to (a=x).
\end{equation*}
Show that the \define{coherence reduction map}
\begin{equation*}
\cohreduction : \Big(\sm{y:B(a)}\alpha(a,y)=\refl{a}\Big) \to \Big(\sm{x:A}B(x)\Big)
\end{equation*}
defined by $\lam{(y,q)}(a,y)$ is an equivalence.
\end{comment}
\section{Future Work}
\label{sec:future}
Because of the nature of SINATRA, a large amount of future work is expected. A critical goal for SINATRA is that it be developed into a state-of-the-art homegrown code which Cal Poly can use to help develop new aerospace technology.
\subsection{Boundaries}
One part of SINATRA which is underdeveloped is the handling of boundaries. Currently, SINATRA handles the six main boundaries of a cube. It has the capability to define the type of wall and the characteristics of the particles flowing through that wall. However, it ignores any boundaries inside the domain and cannot break a boundary into multiple sections. While dealing with boundaries inside the domain will not be needed for a plasma plume, it will be important to be able to split the boundaries so that part of the wall can simulate the thruster nozzle. The path forward for boundaries is twofold. First, a boundary class needs to be built within SINATRA which allows the user to specify sections which differ from the rest of the domain. At that stage it would be reasonable to simulate a thruster nozzle. \par
\indent The next stage would be to build Cart3D\textsuperscript{TM} integration. Cart3D\textsuperscript{TM} is meshing software that can create an octree mesh which includes internal domain boundaries. This allows a user to import a Computer-aided design (CAD) file and get out a mesh which SINATRA can understand. This is critical for upper atmosphere calculations around aircraft or spacecraft bodies. It can also be used for objects in Low Earth Orbit. The Cart3D\textsuperscript{TM} tool will allow users to specify what each external and internal boundary type is and is able to split these boundaries into much smaller pieces. Cart3D\textsuperscript{TM} can also dynamically change the mesh size depending on distance to a wall and other user specifications, which will allow for more accurate DSMC-PIC simulation data. This integration will need to be completed before SINATRA can create useful simulation results. Future researchers can also choose different meshing software depending on future requirements. \par
\subsection{Electric Thruster}
As mentioned above, in order to run an accurate electric thruster simulation, changes will be needed in how boundaries are handled in SINATRA. Updating the boundaries is the most important change that will be needed for accurate simulations of electric thruster plumes. The Poisson equation solver would need to be upgraded as well. Currently, a finite volume solver is being used. This is a robust solver which is well-researched and understood, but it has a few restrictions. First, and most importantly, the solver expects the mesh to be evenly sized across the entire domain. This works with the current version of SINATRA's home-built meshing software; however, once the mesh is not completely uniform, the Poisson solver will break. This solver can only handle straight boundaries, which is acceptable because the DSMC portion uses straight approximations of curves through the octree mesh. There are many other options for a Poisson solver that have been explored in PIC research. For example, the conjugate gradient solver might be a good option for SINATRA. The conjugate gradient method requires a symmetric positive definite matrix, which the Finite Difference method creates \cite{FD_GS}. This upgrade would also greatly reduce the execution time, and therefore allow for larger and more accurate simulations. \par
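\indent As a purely illustrative sketch of this direction (it is not SINATRA source code, and the grid size and right-hand side are placeholder values), the following Python fragment assembles a one-dimensional finite-difference Poisson system as a sparse matrix and solves it with the conjugate gradient method, which applies because the matrix is symmetric positive definite:
\begin{verbatim}
# Illustrative sketch only, not SINATRA source code.
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import cg

n = 100                    # number of interior grid nodes (placeholder)
h = 1.0 / (n + 1)          # uniform mesh spacing
rho = np.ones(n)           # placeholder right-hand side (charge term)

# Second-order stencil: (-phi[i-1] + 2*phi[i] - phi[i+1]) / h**2
main = 2.0 * np.ones(n)
off = -1.0 * np.ones(n - 1)
A = sp.diags([off, main, off], [-1, 0, 1], format="csr") / h**2

phi, info = cg(A, rho)     # conjugate gradient on the sparse SPD system
\end{verbatim}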
% TO DO talk about the solver - using sparce matrices, handling the difference between cells and nodes, and changing solver
\subsection{Charged Particles}
There are many facets to simulating charged particles. While the author has captured the largest features, there are many other physical attributes which make charged particles a complicated and interesting subject. There are two main physical properties which would be the most likely candidates to be added to SINATRA. They are charged collisions and surface interactions. \par
\indent When charged particles are involved, there are many new types of particle collisions. Some types are ionization and recombination collisions. These are being ignored in SINATRA because the electrons are being modeled as a fluid. However, if magnetic fields were to be included, for example for a magnetic nozzle, or if the grids of an ion thruster need to be modeled, then the fluid assumption would no longer be valid. The simulation would have to drastically reduce its time-step for the fast electrons and would also need to consider ionization and recombination collisions. There are also chemical interactions that are not being considered in the scope of this project. Both of those types of collisions will most likely not be needed for accurate plasma plume simulations. In order for these collisions to be included, the collision class will need to be updated to be able to handle multiple schemes within the same simulation. \par
\indent However, charge exchange collisions will need to be included eventually. Charge exchange collisions are instances where the electron orbital cloud of an ion and a neutral particle will interact \cite{pic_generic}. There is a possibility in these collisions for an electron to be stripped from the neutral atom and become attached to the charged ion. The charge is exchanged between the two particles; the neutral atom becomes a charged ion and the charged ion becomes a neutral atom. However, there is no significant change in momentum. This type of collision is common and significant in electric thruster plumes. While most thrusters are efficient at ionizing the propellant, there are still neutral atoms which come out of the chamber and into the plume. Their relatively low velocities cause the relative density of the neutral atoms to be high near the thruster nozzle. Charge exchange collisions (CEX) are therefore likely. The resulting slow-moving ions are very susceptible to the radial component of the electric field set up by the plume. While the fast-moving ions will diverge, they do so at a low angle because of their high initial velocity. However, these new ions are moving slowly and therefore are more easily affected by the radial electric field. They create what is called CEX wings, seen in Figure \ref{fig:CEX} \cite{cex_wings}. This radial motion of charged ions can cause major interactions with the spacecraft including contamination, sputtering, and spacecraft charging. Therefore, CEX interactions should be included when doing full spacecraft analysis.
\begin{figure}
\includegraphics[width=.65\textwidth]{CEX.JPG}
\centering
\caption[Visualization of CEX wings]{Visualization of CEX wings \textmd{\cite{cex_wings}}}
\label{fig:CEX}
\end{figure}
\indent Another important quality of charged particles is their surface interactions. When high-energy particles collide with metal surfaces, they sputter off material and degrade the surface. This is an important design consideration in spacecraft with electric propulsion. SINATRA models various types of surface interactions through its boundary handling. There can be inflow, outflow, specular and diffuse walls. However, especially with CEX collisions, charged particles can interact with a spacecraft surface and impart their charge onto it \cite{surface_charge}. The surface can be modeled as a grounded conductor or as a perfect insulator or as a mix between the two. This type of modeling will need to be included in SINATRA whenever boundaries are introduced into the domain.
\subsection{SINATRA Efficiency and Capability}
\label{sec:auto_mesh}
There are many sections in SINATRA that need to be updated before SINATRA can be used as a cutting-edge research tool. It was developed by mechanical and aerospace engineers, not by computer scientists. Therefore, many of the algorithms, storage methods, and memory access patterns are not optimized. The author has removed the largest and simplest bottlenecks, but there are other areas that need to be optimized. The next largest bottleneck is the sampling of data. When SINATRA samples data from the simulation, it prints out text files. This printing is one of the slowest portions of the simulation. Tecplot\textsuperscript{TM} has an API which allows users to write binary files, and Tecplot is able to read those binary files directly. This was attempted by the original developers, but they were not successful. It requires a deeper understanding of C++ and binary file formats. There are many examples of possible optimization throughout the code which can be fixed by a developer with that type of skill set. It may also be possible to switch to the Visualization Toolkit (VTK), which has other features that may make sampling data much simpler and quicker. \par
\indent Parallelization also lies within a computer scientist's skill set. As with the bottlenecks above, the author has implemented a simple version of parallelization; however, there are better ways to implement it. It could be implemented by splitting the domain into multiple pieces, which is a possibility on account of the regularity of the octree mesh. However, this may not be the best option because particles must be transferred between the domains, which is very computationally expensive. Another option would be to split a larger section of each time-step into many parts. The simulation could also be parallelized by having each core run the same simulation; the multiple simulations could then be averaged as a way to reduce the randomness of DSMC so that an accurate solution is calculated. Parallelization techniques are still on the cutting edge of computer science, and therefore this would be a fruitful project for a developer with experience in this area. \par
\indent Within the DSMC community, there are many schools of thought about the best way to work with time-steps and mesh sizes \cite{bird_dsmc}. It is possible to have variable time-steps as well as variable mesh sizes. It is also possible for the time-step to change for each particle depending on their velocity and the size of the mesh cell they are within. There are algorithms which create a mesh which changes cell size depending on the average number of particles in a cell. This creates a mesh which changes with the flow and eventually sets up an optimal mesh for that steady state flow condition. \par
\indent SINATRA is currently at its first iteration; therefore it uses a fixed time-step for all particles and a fixed mesh. Upgrading SINATRA to a more complicated time-step and mesh algorithm would be a good project for a future developer. In order to keep charged particle capability, the Poisson solver would have to be upgraded at the same time because it is currently based on the fixed mesh. This upgrade could greatly reduce SINATRA's execution time; therefore, it would allow higher-resolution and more accurate simulations. It would be beneficial to implement a user choice for different Poisson solvers so that a user can compare various solvers and use their various advantages depending on their simulation requirements. It would also be useful to include an option for the user to have SINATRA assume a linearized Boltzmann equation, which assumes that \(n_i-n_e \ll n_i\). This would allow a much quicker calculation of the potential and be a good low-accuracy solver for the beginning stages of complicated simulations. \par
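\indent One common form of this approximation, stated here only for illustration (here \(e\) is the elementary charge, \(k\) the Boltzmann constant, and \(n_0\), \(\phi_0\), and \(T_e\) a reference density, reference potential, and electron temperature; none of these are existing SINATRA inputs), treats the electrons as Boltzmann-distributed and linearizes the exponential when \(e(\phi-\phi_0) \ll k T_e\):
\[
n_e = n_0 \exp\!\left(\frac{e(\phi-\phi_0)}{k T_e}\right) \approx n_0\left(1 + \frac{e(\phi-\phi_0)}{k T_e}\right),
\]
so that the electron term in Poisson's equation becomes linear in the potential and the system can be solved without iterating on the exponential. \par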
\indent The stencil implementation will also need to be upgraded. When non-uniform Cartesian grids are included in SINATRA, the stencil implementation will fail. The Finite Difference Method requires a uniform mesh. However, the Finite Volume method may be a good alternative to be used for the new mesh types. It will need to be upgraded to be much more robust to accommodate complicated meshes. The stencil implementation can also be upgraded. Currently the stencil matrix wastes a large amount of computing power by storing a largely empty matrix and multiplying many zeros. This can be improved through either sparse matrix calculations or by calculating the required row of the matrix only when going to do operations upon it. Finally, the future developers will need to be careful when using a non-uniform Cartesian grid and depositing charge on account of the octree structure. \par
\indent Another area of possible improvement is the octree search algorithm. Future developers will need to start by confirming that the current search algorithm fits the expected \(n \log(n)\) order, where \(n\) is the number of items in the search. Once this is confirmed, other advanced algorithms can be implemented, including aligning the octree mesh to an integer system so that the integer positions can be used as inputs into the data matrix within the calculations. A sketch of this idea is given below. \par
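\indent As a purely illustrative sketch (not existing SINATRA code) of what aligning the octree mesh to an integer system could look like, the following Python fragment interleaves the bits of three integer cell coordinates into a single Morton key, which can then be used directly as an index into a data array:
\begin{verbatim}
# Illustrative sketch: Morton (Z-order) key for an octree cell.
def morton3(i, j, k, bits=10):
    """Interleave the low `bits` bits of three cell indices."""
    key = 0
    for b in range(bits):
        key |= ((i >> b) & 1) << (3 * b)
        key |= ((j >> b) & 1) << (3 * b + 1)
        key |= ((k >> b) & 1) << (3 * b + 2)
    return key

key = morton3(3, 5, 1)   # single integer key for cell (3, 5, 1)
\end{verbatim}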
\subsection{Systems Operations}
It will be important to continually update the systems engineering sections of SINATRA. The author has set up systems which will hopefully be helpful in keeping SINATRA up-to-date and relevant, but they will need to be monitored and maintained.
\par
\indent First, the simple distributions need to be kept up-to-date. There are distributions for Windows, Linux, and Mac. If SINATRA continues to grow at Cal Poly, an official release website with version control can be set up. Until then it will be released through .zip files being sent to the new user; therefore, the distributions need to be up-to-date with the current stable version of SINATRA, input files, and the GUI. \par
\indent The GUI will also need to be updated as there are changes to the input class. Whenever the input class requires a different way to input variables to the simulation, the GUI will also need to be changed to accommodate for those changes. It will also have to be updated when the boundary class or output class changes their user interface. It is not difficult to keep the GUI up-to-date, but it can easily become obsolete if it is left alone during further development. \par
\indent The author has ensured that the GitHub\textsuperscript{\textregistered} repository is kept clean and up-to-date as much as possible. It will be necessary to use the GitHub\textsuperscript{\textregistered} repository while developing new code. One pitfall could be that new developers only develop on their local machines and ignore the GitHub\textsuperscript{\textregistered} repository. This habit could ruin the continuity of the version control of GitHub\textsuperscript{\textregistered} and more importantly could make it harder for new developers to add their contributions. The GitHub\textsuperscript{\textregistered} repository should be kept as a version of the code which can be easily shared with new developers, so they are not confused or distracted by extra files and information. If SINATRA is well taken care of, it will become a legacy code that will allow Cal Poly to shine as a school with advanced modeling skills and allow new and revolutionary technology to come from ``Learning by Doing''.
% PACKAGE IMPORTS
\usepackage{geometry}
\usepackage{graphicx}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{titling}
\usepackage{listings}
\usepackage{enumitem}
\usepackage{tgschola}
% DOCUMENT COMMANDS
\newcommand{\thickline}{\noindent\rule{\textwidth}{2pt}}
\newcommand{\preparetitle}{\title{CMPM 163 Notes}\author{Malcolm Riley}\date{Winter 2019\quad\textemdash\quad\today}}
\newenvironment{topic}[1]{\section*{#1}\begin{itemize}}{\end{itemize}}
\newcommand{\inputcode}[2]{\lstinputlisting[title=\textbf{#1},label=#2]{#2}}
\newcommand{\code}[1]{\texttt{#1}}
\newcommand{\term}[1]{\textbf{#1}}
\newcommand{\quotes}[1]{``#1''}
% DOCUMENT CONFIGURATION
\geometry{letterpaper, margin=1in}
\pretitle{\thickline\begin{center}\LARGE\bfseries}
\posttitle{\end{center}}
\predate{\begin{center}}
\postdate{\end{center}\thickline\\[1em]}
\setlist[itemize,1]{label=\textbf{--}}
\lstset{basicstyle=\small\ttfamily,float=h,tabsize=4,frame=single,firstnumber=1,numbers=left,stepnumber=1,aboveskip=1em,belowskip=1em}
"alphanum_fraction": 0.7564102564,
"avg_line_length": 36.2142857143,
"ext": "tex",
"hexsha": "ed67370b37652430e27b6a6676de4c6ab4358add",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "6ed7f35fbde210e39ae5b2e171d6fd6ed3356a5b",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "malcolmriley/CMPM-163",
"max_forks_repo_path": "notes/_PREAMBLE.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "6ed7f35fbde210e39ae5b2e171d6fd6ed3356a5b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "malcolmriley/CMPM-163",
"max_issues_repo_path": "notes/_PREAMBLE.tex",
"max_line_length": 134,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "6ed7f35fbde210e39ae5b2e171d6fd6ed3356a5b",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "malcolmriley/CMPM-163",
"max_stars_repo_path": "notes/_PREAMBLE.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 357,
"size": 1014
} |
\section{Brownian Motion}
\begin{frame}{Brownian motion}
Brownian motion can be constructed as a limit of random walks.
\begin{eqnarray*}
P(Y_i=\Delta x)=\frac{1}{2} \\
P(Y_i=-\Delta x)=\frac{1}{2}
\end{eqnarray*}
\pause
\begin{center}
What is the distribution of
\begin{equation*}
X_n=Y_1+Y_2+...+Y_n
\end{equation*}
if $Y_1,Y_2,...,Y_n$ are i.i.d.?
\end{center}
\end{frame}
\begin{frame}
\begin{eqnarray*}
X_n&= &Y_1+Y_2+...+Y_n\\ \\
\pause
\ln (M_{X_n})(\lambda)&\approx &\frac{(\Delta x)^2}{\Delta t} \left(\frac{\lambda ^2 T}{2}+\frac{\lambda ^4 \Delta x ^2 T}{4}+... \right)\\
\end{eqnarray*}
\pause
If we assume that $\frac{(\Delta x)^2}{\Delta t}=k$ then,
\begin{equation*}
\lim_{\Delta x \to 0}{M_{X_n}(\lambda)}=\exp\left(\frac{kT\lambda ^2}{2}\right)
\end{equation*}
which means $X_n \sim N(0,kT)$
\end{frame}
\begin{frame}
\begin{block}{Standard Brownian Motion}
A random variable $B(t)$ that depends continuously on $t \in [0,T]$ and satisfies:
\begin{itemize}
\item $B(0)=0$
\item For $0 \leq s<t\leq T$: $B(t)-B(s)\sim N(0, t-s)$
\item For $0 \leq s<t<u<v\leq T$ the increments $B(t)-B(s)$ and $B(v)-B(u)$ are independent.
\end{itemize}
\end{block}
\end{frame}
\begin{frame}{Computational Purposes}
\pause
Discretized Brownian motion: $B(t)$ is specified at discrete t values.
\begin{eqnarray*}
&& W(0)=0\\
&& W(j)=W(j-1)+dW(j)
\end{eqnarray*}
$dW(j)$ is an independent random variable of the form $N(0,\Delta t)$\cite{doi:10.1137/S0036144500378302}.
\end{frame}
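\begin{frame}[fragile]{Discretized Brownian Motion: Python Sketch}
A minimal, illustrative sketch of the construction above (the step count and seed are arbitrary choices):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
T, n = 1.0, 500
dt = T / n
dW = rng.normal(0.0, np.sqrt(dt), size=n)   # dW(j) ~ N(0, dt)
W = np.concatenate(([0.0], np.cumsum(dW)))  # W(0)=0, W(j)=W(j-1)+dW(j)
\end{verbatim}
\end{frame}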
\begin{frame}
\begin{center}
\includegraphics[scale=0.5]{r_w_3.png}
\end{center}
\end{frame}
\begin{frame}
\begin{center}
\includegraphics[scale=0.5]{r_w_4.png}
\end{center}
\end{frame}
\begin{frame}
\begin{center}
\includegraphics[scale=0.5]{hist.png}
\end{center}
\end{frame}
\begin{frame}
\begin{center}
\includegraphics[scale=0.5]{qqplot.png}
\end{center}
\end{frame}
\section{Stochastic Integrals}
\begin{frame}{Stochastic Differential Equations}
\pause
We'll take an ordinary differential equation that contains a deterministic part and add an additional stochastic term:
\begin{eqnarray*}
dx=fdt+gdB
\end{eqnarray*}
\end{frame}
\begin{frame}{Stochastic Integrals}
\pause
\begin{center}
What does $\int_{a}^{b} f(t)dB$ mean? \bigskip \pause $\int_{a}^{b} B(t)dB$?\pause \\
\begin{equation*}
\sum_{j=1}^{n}B(t_{j-1})\left( B(t_{j})-B(t_{j-1})\right) ?
\end{equation*}
\pause
\bigskip
In $[0,1]$ with $\Delta t=1/500$:\\
Left end-point sum $=-0.42765$\\
Mid-point sum $=0.088892$
\end{center}
\end{frame}
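\begin{frame}[fragile]{Stochastic Integrals: Python Sketch}
An illustrative sketch of the two sums on the previous slide (the realization of $B$ is random, so the values differ from run to run):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
n = 500
dt = 1.0 / n
dB = rng.normal(0.0, np.sqrt(dt), size=n)
B = np.concatenate(([0.0], np.cumsum(dB)))

# left end-point sum: sum of B(t_{j-1}) * (B(t_j) - B(t_{j-1}))
left = np.sum(B[:-1] * dB)
# mid-point-type sum, approximating B at the interval mid-point
# by the average of the two end points
mid = np.sum(0.5 * (B[:-1] + B[1:]) * dB)
\end{verbatim}
\end{frame}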
\begin{frame}
\begin{block}{Definition}
Let $f(t)$ be a continuous function with bounded variation, and let $a_i=f(t_{i-1})$. Then \textbf{the Wiener integral} is:
\begin{equation*}
I(f):=\lim_{n \to \infty}\sum_{i=1}^{n}a_i(B(t_i)-B(t_{i-1}))
\end{equation*}
\pause
\begin{itemize}
\item $E[I(f)]=0$
\item $Var[I(f)]=\int_{a}^{b}f^2(t)dt$
\end{itemize}
\end{block}
\end{frame}
% uWaterloo Thesis Template for LaTeX
% Last Updated June 14, 2017 by Stephen Carr, IST Client Services
% FOR ASSISTANCE, please send mail to [email protected]
% Effective October 2006, the University of Waterloo
% requires electronic thesis submission. See the uWaterloo thesis regulations at
% https://uwaterloo.ca/graduate-studies/thesis.
% DON'T FORGET TO ADD YOUR OWN NAME AND TITLE in the "hyperref" package
% configuration below. THIS INFORMATION GETS EMBEDDED IN THE PDF FINAL PDF DOCUMENT.
% You can view the information if you view Properties of the PDF document.
% Many faculties/departments also require one or more printed
% copies. This template attempts to satisfy both types of output.
% It is based on the standard "book" document class which provides all necessary
% sectioning structures and allows multi-part theses.
% DISCLAIMER
% To the best of our knowledge, this template satisfies the current uWaterloo requirements.
% However, it is your responsibility to assure that you have met all
% requirements of the University and your particular department.
% Many thanks for the feedback from many graduates that assisted the development of this template.
% -----------------------------------------------------------------------
% By default, output is produced that is geared toward generating a PDF
% version optimized for viewing on an electronic display, including
% hyperlinks within the PDF.
% E.g. to process a thesis called "mythesis.tex" based on this template, run:
% pdflatex mythesis -- first pass of the pdflatex processor
% bibtex mythesis -- generates bibliography from .bib data file(s)
% makeindex -- should be run only if an index is used
% pdflatex mythesis -- fixes numbering in cross-references, bibliographic references, glossaries, index, etc.
% pdflatex mythesis -- fixes numbering in cross-references, bibliographic references, glossaries, index, etc.
% If you use the recommended LaTeX editor, Texmaker, you would open the mythesis.tex
% file, then click the PDFLaTeX button. Then run BibTeX (under the Tools menu).
% Then click the PDFLaTeX button two more times. If you have an index as well,
% you'll need to run MakeIndex from the Tools menu as well, before running pdflatex
% the last two times.
% N.B. The "pdftex" program allows graphics in the following formats to be
% included with the "\includegraphics" command: PNG, PDF, JPEG, TIFF
% Tip 1: Generate your figures and photos in the size you want them to appear
% in your thesis, rather than scaling them with \includegraphics options.
% Tip 2: Any drawings you do should be in scalable vector graphic formats:
% SVG, PNG, WMF, EPS and then converted to PNG or PDF, so they are scalable in
% the final PDF as well.
% Tip 3: Photographs should be cropped and compressed so as not to be too large.
% To create a PDF output that is optimized for double-sided printing:
%
% 1) comment-out the \documentclass statement in the preamble below, and
% un-comment the second \documentclass line.
%
% 2) change the value assigned below to the boolean variable
% "PrintVersion" from "false" to "true".
% --------------------- Start of Document Preamble -----------------------
% Specify the document class, default style attributes, and page dimensions
% For hyperlinked PDF, suitable for viewing on a computer, use this:
\documentclass[letterpaper,12pt,titlepage,oneside,final]{book}
% For PDF, suitable for double-sided printing, change the PrintVersion variable below
% to "true" and use this \documentclass line instead of the one above:
%\documentclass[letterpaper,12pt,titlepage,openright,twoside,final]{book}
% Some LaTeX commands I define for my own nomenclature.
% If you have to, it's better to change nomenclature once here than in a
% million places throughout your thesis!
\newcommand{\package}[1]{\textbf{#1}} % package names in bold text
\newcommand{\cmmd}[1]{\textbackslash\texttt{#1}} % command name in tt font
\newcommand{\href}[1]{#1} % does nothing, but defines the command so the
% print-optimized version will ignore \href tags (redefined by hyperref pkg).
%\newcommand{\texorpdfstring}[2]{#1} % does nothing, but defines the command
\newcommand{\imgsrc}[1]{\caption*{\scriptsize{Source: #1}}} % used for image captions
\newcommand{\tabh}[1]{\multicolumn{1}{c|}{\textbf{#1}}} % used for table headers
\newcommand{\tabc}[2]{\multicolumn{1}{|c|}{\multirow{#1}{*}{\textbf{#2}}}} % used for the top left cell in a table
\newcommand{\loss}[1]{\mathcal{L}_\text{#1}} % generic loss symbol
% Anything defined here may be redefined by packages added below...
% This package allows if-then-else control structures.
\usepackage{ifthen}
\newboolean{PrintVersion}
\setboolean{PrintVersion}{false}
% CHANGE THIS VALUE TO "true" as necessary, to improve printed results for hard copies
% by overriding some options of the hyperref package below.
%\usepackage{nomencl} % For a nomenclature (optional; available from ctan.org)
\usepackage{amsmath,amssymb,amstext} % Lots of math symbols and environments
\usepackage[pdftex]{graphicx} % For including graphics N.B. pdftex graphics driver
\usepackage{natbib} % For better bibliography formatting
\usepackage[dvipsnames]{xcolor} % Additional colors
\usepackage[T1]{fontenc}
\usepackage{bm}
\usepackage{caption}
\usepackage[hyphens]{url}
\usepackage[linesnumbered,lined,boxed]{algorithm2e}
\usepackage{multirow}
\usepackage{makecell}
\usepackage{indentfirst}
% Hyperlinks make it very easy to navigate an electronic document.
% In addition, this is where you should specify the thesis title
% and author as they appear in the properties of the PDF document.
% Use the "hyperref" package
% N.B. HYPERREF MUST BE THE LAST PACKAGE LOADED; ADD ADDITIONAL PKGS ABOVE
\usepackage[pdftex,pagebackref=false]{hyperref} % with basic options
% N.B. pagebackref=true provides links back from the References to the body text. This can cause trouble for printing.
\hypersetup{
plainpages=false, % needed if Roman numbers in frontpages
unicode=false, % non-Latin characters in Acrobat’s bookmarks
pdftoolbar=true, % show Acrobat’s toolbar?
pdfmenubar=true, % show Acrobat’s menu?
pdffitwindow=false, % window fit to page when opened
pdfstartview={FitH}, % fits the width of the page to the window
pdftitle={Disentangled Representation Learning for Stylistic Variation in Neural Language Models}, % title: CHANGE THIS TEXT!
pdfauthor={Vineet John}, % author: CHANGE THIS TEXT! and uncomment this line
pdfsubject={Natural Language Processing}, % subject: CHANGE THIS TEXT! and uncomment this line
pdfkeywords={nlp} {neural-networks} {style-transfer}, % list of keywords, and uncomment this line if desired
pdfnewwindow=true, % links in new window
colorlinks=true, % false: boxed links; true: colored links
linkcolor=blue, % color of internal links
citecolor=blue, % color of links to bibliography
filecolor=magenta, % color of file links
urlcolor=cyan % color of external links
}
\ifthenelse{\boolean{PrintVersion}}{ % for improved print quality, change some hyperref options
\hypersetup{ % override some previously defined hyperref options
% colorlinks,%
citecolor=black,%
filecolor=black,%
linkcolor=black,%
urlcolor=black}
}{} % end of ifthenelse (no else)
\usepackage[automake,toc,abbreviations]{glossaries-extra} % Exception to the rule of hyperref being the last add-on package
% If glossaries-extra is not in your LaTeX distribution, get it from CTAN (http://ctan.org/pkg/glossaries-extra),
% although it's supposed to be in both the TeX Live and MikTeX distributions. There are also documentation and
% installation instructions there.
% Setting up the page margins...
% uWaterloo thesis requirements specify a minimum of 1 inch (72pt) margin at the
% top, bottom, and outside page edges and a 1.125 in. (81pt) gutter
% margin (on binding side). While this is not an issue for electronic
% viewing, a PDF may be printed, and so we have the same page layout for
% both printed and electronic versions, we leave the gutter margin in.
% Set margins to minimum permitted by uWaterloo thesis regulations:
\setlength{\marginparwidth}{0pt} % width of margin notes
% N.B. If margin notes are used, you must adjust \textwidth, \marginparwidth
% and \marginparsep so that the space left between the margin notes and page
% edge is less than 15 mm (0.6 in.)
\setlength{\marginparsep}{0pt} % width of space between body text and margin notes
\setlength{\evensidemargin}{0.125in} % Adds 1/8 in. to binding side of all
% even-numbered pages when the "twoside" printing option is selected
\setlength{\oddsidemargin}{0.125in} % Adds 1/8 in. to the left of all pages
% when "oneside" printing is selected, and to the left of all odd-numbered
% pages when "twoside" printing is selected
\setlength{\textwidth}{6.375in} % assuming US letter paper (8.5 in. x 11 in.) and
% side margins as above
\raggedbottom
% The following statement specifies the amount of space between
% paragraphs. Other reasonable specifications are \bigskipamount and \smallskipamount.
\setlength{\parskip}{\medskipamount}
% The following statement controls the line spacing. The default
% spacing corresponds to good typographic conventions and only slight
% changes (e.g., perhaps "1.2"), if any, should be made.
\renewcommand{\baselinestretch}{1} % this is the default line space setting
% By default, each chapter will start on a recto (right-hand side)
% page. We also force each section of the front pages to start on
% a recto page by inserting \cleardoublepage commands.
% In many cases, this will require that the verso page be
% blank and, while it should be counted, a page number should not be
% printed. The following statements ensure a page number is not
% printed on an otherwise blank verso page.
\let\origdoublepage\cleardoublepage
\newcommand{\clearemptydoublepage}{%
\clearpage{\pagestyle{empty}\origdoublepage}}
\let\cleardoublepage\clearemptydoublepage
% Define Glossary terms (This is properly done here, in the preamble. Could be \input{} from a file...)
% Main glossary entries -- definitions of relevant terminology
\makeglossaries
%======================================================================
% L O G I C A L D O C U M E N T -- the content of your thesis
%======================================================================
\begin{document}
% For a large document, it is a good idea to divide your thesis
% into several files, each one containing one chapter.
% To illustrate this idea, the "front pages" (i.e., title page,
% declaration, borrowers' page, abstract, acknowledgements,
% dedication, table of contents, list of tables, list of figures,
% nomenclature) are contained within the file "uw-ethesis-frontpgs.tex" which is
% included into the document by the following statement.
%----------------------------------------------------------------------
% FRONT MATERIAL
%----------------------------------------------------------------------
\input{uw-ethesis-frontpgs}
%----------------------------------------------------------------------
% MAIN BODY
%----------------------------------------------------------------------
% Because this is a short document, and to reduce the number of files
% needed for this template, the chapters are not separate
% documents as suggested above, but you get the idea. If they were
% separate documents, they would each start with the \chapter command, i.e,
% do not contain \documentclass or \begin{document} and \end{document} commands.
%======================================================================
\chapter{Introduction}
%======================================================================
\input{chapters/01-introduction}
%======================================================================
\chapter{Background}
%======================================================================
\input{chapters/02-background}
%======================================================================
\chapter{Related Work}
%======================================================================
\input{chapters/03-related-work}
%======================================================================
\chapter{Approach}
%======================================================================
\input{chapters/04-approach}
%======================================================================
\chapter{Experiments}
%======================================================================
\input{chapters/05-experiments}
%======================================================================
\chapter{Analysis}
%======================================================================
\input{chapters/06-analysis}
%======================================================================
\chapter{Conclusion and Future Work}
%======================================================================
\input{chapters/07-conclusion}
%----------------------------------------------------------------------
% END MATERIAL
%----------------------------------------------------------------------
% B I B L I O G R A P H Y
% -----------------------
% The following statement selects the style to use for references. It controls the sort order of the entries in the bibliography and also the formatting for the in-text labels.
\bibliographystyle{unsrtnat}
% This specifies the location of the file containing the bibliographic information.
% It assumes you're using BibTeX (if not, why not?).
\cleardoublepage % This is needed if the book class is used, to place the anchor in the correct page,
% because the bibliography will start on its own page.
% Use \clearpage instead if the document class uses the "oneside" argument
\phantomsection % With hyperref package, enables hyperlinking from the table of contents to bibliography
% The following statement causes the title "References" to be used for the bibliography section:
\renewcommand*{\bibname}{References}
% Add the References to the Table of Contents
\addcontentsline{toc}{chapter}{\textbf{References}}
\bibliography{uw-ethesis}
% Tip 5: You can create multiple .bib files to organize your references.
% Just list them all in the \bibliogaphy command, separated by commas (no spaces).
% The following statement causes the specified references to be added to the bibliography% even if they were not
% cited in the text. The asterisk is a wildcard that causes all entries in the bibliographic database to be included (optional).
\nocite{*}
\end{document}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{\label{sec:URL-transfer}
Enabling the Transfer of Files Specified by a URL}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\index{file transfer mechanism!input file specified by URL}
\index{file transfer mechanism!output file(s) specified by URL}
\index{URL file transfer}
Because staging data on the submit machine is not always efficient,
Condor permits input files to be transferred
from a location specified by a URL;
likewise, output files may be transferred
to a location specified by a URL.
All transfers (both input and output) are accomplished by invoking
a \Term{plug-in},
an executable or shell script that handles the task of file transfer.
For transferring input files,
URL specification is limited to jobs running under the vanilla universe
and to a vm universe VM image file.
The execute machine retrieves the files.
This differs from the normal file transfer mechanism,
in which transfers are from the machine where the job is submitted
to the machine where the job is executed.
Each file that is to be transferred by specifying a URL, which causes a
plug-in to be invoked, is specified separately in the job submit
description file with the command \SubmitCmd{transfer\_input\_files};
see section~\ref{sec:file-transfer-by-URL} for details.
For transferring output files,
either the entire output sandbox, which consists of all files produced or
modified by the job as it executes, or a subset of these files,
as specified by the submit description file command
\SubmitCmd{transfer\_output\_files}, is transferred to the
directory specified by the URL.
The URL itself is specified in the separate submit description file command
\SubmitCmd{output\_destination};
see section~\ref{sec:file-transfer-by-URL} for details.
The plug-in is invoked once for each output file to be transferred.
Configuration identifies which plug-ins are available.
The plug-ins must be installed and available on every execute machine
that may run a job which might specify a URL, either for input or for output.
URL transfers are enabled by default in the configuration
of execute machines.
Disabling URL transfers is accomplished by setting
\footnotesize
\begin{verbatim}
ENABLE_URL_TRANSFERS = FALSE
\end{verbatim}
\normalsize
A comma separated list giving the absolute path and name
of all available plug-ins is specified as in the example:
\footnotesize
\begin{verbatim}
FILETRANSFER_PLUGINS = /opt/condor/plugins/wget-plugin, \
/opt/condor/plugins/hdfs-plugin, \
/opt/condor/plugins/custom-plugin
\end{verbatim}
\normalsize
The \Condor{starter} invokes all listed plug-ins to determine their
capabilities. Each may handle one or more protocols (scheme names).
The plug-in's response to invocation identifies which protocols
it can handle.
When a URL transfer is specified by a job,
the \Condor{starter} invokes the proper one to do the transfer.
If more than one plugin is capable of handling a particular protocol,
then the last one within the list given by \MacroNI{FILETRANSFER\_PLUGINS}
is used.
Condor assumes that all plug-ins will respond in specific
ways.
To determine the capabilities of the plug-ins as to which protocols
they handle,
the \Condor{starter} daemon invokes each plug-in giving it the
command line argument \Opt{-classad}.
In response to invocation with this command line argument,
the plug-in must respond with an output of three ClassAd attributes.
The first two are fixed:
\footnotesize
\begin{verbatim}
PluginVersion = "0.1"
PluginType = "FileTransfer"
\end{verbatim}
\normalsize
The third ClassAd attribute is \Attr{SupportedMethods}.
This attribute is a string containing a comma separated list of the
protocols that the plug-in handles.
So, for example
\footnotesize
\begin{verbatim}
SupportedMethods = "http,ftp,file"
\end{verbatim}
\normalsize
would identify that the three protocols described by \verb@http@,
\verb@ftp@, and \verb@file@ are supported.
These strings will match the protocol specification as given
within a URL in a \SubmitCmd{transfer\_input\_files} command
or within a URL in an \SubmitCmd{output\_destination} command
in a submit description file for a job.
When a job specifies a URL transfer,
the plug-in is invoked, without the command line argument \Opt{-classad}.
It will instead be given two other command line arguments.
For the transfer of input file(s),
the first will be the URL of the file to retrieve
and the second will be the absolute path identifying where to place the
transferred file.
For the transfer of output file(s),
the first will be the absolute path on the local machine of the file
to transfer,
and the second will be the URL of the directory and file name
at the destination.
The plug-in is expected to do the transfer,
exiting with status 0 if the transfer was successful,
and a non-zero status if the transfer was \emph{not} successful.
When \emph{not} successful, the job is placed on hold,
and the job ClassAd attribute \Attr{HoldReason} will be set as
appropriate for the job.
The job ClassAd attribute \Attr{HoldReasonSubCode} will be set to
the exit status of the plug-in.
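As a purely illustrative sketch (it is not one of the plug-ins shipped with
Condor), a minimal plug-in written in Python could respond to \Opt{-classad}
and handle the \verb@file@ protocol as follows:
\footnotesize
\begin{verbatim}
#!/usr/bin/env python
# Minimal example file transfer plug-in (illustrative only).
import shutil, sys

if len(sys.argv) == 2 and sys.argv[1] == "-classad":
    print('PluginVersion = "0.1"')
    print('PluginType = "FileTransfer"')
    print('SupportedMethods = "file"')
    sys.exit(0)

# Normal invocation: two arguments, source and destination.
src, dst = sys.argv[1], sys.argv[2]
if src.startswith("file://"):
    src = src[len("file://"):]
if dst.startswith("file://"):
    dst = dst[len("file://"):]
try:
    shutil.copy(src, dst)
except Exception:
    sys.exit(1)      # non-zero exit: transfer failed, job goes on hold
sys.exit(0)          # zero exit: transfer succeeded
\end{verbatim}
\normalsize
Exit status 0 signals success; any other exit status places the job on hold,
as described above.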
As an example of the transfer of a subset of output files,
assume that the submit description file contains
\footnotesize
\begin{verbatim}
output_destination = url://server/some/directory/
transfer_output_files = foo, bar, qux
\end{verbatim}
\normalsize
Condor invokes the plug-in that handles the \Expr{url} protocol
three times.
The directory delimiter
(\verb@/@ on Unix, and \verb@\@ on Windows)
is appended to the destination URL,
such that the three (Unix) invocations of the plug-in will appear similar to
\footnotesize
\begin{verbatim}
url_plugin /path/to/local/copy/of/foo url://server/some/directory//foo
url_plugin /path/to/local/copy/of/bar url://server/some/directory//bar
url_plugin /path/to/local/copy/of/qux url://server/some/directory//qux
\end{verbatim}
\normalsize
Note that this functionality is not limited to a predefined set
of protocols.
New ones can be invented.
As an invented example,
the \verb@zkm@ transfer type writes random bytes to a file.
The plug-in that handles \verb@zkm@ transfers would respond to
invocation with the \Opt{-classad} command line argument with:
\footnotesize
\begin{verbatim}
PluginVersion = "0.1"
PluginType = "FileTransfer"
SupportedMethods = "zkm"
\end{verbatim}
\normalsize
And, then when a job requested that this plug-in be invoked,
for the invented example:
\footnotesize
\begin{verbatim}
transfer_input_files = zkm://128/r-data
\end{verbatim}
\normalsize
the plug-in will be invoked with a first command line argument
of \verb@zkm://128/r-data@ and a second command line argument giving
the full path along with the file name \File{r-data} as the location
for the plug-in to write 128 bytes of random data.
The transfer of output files in this manner was introduced
in Condor version 7.6.0.
Incompatibility and inability to function will result if the executables
for the \Condor{starter} and \Condor{shadow} are versions earlier
than Condor version 7.6.0.
Here is the expected behavior for these cases that
cannot be backward compatible.
\begin{itemize}
\item
If the \Condor{starter} version is earlier than 7.6.0,
then regardless of the \Condor{shadow} version,
transfer of output files, as identified in the submit description
file with the command \SubmitCmd{output\_destination} is ignored.
The files are transferred back to the submit machine.
\item
If the \Condor{starter} version is 7.6.0 or later,
but the \Condor{shadow} version is earlier than 7.6.0,
then the \Condor{starter} will attempt to send the command to the
\Condor{shadow}, but the \Condor{shadow} will ignore the command.
No files will be transferred, and the job will be placed on hold.
\end{itemize}
\documentclass[]{article}
\usepackage{amsmath}
%opening
\title{MTH 343 Numerical Analysis: Quiz 1}
\author{Sheikh Abdul Raheem Ali}
\begin{document}
\maketitle
\section*{PAGE 28/29:}
\begin{enumerate}
\item[1C] Compute the absolute and relative error in approximations of $ p \text{ by } p^* $.
\begin{align*}
p = \pi && p^* = 3.1416
\end{align*}
	\item[2A] Find the largest interval in which $ p^* $ must lie to approximate $ p $ with relative error at most $ 10^{-4} $.
\begin{align*}
p = \sqrt{2}
\end{align*}
\item[3A,B] Suppose $ p^* $ must approximate $ p $ with relative error at most $ 10^{-3} $. Find the largest interval in which $ p^* $ must lie for:
\begin{align*}
p_a = 150 && p_b = 900
\end{align*}
\item[4] Perform the following computations (i) exactly, (ii) using three-digit chopping arithmetic, and (iii) using three-digit rounding arithmetic. (iv) Compute the relative errors in parts (ii) and (iii).
\begin{align*}
\frac{4}{5} + \frac{1}{3}
\end{align*}
\begin{align*}
\frac{4}{5} \cdot \frac{1}{3}
\end{align*}
\begin{align*}
(\frac{1}{3} - \frac{3}{11}) + \frac{3}{20}
\end{align*}
\begin{align*}
(\frac{1}{3} + \frac{3}{11}) - \frac{3}{20}
\end{align*}
\item[5C,E,G] Use three-digit rounding arithmetic to perform the following calculations. Compute the absolute error and relative error with the exact value determined to at least five digits.
\begin{align*}
(121 - 0.327) - 119
\end{align*}
\begin{align*}
\frac{\frac{13}{14} - \frac{6}{7}}{2e - 5.4}
\end{align*}
\begin{align*}
(\frac{2}{9})\cdot(\frac{9}{7})
\end{align*}
\item[6E] Repeat exercise 5 using four digit rounding arithmetic.
\item[15 A,B] Use the 64-bit long real format to find the decimal equivalent of the following floating-point machine numbers:
\begin{enumerate}
\item 0 10000001010 1001001 100000000000000000000000000000000000000000
\item 1 10000001010 1001001 100000000000000000000000000000000000000000
\end{enumerate}
\end{enumerate}
\section*{PAGE 54/55:}
\begin{enumerate}
	\item[1] Use the Bisection method to find $ p_3 $ for $ f(x) = \sqrt{x} - \cos x $ on $ [0,1] $.
	\item[3 A,B] Use the Bisection method to find solutions accurate to within $ 10^{-2} $ for $ x^4 - 2x^3 - 4x^2 + 4x +4 = 0 $ on the intervals $ [-2,-1] $ and $ [0, 2] $.
\item[5 B,C 6B] Use the Bisection method to find solutions, accurate to within $ 10^{-5} $ for the following problems:
\begin{itemize}
\item $ 2x + 3\cos x - e^x = 0 $ for $ 1 \le x \le 2 $ and $ 2 \le x \le 4 $
\item $ x^2 - 4x + 4 - \ln x = 0 $ for $ 0 \le x \le 0.5 $ and $ 0.5 \le x \le 1 $
\item $ e^x - x^2 +3x - 2 = 0 $ for $ 0 \le x \le 1 $
\end{itemize}
	\item[11A] Let $ f(x) = (x+2)(x+1)x(x-1)^3(x-2) $. To which zero of $ f $ does the Bisection method converge when applied on the interval $ [-1.5, 2.5] $?
\item[13] Find an approximation to $ \sqrt{25} $ correct to within $ 10^{-4} $ using the Bisection Algorithm.
\end{enumerate}
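\bigskip
\noindent For reference, a short Python sketch of the Bisection method used throughout the problems above (illustrative only; the function, interval, and tolerance shown are placeholders):
\begin{verbatim}
import math

def bisect(f, a, b, tol=1e-5, max_iter=100):
    """Bisection method: f(a) and f(b) must have opposite signs."""
    fa = f(a)
    for _ in range(max_iter):
        p = a + (b - a) / 2.0   # midpoint of the current interval
        fp = f(p)
        if fp == 0 or (b - a) / 2.0 < tol:
            return p
        if fa * fp > 0:         # root lies in [p, b]
            a, fa = p, fp
        else:                   # root lies in [a, p]
            b = p
    return p

# Example: problem 1, f(x) = sqrt(x) - cos(x) on [0, 1]
p = bisect(lambda x: math.sqrt(x) - math.cos(x), 0.0, 1.0)
\end{verbatim}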
\end{document}
\documentclass[onecolumn, conference]{IEEEtran}
\IEEEoverridecommandlockouts
% The preceding line is only needed to identify funding in the first footnote. If that is unneeded, please comment it out.
\usepackage{cite}
\usepackage{amsmath,amssymb,amsfonts}
\usepackage{algorithmic}
\usepackage{graphicx}
\usepackage{textcomp}
\usepackage{xcolor}
\def\BibTeX{{\rm B\kern-.05em{\sc i\kern-.025em b}\kern-.08em
T\kern-.1667em\lower.7ex\hbox{E}\kern-.125emX}}
\begin{document}
\title{Tensor network and neural networks
}
\author{
\IEEEauthorblockN{Junjie He}
\IEEEauthorblockA{\textit{School of Information Science and Technology} \\
\textit{ShanghaiTech University}\\
Shanghai, China \\
[email protected]}
\and
\IEEEauthorblockN{Jiaqiong Zhang}
\IEEEauthorblockA{\textit{School of Information Science and Technology} \\
\textit{ShanghaiTech University}\\
Shanghai, China \\
[email protected]}
% \and
% \IEEEauthorblockN{3\textsuperscript{rd} Given Name Surname}
% \IEEEauthorblockA{\textit{dept. name of organization (of Aff.)} \\
% \textit{name of organization (of Aff.)}\\
% City, Country \\
% email address or ORCID}
% \and
% \IEEEauthorblockN{4\textsuperscript{th} Given Name Surname}
% \IEEEauthorblockA{\textit{dept. name of organization (of Aff.)} \\
% \textit{name of organization (of Aff.)}\\
% City, Country \\
% email address or ORCID}
% \and
% \IEEEauthorblockN{5\textsuperscript{th} Given Name Surname}
% \IEEEauthorblockA{\textit{dept. name of organization (of Aff.)} \\
% \textit{name of organization (of Aff.)}\\
% City, Country \\
% email address or ORCID}
% \and
% \IEEEauthorblockN{6\textsuperscript{th} Given Name Surname}
% \IEEEauthorblockA{\textit{dept. name of organization (of Aff.)} \\
% \textit{name of organization (of Aff.)}\\
% City, Country \\
% email address or ORCID}
}
\maketitle
% \begin{abstract}
% This document is a model and instructions for \LaTeX.
% This and the IEEEtran.cls file define the components of your paper [title, text, heads, etc.]. *CRITICAL: Do Not Use Symbols, Special Characters, Footnotes,
% or Math in Paper Title or Abstract.
% \end{abstract}
% \begin{IEEEkeywords}
% component, formatting, style, styling, insert
% \end{IEEEkeywords}
\section{Introduction}
The main topics of our project are tensor methods and neural networks. Deep neural networks currently demonstrate state-of-the-art performance in several domains, such as computer vision, speech recognition, and text processing. These advances have become possible because of algorithmic advances, large amounts of available data, and modern hardware. For example, convolutional neural networks (CNNs) \cite{b1}\cite{b2} show superior performance by a large margin on the task of image classification. These models have thousands of nodes and millions of learnable parameters and are trained using millions of images \cite{b3} on powerful Graphics Processing Units (GPUs). The necessity of expensive hardware and long processing times complicates the application of such models on conventional desktops and portable devices.
In a traditional neural network, the fully-connected layer applies a linear transformation to map a high-dimensional input signal to a high-dimensional output signal. For example, the data set CIFAR10, widely used in deep learning courses, is a collection of pictures. When a picture is used as an input signal to a neural network, it is divided into $32\times32\times3$ pixels; in particular, $32\times32$ means the picture is divided into $32\times32$ pixel blocks, and $3$ means three channels (RGB). The input signal is then reshaped into a $32\cdot32\cdot3$-dimensional vector. Obviously, such an operation largely increases the dimension of the input signal and leads to further computational complexity, as illustrated below. The data set CIFAR10 \cite{b5} is already a very simple data set in the field of deep learning. However, in the convolutional neural network models used in practical applications, the dimensions of the input and output signals of the fully-connected layers are of the order of thousands, bringing the number of parameters of the fully-connected layers up to millions. This is undoubtedly a very demanding requirement for hardware.
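For illustration only (this NumPy fragment is not part of our implementation), flattening a single CIFAR10 image and connecting it to a dense layer of the same width already requires a weight matrix with roughly nine million entries:
\begin{verbatim}
import numpy as np

image = np.zeros((32, 32, 3))   # one CIFAR10 image (placeholder values)
x = image.reshape(-1)           # input vector of length 32*32*3 = 3072
W = np.zeros((3072, 3072))      # dense layer of the same width:
                                # 3072 * 3072 = 9,437,184 parameters
y = W @ x                       # the fully-connected transformation
\end{verbatim}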
Consequently, a large number of works have tried to reduce both hardware requirements (e.g., memory demands) and running times. To address this problem, we consider the most frequently used layer of a neural network: the fully-connected layer. We use the compact Tensor Train format to represent the matrix of the fully-connected layer with few parameters while keeping enough flexibility to perform signal transformations \cite{b6}. The transformed layer remains compatible with the existing training algorithms for neural networks, because all the derivatives required by the back-propagation algorithm \cite{b4} can be computed using the properties of the Tensor Train format.
Tensors are natural multidimensional generalizations of matrices and have attracted tremendous interest in recent years. Multilinear algebra, tensor analysis, and the theory of tensor approximations play increasingly important roles in computational mathematics and numerical analysis \cite{b7}\cite{b8}\cite{b9}\cite{b10}. An efficient representation of a tensor (by tensor we mean only an array with $d$ indices) by a small number of parameters may give us the opportunity and ability to work with $d$-dimensional problems, with $d$ being as high as 10, 100, 1000 or even one million. Problems of such sizes cannot be handled by standard numerical methods due to the curse of dimensionality, since everything (memory, amount of operations) grows exponentially in $d$. The Tensor Train decomposition is an effective way to address this problem.
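To make the storage gain concrete (a standard count, not tied to any particular network): a $d$-dimensional tensor with $n$ values per mode stores $n^d$ numbers, whereas a representation in the Tensor Train format introduced below, with all ranks bounded by $r$, stores at most
\begin{equation*}
d\,n\,r^2
\end{equation*}
numbers, which grows linearly rather than exponentially in $d$.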
We will apply our method to popular network architectures proposed for the CIFAR10 dataset. We will train one network with a traditional fully-connected layer and one with the tensorized fully-connected layer, and then compare the performance of the two models.
\section{Preliminary ideas}
In various fields, low-rank approximation has been applied to reduce computation cost and memory usage. The authors of \cite{b11} generalize this idea: instead of computing a low-rank approximation of the weight matrix of a fully-connected layer, they treat the matrix as a multidimensional tensor and employ the Tensor Train decomposition \cite{b6} to accelerate the computation.
Usually, a wider neural network achieves better performance than a narrow one. But wide neural networks imply large dense weight matrices, so a large amount of computational resources is consumed per step when training them. By applying the Tensor Train decomposition to the weight matrix, wide neural networks can be built for applications with moderate computation cost and memory usage.
Ba and Caruana \cite{b12} show that wide and shallow neural networks can achieve performance competitive with state-of-the-art deep neural networks by training a shallow network on the outputs of a trained deep neural network.
They report improved performance as the layer size increases and use up to 30\,000 hidden units, while restricting the matrix rank of the weight matrix in order to be able to keep and update it during training. Restricting the TT-ranks of the weight matrix (in contrast to the matrix rank) allows the use of much wider layers, potentially leading to greater expressive power of the model.
\par The CP-decomposition has been applied to compress convolution kernels in CNNs, and its properties have also been used to speed up inference.
To speed up matrix-by-vector computations, properties of the Kronecker product of matrices have been exploited. Such matrices have the same structure as TT-matrices with unit TT-ranks; this idea can be generalized by formulating the weight matrix as a TT-matrix with arbitrary TT-ranks.
The Tucker format suffers from the curse of dimensionality and the canonical format lacks robust algorithms, whereas the TT-format is immune to the curse of dimensionality and its algorithms are robust.
\par A $d$-dimensional array (tensor) $ \mathcal{A}$ is said to be in the TT-format if for each dimension $k=1,...,d$ and for each possible value of the $k$-th dimension index $j_k=1,...,n_k$ there exists a matrix $\mathbf{G}_k[j_k]$ such that all elements of $\mathcal{A}$ can be computed as the following matrix product:
\begin{equation}
\label{eq1}
\mathcal{A}(j_1,...,j_d)=\mathbf{G}_1[j_1]\mathbf{G}_2[j_2]\cdots\mathbf{G}_d[j_d]
\end{equation}
All the matrices $\mathbf{G}_k[j_k]$ related to the same dimension $k$ are restricted to be of the same size $r_{k-1}\times r_k$.
The values $r_0$ and $r_d$ equal 1 in order to keep the matrix product (\ref{eq1}) of size $1\times 1$.
In what follows we refer to the representation of a tensor in the TT-format as the TT-representation or the TT-decomposition.
The sequence $\{r_k \}^d_{k=0}$ is referred to as the TT-ranks of the TT-representation of $\mathcal{A}$
(or the ranks for short), and its maximum as the maximal TT-rank of the TT-representation of $\mathcal{A}$: $r = \max_{k=0,...,d} r_k$.
The collections of the matrices $(\mathbf{G}_k[j_k])_{j_k=1}^{n_k}$ corresponding to the same dimension $k$ (technically, 3-dimensional arrays $\mathcal{G}_k$) are called the cores.
We use the symbols $\mathbf{G}_k[j_k](\alpha_{k-1},\alpha_k)$ to denote the element of the matrix $\mathbf{G}_k[j_k]$ in the position $(\alpha_{k-1},\alpha_k)$,
where $\alpha_{k-1}=1,...,r_{k-1},\alpha_k=1,...,r_k$. Equation (\ref{eq1}) can be equivalently rewritten as the sum of the products of the elements of the cores:
\begin{equation}
\label{eq2}
\mathcal{A}(j_1,...,j_d)=\sum_{\alpha_0,...,\alpha_d} \mathbf{G}_1[j_1](\alpha_{0},\alpha_1)\cdots \mathbf{G}_d[j_d](\alpha_{d-1},\alpha_d)
\end{equation}
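For intuition (a standard observation about the format, not a contribution of this work), note that for $d=2$ the TT-format reduces to an ordinary low-rank matrix factorization:
\[
\mathcal{A}(j_1,j_2)=\sum_{\alpha_1=1}^{r_1}\mathbf{G}_1[j_1](1,\alpha_1)\,\mathbf{G}_2[j_2](\alpha_1,1),
\]
i.e.\ $\mathcal{A}=\mathbf{U}\mathbf{V}$ with $\mathbf{U}(j_1,\alpha_1)=\mathbf{G}_1[j_1](1,\alpha_1)$ and $\mathbf{V}(\alpha_1,j_2)=\mathbf{G}_2[j_2](\alpha_1,1)$, so the single TT-rank $r_1$ plays the role of the matrix rank.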
The representation of a tensor $\mathcal{A}$ via the explicit enumeration of all its elements requires storing
$\prod_{k=1}^d n_k$ numbers, compared with $\sum_{k=1}^d n_k r_{k-1} r_k$ numbers if the tensor is stored in the TT-format.
Thus, the TT-format is very efficient in terms of memory if the ranks are small.
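As a small illustrative calculation (our own numbers, chosen only for concreteness): for $d=4$, $n_k=8$ for all $k$, and TT-ranks $r_1=r_2=r_3=4$, explicit storage requires
\[
\prod_{k=1}^{4} n_k = 8^4 = 4096
\qquad\mbox{versus}\qquad
\sum_{k=1}^{4} n_k r_{k-1} r_k = 8\,(1\cdot 4 + 4\cdot 4 + 4\cdot 4 + 4\cdot 1) = 320
\]
numbers in the TT-format, and the gap widens exponentially as $d$ grows.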
An attractive property of the TT-decomposition is the ability to efficiently perform several types of operations on tensors if they are in the TT-format: basic linear algebra operations, such as the addition of a constant and the multiplication by a constant, the summation and the entrywise product of tensors (the results of these operations are tensors in the TT-format generally with the increased ranks); computation of global characteristics of a tensor, such as the sum of all elements and the Frobenius norm.
See \cite{b6} for a detailed description of all the supported operations.
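For example (a standard property of the format, stated here only for intuition), the TT-ranks of the sum of two tensors given in the TT-format are bounded by the sums of the corresponding ranks,
\[
r_k(\mathcal{A}+\mathcal{B}) \le r_k(\mathcal{A}) + r_k(\mathcal{B}), \qquad k=0,\dots,d,
\]
which is why such operations may increase the ranks and are typically followed by a rounding (rank-truncation) step.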
\par
Neural networks are usually trained with the stochastic gradient descent algorithm, where the gradient is computed using the back-propagation procedure.
Back-propagation computes the gradient of a loss function $L$ with respect to all the parameters of the network.
The method starts with the gradient of $L$ w.r.t. the output of the last layer and proceeds sequentially through the layers in reverse order, computing the gradient w.r.t. the parameters and the input of each layer while making use of the gradients computed earlier.
Applied to a fully-connected layer ${\bf y}={\bf W}{\bf x}+{\bf b}$, the back-propagation method computes the gradients w.r.t. the input ${\bf x}$ and the parameters ${\bf W}$ and ${\bf b}$ given the gradient $\frac{\partial L}{\partial {\bf y}}$ w.r.t. the output ${\bf y}$:
\begin{equation}
\label{gradient}
\frac{\partial L}{\partial {\bf x}}={\bf W}^T \frac{\partial L}{\partial {\bf y}},\qquad
\frac{\partial L}{\partial {\bf W}}=\frac{\partial L}{\partial {\bf y}}\,{\bf x}^T,\qquad
\frac{\partial L}{\partial {\bf b}}=\frac{\partial L}{\partial {\bf y}}.
\end{equation}
In what follows we derive the gradients required to use the back-propagation algorithm with the tensorizing layers.
To compute the gradient of the loss function w.r.t. the bias vector ${\bf b}$ and w.r.t. the input vector ${\bf x}$ one can use equations (\ref{gradient}).
The latter can be applied using the matrix-by-vector product (where the matrix is in the TT-format) with the complexity of $\mathcal{O}(dr^2n\max\{m,n\}^d)=\mathcal{O}(dr^2n\max\{M,N\})$.
To perform a step of stochastic gradient descent, we can use traditional back-propagation in the computational graph to compute the gradient of the loss function w.r.t. the weight matrix $\mathbf{W}$, and then convert the gradient matrix into the TT-format using the TT-SVD algorithm. Another way to learn the TensorNet parameters is to
compute the gradient of the loss function directly w.r.t. the cores of the TT-representation of $\mathbf{W}$.
\par For high-dimensional matrices, the TT-SVD algorithm runs into the curse of dimensionality: its computational cost grows very quickly (e.g., exponentially), which makes it difficult to employ the TT-format in neural networks. A randomized tensor train singular value decomposition \cite{b14} alleviates part of this cost.
Each of the existing TT decomposition algorithms, including the TT-SVD and the randomized TT-SVD, is successful in its own setting, but neither can both accurately and efficiently decompose large-scale sparse tensors. \cite{b13} proposes a new quasi-best fast TT decomposition algorithm for large-scale sparse tensors with proven correctness, and an upper bound on its complexity is derived. In numerical experiments, the authors verify that the proposed algorithm decomposes sparse tensors faster than the TT-SVD, offers more speed, precision, and versatility than the randomized TT-SVD \cite{b14}, and can decompose arbitrary high-dimensional tensors without losing efficiency as long as the number of non-zero elements is limited.
A faster TT-SVD algorithm can thus be integrated into tensorizing neural networks and should make it more efficient to solve the problem at large scale.
\section{Experiments}
In all experiments we will use the MATLAB extension\footnote{https://github.com/Bihaqo/TensorNet} of the MatConvNet framework\footnote{http://www.vlfeat.org/matconvnet}. For the operations related to the TT-format we use the TT-Toolbox\footnote{https://github.com/oseledets/TT-Toolbox}, also implemented in MATLAB.
To show the properties of the TT-layer and to compare different strategies for setting its parameters (the dimensions of the tensors representing the input/output of the layer and the TT-ranks of the compressed weight matrix), we run an experiment on the MNIST dataset for the task of handwritten-digit recognition.
As a baseline we use a neural network with two fully-connected layers (1024 hidden units) and rectified linear unit (ReLU) activations, and we report the error on the test set.
For more reshaping options we resize the original $28\times 28$ images to $32\times 32$.
Furthermore, we will train several networks differing in the parameters of the single TT-layer. The networks contain the following layers: the TT-layer with a weight matrix of size $1024\times 1024$, ReLU, and a fully-connected layer with a weight matrix of size $1024\times 10$.
We test different ways of reshaping the input/output tensors and try different ranks of the TT-layer.
As a simple compression baseline in place of the TT-layer, we use a fully-connected layer with a bounded weight-matrix rank (implemented as two consecutive fully-connected layers with weight matrices of sizes $1024\times r$ and $r\times 1024$, where $r$ controls the matrix rank and the compression factor).
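For orientation (our own arithmetic, not an experimental result): the uncompressed $1024\times 1024$ weight matrix stores $1024^2=1\,048\,576$ parameters, whereas the low-rank baseline with factors of sizes $1024\times r$ and $r\times 1024$ stores $2\cdot 1024\,r$ parameters, giving a compression factor of
\[
\frac{1024\cdot 1024}{2\cdot 1024\cdot r}=\frac{512}{r},
\]
so, for instance, $r=64$ corresponds to an $8\times$ reduction in the number of parameters of that layer.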
\begin{thebibliography}{00}
\bibitem{b1} A. Krizhevsky, I. Sutskever, and G. E. Hinton, ``Imagenet classification with deep convolutional neural networks,'' in Advances in Neural Information Processing Systems 25 (NIPS), 2012, pp. 1097–1105.
\bibitem{b2} K. Simonyan and A. Zisserman, ``Very deep convolutional networks for large-scale image recognition,'' in International Conference on Learning Representations (ICLR), 2015.
\bibitem{b3} O. Russakovsky, J. Deng, H. Su, J. Krause, S. Satheesh, S. Ma, Z. Huang, A. Karpathy, A. Khosla, M. Bernstein, A. C. Berg, and L. Fei-Fei, ``Imagenet large scale visual recognition challenge,'' International Journal of Computer Vision (IJCV), 2015.
\bibitem{b4} D. E. Rumelhart, G. E. Hinton, and R. J. Williams, ``Learning representations by back-propagating errors,'' Nature, vol. 323, no. 6088, pp. 533–536, 1986.
\bibitem{b5} A. Krizhevsky, ``Learning multiple layers of features from tiny images,'' Master's thesis, Computer Science Department, University of Toronto, 2009.
\bibitem{b6} I. V. Oseledets, ``Tensor-Train decomposition,'' SIAM J. Scientific Computing, vol. 33, no. 5, pp. 2295–2317, 2011.
\bibitem{b7} L. de Lathauwer, B. de Moor, and J. Vandewalle, ``A multilinear singular value decomposition,'' SIAM J. Matrix Anal. Appl., vol. 21, pp. 1253–1278, 2000.
\bibitem{b8} L. de Lathauwer, B. de Moor, and J. Vandewalle, ``On the best rank-1 and rank-($R_1, R_2,\ldots,R_N$) approximation of higher-order tensors,'' SIAM J. Matrix Anal. Appl., vol. 21, pp. 1324–1342, 2000.
\bibitem{b9} R. Bro, ``PARAFAC: Tutorial and applications,'' Chemometrics Intell. Lab. Syst., vol. 38, pp. 149–171, 1997.
\bibitem{b10} L. Grasedyck, ``Existence and computation of low Kronecker-rank approximations for large systems in tensor product structure,'' Computing, vol. 72, pp. 247–265, 2004.
\bibitem{b11} A. Novikov, D. Podoprikhin, A. Osokin, and D. P. Vetrov, ``Tensorizing neural networks,'' in Advances in Neural Information Processing Systems (NIPS), 2015, pp. 442–450.
\bibitem{b12} J. Ba and R. Caruana, ``Do deep nets really need to be deep?'' in Advances in Neural Information Processing Systems 27 (NIPS), 2014, pp. 2654–2662.
\bibitem{b13} L. Li, W. Yu, and K. Batselier, ``Faster tensor train decomposition for sparse data,'' arXiv preprint arXiv:1908.02721, 2019.
\bibitem{b14} B. Huber, R. Schneider, and S. Wolf, ``A randomized tensor train singular value decomposition,'' in Compressed Sensing and its Applications, Birkhäuser, Cham, 2017, pp. 261–290.
\end{thebibliography}
\vspace{12pt}
\end{document}
% Options for packages loaded elsewhere
\PassOptionsToPackage{unicode}{hyperref}
\PassOptionsToPackage{hyphens}{url}
%
\documentclass[
]{article}
\title{Internet News and Consumer Engagement}
\author{Muhammad Daaboul}
\date{17 October 2021}
\usepackage{amsmath,amssymb}
\usepackage{lmodern}
\usepackage{iftex}
\ifPDFTeX
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{textcomp} % provide euro and other symbols
\else % if luatex or xetex
\usepackage{unicode-math}
\defaultfontfeatures{Scale=MatchLowercase}
\defaultfontfeatures[\rmfamily]{Ligatures=TeX,Scale=1}
\fi
% Use upquote if available, for straight quotes in verbatim environments
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
\IfFileExists{microtype.sty}{% use microtype if available
\usepackage[]{microtype}
\UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts
}{}
\makeatletter
\@ifundefined{KOMAClassName}{% if non-KOMA class
\IfFileExists{parskip.sty}{%
\usepackage{parskip}
}{% else
\setlength{\parindent}{0pt}
\setlength{\parskip}{6pt plus 2pt minus 1pt}}
}{% if KOMA class
\KOMAoptions{parskip=half}}
\makeatother
\usepackage{xcolor}
\IfFileExists{xurl.sty}{\usepackage{xurl}}{} % add URL line breaks if available
\IfFileExists{bookmark.sty}{\usepackage{bookmark}}{\usepackage{hyperref}}
\hypersetup{
pdftitle={Internet News and Consumer Engagement},
pdfauthor={Muhammad Daaboul},
hidelinks,
pdfcreator={LaTeX via pandoc}}
\urlstyle{same} % disable monospaced font for URLs
\usepackage[margin=1in]{geometry}
\usepackage{color}
\usepackage{fancyvrb}
\newcommand{\VerbBar}{|}
\newcommand{\VERB}{\Verb[commandchars=\\\{\}]}
\DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}}
% Add ',fontsize=\small' for more characters per line
\usepackage{framed}
\definecolor{shadecolor}{RGB}{248,248,248}
\newenvironment{Shaded}{\begin{snugshade}}{\end{snugshade}}
\newcommand{\AlertTok}[1]{\textcolor[rgb]{0.94,0.16,0.16}{#1}}
\newcommand{\AnnotationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\AttributeTok}[1]{\textcolor[rgb]{0.77,0.63,0.00}{#1}}
\newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}}
\newcommand{\BuiltInTok}[1]{#1}
\newcommand{\CharTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\CommentTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}}
\newcommand{\CommentVarTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\ConstantTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\ControlFlowTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}}
\newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{#1}}
\newcommand{\DecValTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}}
\newcommand{\DocumentationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\ErrorTok}[1]{\textcolor[rgb]{0.64,0.00,0.00}{\textbf{#1}}}
\newcommand{\ExtensionTok}[1]{#1}
\newcommand{\FloatTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}}
\newcommand{\FunctionTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\ImportTok}[1]{#1}
\newcommand{\InformationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}}
\newcommand{\NormalTok}[1]{#1}
\newcommand{\OperatorTok}[1]{\textcolor[rgb]{0.81,0.36,0.00}{\textbf{#1}}}
\newcommand{\OtherTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{#1}}
\newcommand{\PreprocessorTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}}
\newcommand{\RegionMarkerTok}[1]{#1}
\newcommand{\SpecialCharTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\SpecialStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\StringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\VariableTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\VerbatimStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\WarningTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\usepackage{longtable,booktabs,array}
\usepackage{calc} % for calculating minipage widths
% Correct order of tables after \paragraph or \subparagraph
\usepackage{etoolbox}
\makeatletter
\patchcmd\longtable{\par}{\if@noskipsec\mbox{}\fi\par}{}{}
\makeatother
% Allow footnotes in longtable head/foot
\IfFileExists{footnotehyper.sty}{\usepackage{footnotehyper}}{\usepackage{footnote}}
\makesavenoteenv{longtable}
\usepackage{graphicx}
\makeatletter
\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi}
\def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi}
\makeatother
% Scale images if necessary, so that they will not overflow the page
% margins by default, and it is still possible to overwrite the defaults
% using explicit options in \includegraphics[width, height, ...]{}
\setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio}
% Set default figure placement to htbp
\makeatletter
\def\fps@figure{htbp}
\makeatother
\setlength{\emergencystretch}{3em} % prevent overfull lines
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\setcounter{secnumdepth}{-\maxdimen} % remove section numbering
\ifLuaTeX
\usepackage{selnolig} % disable illegal ligatures
\fi
\begin{document}
\maketitle
\includegraphics{banner.png}
Ready to put your coding skills to the test? Join us for our Workspace
Competition.\\
For more information, visit
\href{https://datacamp.com/workspacecompetition}{datacamp.com/workspacecompetition}
\hypertarget{context}{%
\subsubsection{Context}\label{context}}
This dataset
(\href{https://www.kaggle.com/szymonjanowski/internet-articles-data-with-users-engagement}{source})
consists of data about news articles collected from Sept.~3, 2019 until
Nov.~4, 2019. Afterwards, it is enriched by Facebook engagement data,
such as number of shares, comments and reactions. It was first created
to predict the popularity of an article before it was published.
However, there is a lot more you can analyze; take a look at some
suggestions at the end of this template.
\hypertarget{load-packages}{%
\subsubsection{Load packages}\label{load-packages}}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{library}\NormalTok{(skimr)}
\FunctionTok{library}\NormalTok{(tidyverse)}
\end{Highlighting}
\end{Shaded}
\hypertarget{load-your-data}{%
\subsubsection{Load your Data}\label{load-your-data}}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{articles }\OtherTok{\textless{}{-}}\NormalTok{ readr}\SpecialCharTok{::}\FunctionTok{read\_csv}\NormalTok{(}\StringTok{\textquotesingle{}data/news\_articles.csv.gz\textquotesingle{}}\NormalTok{)}
\NormalTok{articles}\SpecialCharTok{$}\NormalTok{source\_id }\OtherTok{\textless{}{-}} \FunctionTok{as.factor}\NormalTok{(articles}\SpecialCharTok{$}\NormalTok{source\_id)}
\NormalTok{articles}\SpecialCharTok{$}\NormalTok{source\_name }\OtherTok{\textless{}{-}} \FunctionTok{as.factor}\NormalTok{(articles}\SpecialCharTok{$}\NormalTok{source\_name)}
\FunctionTok{skim}\NormalTok{(articles) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{select}\NormalTok{(}\SpecialCharTok{{-}}\NormalTok{(numeric.p0}\SpecialCharTok{:}\NormalTok{numeric.p100)) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{select}\NormalTok{(}\SpecialCharTok{{-}}\NormalTok{(complete\_rate))}
\end{Highlighting}
\end{Shaded}
\begin{longtable}[]{@{}ll@{}}
\caption{Data summary}\tabularnewline
\toprule
\endhead
Name & articles \\
Number of rows & 10437 \\
Number of columns & 15 \\
\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_ & \\
Column type frequency: & \\
character & 6 \\
factor & 2 \\
numeric & 6 \\
POSIXct & 1 \\
\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_ & \\
Group variables & None \\
\bottomrule
\end{longtable}
\textbf{Variable type: character}
\begin{longtable}[]{@{}lrrrrrr@{}}
\toprule
skim\_variable & n\_missing & min & max & empty & n\_unique &
whitespace \\
\midrule
\endhead
author & 1020 & 2 & 184 & 0 & 2580 & 0 \\
title & 2 & 5 & 250 & 0 & 9810 & 0 \\
description & 24 & 3 & 266 & 0 & 9173 & 0 \\
url & 1 & 34 & 325 & 0 & 10433 & 0 \\
url\_to\_image & 656 & 31 & 261 & 0 & 8363 & 0 \\
content & 1292 & 26 & 276 & 0 & 8385 & 0 \\
\bottomrule
\end{longtable}
\textbf{Variable type: factor}
\begin{longtable}[]{@{}lrlrl@{}}
\toprule
skim\_variable & n\_missing & ordered & n\_unique & top\_counts \\
\midrule
\endhead
source\_id & 0 & FALSE & 13 & reu: 1252, bbc: 1242, the: 1232, abc:
1139 \\
source\_name & 0 & FALSE & 13 & Reu: 1252, BBC: 1242, The: 1232, ABC:
1139 \\
\bottomrule
\end{longtable}
\textbf{Variable type: numeric}
\begin{longtable}[]{@{}lrrrl@{}}
\toprule
skim\_variable & n\_missing & mean & sd & hist \\
\midrule
\endhead
\ldots1 & 0 & 5218.00 & 3013.05 & ▇▇▇▇▇ \\
top\_article & 2 & 0.12 & 0.33 & ▇▁▁▁▁ \\
engagement\_reaction\_count & 118 & 381.40 & 4433.34 & ▇▁▁▁▁ \\
engagement\_comment\_count & 118 & 124.03 & 965.35 & ▇▁▁▁▁ \\
engagement\_share\_count & 118 & 196.24 & 1020.68 & ▇▁▁▁▁ \\
engagement\_comment\_plugin\_count & 118 & 0.01 & 0.27 & ▇▁▁▁▁ \\
\bottomrule
\end{longtable}
\textbf{Variable type: POSIXct}
\begin{longtable}[]{@{}lrlllr@{}}
\toprule
skim\_variable & n\_missing & min & max & median & n\_unique \\
\midrule
\endhead
published\_at & 1 & 2019-09-03 & 2019-10-03 17:49:31 & 2019-09-12
18:32:38 & 9439 \\
\bottomrule
\end{longtable}
\hypertarget{understand-your-data}{%
\subsubsection{Understand your data}\label{understand-your-data}}
\begin{longtable}[]{@{}
>{\raggedright\arraybackslash}p{(\columnwidth - 2\tabcolsep) * \real{0.30}}
>{\raggedright\arraybackslash}p{(\columnwidth - 2\tabcolsep) * \real{0.70}}@{}}
\toprule
\begin{minipage}[b]{\linewidth}\raggedright
Variable.
\end{minipage} & \begin{minipage}[b]{\linewidth}\raggedright
Description
\end{minipage} \\
\midrule
\endhead
source\_id & publisher unique identifier \\
source\_name & human-readable publisher name \\
author & article author \\
title & article headline \\
description & article short description \\
url & article URL from publisher website \\
url\_to\_image & URL to main image associated with the article \\
published\_at & exact time and date of publishing the article \\
content & unformatted content of the article truncated to 260
characters \\
top\_article & value indicating if article was listed as a top article
on publisher website \\
engagement\_reaction\_count & users reactions count for posts on
Facebook involving article URL \\
engagement\_comment\_count & users comments count for posts on Facebook
involving article URL \\
engagement\_share\_count & users shares count for posts on Facebook
involving article URL \\
engagement\_comment\_plugin\_count & Users comments count for Facebook
comment plugin on article website \\
\bottomrule
\end{longtable}
Now you can start to explore this dataset with the chance to win
incredible prizes! Can't think of where to start? Try your hand at these
suggestions:
\begin{itemize}
\tightlist
\item
Extract useful insights and visualize them in the most interesting way
possible.
\item
Categorize the articles into different categories based on, for
example, sentiment.
\item
Cluster the news articles, authors or publishers based on, for
example, topic.
\item
Make a title generator based on data such as content, description,
etc.
\end{itemize}
\hypertarget{judging-criteria}{%
\subsubsection{Judging Criteria}\label{judging-criteria}}
\begin{longtable}[]{@{}lll@{}}
\toprule
CATEGORY & WEIGHTAGE & DETAILS \\
\midrule
\endhead
\textbf{Analysis} & 30\% & \\
\textbf{Results} & 30\% & \\
\textbf{Creativity} & 40\% & \\
\bottomrule
\end{longtable}
\hypertarget{understanding-our-dataset}{%
\subsection{Understanding our dataset}\label{understanding-our-dataset}}
We observe that the \texttt{articles} dataset has 10,436 articles that
have been published by 12 of the biggest names in the news industry. Our
objective is to gain a deeper understanding of the relationship between
these different news providers with particular emphasis on Reuters. We
intend to cover the following four main areas in our analysis:
\begin{itemize}
\tightlist
\item
Topic correlation
\item
Sentiment analysis
\item
Topic modeling for cluster analysis\\
\item
Understand the main triggers behind popularity
\end{itemize}
We present below an extract from the first row of this dataset to help
build the reader's intuition for the main elements that will be used in
our analysis.
\textbf{Source}: 10
\textbf{Author}: Reuters Editorial
\textbf{Published at}: 1567527740
\textbf{URL TO IMAGE}:
\includegraphics{car.png}
\textbf{Title}: NTSB says Autopilot engaged in 2018 California Tesla
crash
\textbf{Description}: The National Transportation Safety Board said
Tuesday a Tesla Model S was in Autopilot mode when it struck a fire
truck in Culver City, California -- one of a series of crashes the board
is investigating involving Tesla's driver assistance system.
\textbf{Content}: WASHINGTON (Reuters) - The National Transportation
Safety Board said Tuesday a Tesla Model S was in Autopilot mode when it
struck a fire truck in Culver City, California one of a series of
crashes the board is investigating involving Tesla's driver
assistance\ldots{} {[}+478 chars{]}
\textbf{Reaction}: 0 times
\includegraphics{facebook-reactions.png}
\textbf{Comments}: 0 times
\textbf{Shares}: 2528 times
This article, however, was not recorded to have had any reactions or
comments, but it was shared 2528 times! Other engagement fields that are
included in the dataset but recorded nothing for article no. 1 are:
\begin{itemize}
\tightlist
\item
engagement\_reaction\_count
\item
engagement\_comment\_count
\item
engagement\_comment\_plugin\_count
\end{itemize}
Now that we have a feel for the contents, we can also use the
\texttt{str()} function, as shown below, to view the structure of the
first article:
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{str}\NormalTok{(articles[}\DecValTok{1}\NormalTok{,])}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## tibble [1 x 15] (S3: tbl_df/tbl/data.frame)
## $ ...1 : num 0
## $ source_id : Factor w/ 13 levels "1","abc-news",..: 10
## $ source_name : Factor w/ 13 levels "460.0","ABC News",..: 10
## $ author : chr "Reuters Editorial"
## $ title : chr "NTSB says Autopilot engaged in 2018 California Tesla crash"
## $ description : chr "The National Transportation Safety Board said Tuesday a Tesla Model S was in Autopilot mode when it struck a fi"| __truncated__
## $ url : chr "https://www.reuters.com/article/us-tesla-crash-idUSKCN1VO22E"
## $ url_to_image : chr "https://s4.reutersmedia.net/resources/r/?m=02&d=20190903&t=2&i=1425817142&w=1200&r=LYNXNPEF821HS"
## $ published_at : POSIXct[1:1], format: "2019-09-03 16:22:20"
## $ content : chr "WASHINGTON (Reuters) - The National Transportation Safety Board said Tuesday a Tesla Model S was in Autopilot m"| __truncated__
## $ top_article : num 0
## $ engagement_reaction_count : num 0
## $ engagement_comment_count : num 0
## $ engagement_share_count : num 2528
## $ engagement_comment_plugin_count: num 0
\end{verbatim}
We will also use the \texttt{source\_name} column going forward rather
than the \texttt{source\_id} column. We have verified below that the two
columns map onto each other one-to-one, and have chosen the former simply
because it looks neater and is more suitable for the graphical
representations that follow.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{articles }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{count}\NormalTok{(source\_id, source\_name)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## # A tibble: 13 x 3
## source_id source_name n
## <fct> <fct> <int>
## 1 1 460.0 1
## 2 abc-news ABC News 1139
## 3 al-jazeera-english Al Jazeera English 499
## 4 bbc-news BBC News 1242
## 5 business-insider Business Insider 1048
## 6 cbs-news CBS News 952
## 7 cnn CNN 1132
## 8 espn ESPN 82
## 9 newsweek Newsweek 539
## 10 reuters Reuters 1252
## 11 the-irish-times The Irish Times 1232
## 12 the-new-york-times The New York Times 986
## 13 the-wall-street-journal The Wall Street Journal 333
\end{verbatim}
We start by summarizing the main article engagement metrics gathered. It
would be misleading to compare the total number of reactions, comments, or
shares irrespective of the number of articles related to each news
provider. We will therefore present the \texttt{mean}, which allows a
reasonable comparison of users' behaviour towards the different news
source providers.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{articles }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{filter}\NormalTok{(}\SpecialCharTok{!}\NormalTok{source\_name }\SpecialCharTok{==} \StringTok{"460.0"}\NormalTok{) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{group\_by}\NormalTok{(source\_name) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{summarise}\NormalTok{(}\AttributeTok{reaction =} \FunctionTok{mean}\NormalTok{(engagement\_reaction\_count),}
\AttributeTok{comment =} \FunctionTok{mean}\NormalTok{(engagement\_comment\_count),}
\AttributeTok{share =} \FunctionTok{mean}\NormalTok{(engagement\_share\_count)) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{gather}\NormalTok{(}\AttributeTok{key =} \StringTok{"comment\_type"}\NormalTok{, }\AttributeTok{value =} \StringTok{"count"}\NormalTok{, }\SpecialCharTok{{-}}\NormalTok{source\_name, }\AttributeTok{na.rm =} \ConstantTok{TRUE}\NormalTok{) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{ggplot}\NormalTok{(}\FunctionTok{aes}\NormalTok{(}\FunctionTok{fct\_reorder}\NormalTok{(source\_name, count), count, }\AttributeTok{fill=}\NormalTok{comment\_type)) }\SpecialCharTok{+}
\FunctionTok{geom\_col}\NormalTok{(}\AttributeTok{position =} \StringTok{"dodge"}\NormalTok{) }\SpecialCharTok{+}
\FunctionTok{coord\_flip}\NormalTok{() }\SpecialCharTok{+}
\FunctionTok{ylab}\NormalTok{(}\StringTok{""}\NormalTok{) }\SpecialCharTok{+}
\FunctionTok{xlab}\NormalTok{(}\StringTok{""}\NormalTok{) }\SpecialCharTok{+}
\FunctionTok{guides}\NormalTok{(}\AttributeTok{fill=}\FunctionTok{guide\_legend}\NormalTok{(}\AttributeTok{title=}\StringTok{"Reaction Types"}\NormalTok{))}
\end{Highlighting}
\end{Shaded}
\begin{figure}
{\centering \includegraphics{index_files/figure-latex/unnamed-chunk-4-1}
}
\caption{Figure 1 - Average number of reaction types per article}\label{fig:unnamed-chunk-4}
\end{figure}
There are some really interesting observations to take away here. Reuters
users are much more likely to share an article, but otherwise there is a
clear pattern in which a \texttt{reaction} is the most common engagement
type, followed by \texttt{comments} and then \texttt{shares}.
Additionally, CNN appears to be the most popular source overall.
\hypertarget{data-wrangling}{%
\subparagraph{\texorpdfstring{\textbf{Data
Wrangling}}{Data Wrangling}}\label{data-wrangling}}
We will start with some data cleansing and remove the ``460.0'' source,
as any results inferred from it would be statistically invalid given that
it has only one occurrence.
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{library}\NormalTok{(janitor)}
\NormalTok{articles }\OtherTok{\textless{}{-}}\NormalTok{ articles }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{clean\_names}\NormalTok{() }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{rename}\NormalTok{(}\AttributeTok{id =}\NormalTok{ x1) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{mutate}\NormalTok{(}\AttributeTok{id =} \FunctionTok{row\_number}\NormalTok{()) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{filter}\NormalTok{(source\_name }\SpecialCharTok{!=} \StringTok{"460.0"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
Now we'll create a unique article id by newspaper agency.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{clean\_articles }\OtherTok{\textless{}{-}}\NormalTok{ articles }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{group\_by}\NormalTok{(source\_name) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{mutate}\NormalTok{(}\AttributeTok{reference =} \FunctionTok{row\_number}\NormalTok{()) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{ungroup}\NormalTok{() }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{mutate}\NormalTok{(}\AttributeTok{article\_id =} \FunctionTok{paste0}\NormalTok{(source\_name, }\StringTok{"\_"}\NormalTok{, reference)) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{select}\NormalTok{(}\SpecialCharTok{{-}}\NormalTok{reference)}
\FunctionTok{str}\NormalTok{(clean\_articles[}\DecValTok{1}\NormalTok{,])}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## tibble [1 x 16] (S3: tbl_df/tbl/data.frame)
## $ id : int 1
## $ source_id : Factor w/ 13 levels "1","abc-news",..: 10
## $ source_name : Factor w/ 13 levels "460.0","ABC News",..: 10
## $ author : chr "Reuters Editorial"
## $ title : chr "NTSB says Autopilot engaged in 2018 California Tesla crash"
## $ description : chr "The National Transportation Safety Board said Tuesday a Tesla Model S was in Autopilot mode when it struck a fi"| __truncated__
## $ url : chr "https://www.reuters.com/article/us-tesla-crash-idUSKCN1VO22E"
## $ url_to_image : chr "https://s4.reutersmedia.net/resources/r/?m=02&d=20190903&t=2&i=1425817142&w=1200&r=LYNXNPEF821HS"
## $ published_at : POSIXct[1:1], format: "2019-09-03 16:22:20"
## $ content : chr "WASHINGTON (Reuters) - The National Transportation Safety Board said Tuesday a Tesla Model S was in Autopilot m"| __truncated__
## $ top_article : num 0
## $ engagement_reaction_count : num 0
## $ engagement_comment_count : num 0
## $ engagement_share_count : num 2528
## $ engagement_comment_plugin_count: num 0
## $ article_id : chr "Reuters_1"
\end{verbatim}
\hypertarget{tokenization}{%
\subparagraph{\texorpdfstring{\textbf{Tokenization}}{Tokenization}}\label{tokenization}}
Many of the principles used below come from the excellent
\texttt{tidytext} package, authored by \texttt{Silge\ and\ Robinson}. We
will now arrange the dataset into a one-token-per-row format using the
\texttt{unnest\_tokens()} verb.
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{library}\NormalTok{(tidytext)}
\NormalTok{tidy\_articles }\OtherTok{\textless{}{-}}\NormalTok{ clean\_articles }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{unnest\_tokens}\NormalTok{(word, description) }
\end{Highlighting}
\end{Shaded}
We will then anti-join stop words, which are common language fillers
such as \texttt{the} and \texttt{a} (Silge and Robinson) so our text
analysis is based on meaningful words.
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{data}\NormalTok{(stop\_words)}
\NormalTok{tidy\_articles }\OtherTok{\textless{}{-}}\NormalTok{ tidy\_articles }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{anti\_join}\NormalTok{(stop\_words)}
\NormalTok{tidy\_articles }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{count}\NormalTok{(word, }\AttributeTok{sort =} \ConstantTok{TRUE}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## # A tibble: 21,960 x 2
## word n
## <chr> <int>
## 1 news 1614
## 2 world 857
## 3 president 841
## 4 national 658
## 5 trump 614
## 6 top 596
## 7 u.s 593
## 8 video 509
## 9 online 480
## 10 coverage 449
## # ... with 21,950 more rows
\end{verbatim}
\hypertarget{text-correlation-analysis}{%
\subparagraph{\texorpdfstring{\textbf{Text correlation
analysis}}{Text correlation analysis}}\label{text-correlation-analysis}}
Now we create \texttt{prop}, which summarizes the proportion with which
each word has been mentioned by each respective news provider.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{prop }\OtherTok{\textless{}{-}}\NormalTok{ tidy\_articles }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{count}\NormalTok{(word, source\_name, }\AttributeTok{sort =} \ConstantTok{TRUE}\NormalTok{) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{group\_by}\NormalTok{(source\_name) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{mutate}\NormalTok{(}\AttributeTok{proportion =}\NormalTok{ n }\SpecialCharTok{/} \FunctionTok{sum}\NormalTok{(n)) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{ungroup}\NormalTok{() }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{select}\NormalTok{(}\SpecialCharTok{{-}}\NormalTok{n)}
\end{Highlighting}
\end{Shaded}
We then filter our newly created dataset for \texttt{Reuters} and use it
to visualize the words that are more likely to be mentioned by
\texttt{Reuters} (i.e.~having a higher proportion) vs.~words that are less
likely to be mentioned (i.e.~having a lower proportion). A lower
proportion indicates a higher chance of occurrence among the other news
providers.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{prop\_reuters }\OtherTok{\textless{}{-}}\NormalTok{ prop }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{filter}\NormalTok{(source\_name }\SpecialCharTok{==} \StringTok{"Reuters"}\NormalTok{) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{select}\NormalTok{(word, proportion) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{rename}\NormalTok{(}\AttributeTok{Reuters =}\NormalTok{ proportion)}
\end{Highlighting}
\end{Shaded}
Before we visualise our results, we'll need to \texttt{left\_join()} the
original proportions to allow for a comparison as the
\texttt{prop\_reuters} dataset doesn't include these.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{tidy\_prop }\OtherTok{\textless{}{-}}\NormalTok{ prop }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{left\_join}\NormalTok{(prop\_reuters, }\AttributeTok{by =} \FunctionTok{c}\NormalTok{(}\StringTok{"word"}\NormalTok{))}
\end{Highlighting}
\end{Shaded}
Now we are able to visualise our results\ldots{}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{library}\NormalTok{(scales)}
\FunctionTok{ggplot}\NormalTok{(tidy\_prop, }\FunctionTok{aes}\NormalTok{(}\AttributeTok{x =}\NormalTok{ proportion, }\AttributeTok{y =} \StringTok{\textasciigrave{}}\AttributeTok{Reuters}\StringTok{\textasciigrave{}}\NormalTok{, }
\AttributeTok{color =} \FunctionTok{abs}\NormalTok{(}\StringTok{\textasciigrave{}}\AttributeTok{Reuters}\StringTok{\textasciigrave{}} \SpecialCharTok{{-}}\NormalTok{ proportion))) }\SpecialCharTok{+}
\FunctionTok{geom\_abline}\NormalTok{(}\AttributeTok{color =} \StringTok{"gray40"}\NormalTok{, }\AttributeTok{lty =} \DecValTok{2}\NormalTok{) }\SpecialCharTok{+}
\FunctionTok{geom\_jitter}\NormalTok{(}\AttributeTok{alpha =} \FloatTok{0.1}\NormalTok{, }\AttributeTok{size =} \FloatTok{2.5}\NormalTok{, }\AttributeTok{width =} \FloatTok{0.3}\NormalTok{, }\AttributeTok{height =} \FloatTok{0.3}\NormalTok{) }\SpecialCharTok{+}
\FunctionTok{geom\_text}\NormalTok{(}\FunctionTok{aes}\NormalTok{(}\AttributeTok{label =}\NormalTok{ word), }\AttributeTok{check\_overlap =} \ConstantTok{TRUE}\NormalTok{, }\AttributeTok{vjust =} \FloatTok{1.5}\NormalTok{) }\SpecialCharTok{+}
\FunctionTok{scale\_x\_log10}\NormalTok{(}\AttributeTok{labels =} \FunctionTok{percent\_format}\NormalTok{()) }\SpecialCharTok{+}
\FunctionTok{scale\_y\_log10}\NormalTok{(}\AttributeTok{labels =} \FunctionTok{percent\_format}\NormalTok{()) }\SpecialCharTok{+}
\FunctionTok{scale\_color\_gradient}\NormalTok{(}\AttributeTok{limits =} \FunctionTok{c}\NormalTok{(}\DecValTok{0}\NormalTok{, }\FloatTok{0.001}\NormalTok{), }
\AttributeTok{low =} \StringTok{"darkslategray4"}\NormalTok{, }\AttributeTok{high =} \StringTok{"gray75"}\NormalTok{) }\SpecialCharTok{+}
\FunctionTok{facet\_wrap}\NormalTok{(}\SpecialCharTok{\textasciitilde{}}\NormalTok{source\_name, }\AttributeTok{ncol =} \DecValTok{3}\NormalTok{) }\SpecialCharTok{+}
\FunctionTok{theme}\NormalTok{(}\AttributeTok{legend.position=}\StringTok{"none"}\NormalTok{) }\SpecialCharTok{+}
\FunctionTok{labs}\NormalTok{(}\AttributeTok{y =} \StringTok{"Reuters proportions"}\NormalTok{, }\AttributeTok{x =} \ConstantTok{NULL}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{figure}
{\centering \includegraphics{index_files/figure-latex/unnamed-chunk-12-1}
}
\caption{Figure 2 - Comparing word frequencies of Reuters against other News services}\label{fig:unnamed-chunk-12}
\end{figure}
\emph{Interesting!} We can tell straight from the graphs that ESPN has
the lowest correlation with Reuters. This was expected, since
\texttt{ESPN}'s coverage is focused on sports-related content, unlike the
other source providers, which cover broader topic areas.
We will now quantify this correlation for \texttt{ESPN} to substantiate
our results and translate them into numbers using
\texttt{Pearson\textquotesingle{}s\ correlation}, calculating the
statistic through \texttt{cor.test}.
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{cor.test}\NormalTok{(}\AttributeTok{data =}\NormalTok{ tidy\_prop[tidy\_prop}\SpecialCharTok{$}\NormalTok{source\_name }\SpecialCharTok{==} \StringTok{"ESPN"}\NormalTok{,], }\SpecialCharTok{\textasciitilde{}}\NormalTok{ proportion }\SpecialCharTok{+} \StringTok{\textasciigrave{}}\AttributeTok{Reuters}\StringTok{\textasciigrave{}}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
##
## Pearson's product-moment correlation
##
## data: proportion and Reuters
## t = 4.1785, df = 452, p-value = 3.524e-05
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.1026415 0.2799119
## sample estimates:
## cor
## 0.1928498
\end{verbatim}
Now, we'll write a function to pull \texttt{Reuters} correlations
against all other sources and test again for \texttt{ESPN}.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{correlation\_function }\OtherTok{\textless{}{-}} \ControlFlowTok{function}\NormalTok{(x) \{}
\FunctionTok{cor.test}\NormalTok{(}\AttributeTok{data =}\NormalTok{ tidy\_prop[tidy\_prop}\SpecialCharTok{$}\NormalTok{source\_name }\SpecialCharTok{==}\NormalTok{ x,], }\SpecialCharTok{\textasciitilde{}}\NormalTok{ proportion }\SpecialCharTok{+} \StringTok{\textasciigrave{}}\AttributeTok{Reuters}\StringTok{\textasciigrave{}}\NormalTok{)}
\NormalTok{\}}
\NormalTok{source\_list }\OtherTok{\textless{}{-}} \FunctionTok{as.vector}\NormalTok{(}\FunctionTok{unique}\NormalTok{(tidy\_prop}\SpecialCharTok{$}\NormalTok{source\_name))}
\NormalTok{correlation\_list }\OtherTok{\textless{}{-}} \FunctionTok{map}\NormalTok{(source\_list, correlation\_function) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{setNames}\NormalTok{(source\_list)}
\NormalTok{correlation\_list}\SpecialCharTok{$}\NormalTok{ESPN}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
##
## Pearson's product-moment correlation
##
## data: proportion and Reuters
## t = 4.1785, df = 452, p-value = 3.524e-05
## alternative hypothesis: true correlation is not equal to 0
## 95 percent confidence interval:
## 0.1026415 0.2799119
## sample estimates:
## cor
## 0.1928498
\end{verbatim}
\emph{Awesome!} We derived exactly the same correlation, this time using
a function. We can now plot \texttt{Reuters} correlations against the
other news source providers.
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{library}\NormalTok{(reshape2)}
\FunctionTok{library}\NormalTok{(forcats)}
\NormalTok{correlation\_df }\OtherTok{\textless{}{-}} \FunctionTok{melt}\NormalTok{(}\FunctionTok{lapply}\NormalTok{(correlation\_list, }\StringTok{\textasciigrave{}}\AttributeTok{[}\StringTok{\textasciigrave{}}\NormalTok{, }\FunctionTok{c}\NormalTok{(}\StringTok{\textquotesingle{}estimate\textquotesingle{}}\NormalTok{, }\StringTok{\textquotesingle{}p.value\textquotesingle{}}\NormalTok{)))}
\NormalTok{tidy\_correlations }\OtherTok{\textless{}{-}}\NormalTok{ correlation\_df }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{filter}\NormalTok{(L2 }\SpecialCharTok{==} \StringTok{"estimate"}\NormalTok{, L1 }\SpecialCharTok{!=} \StringTok{"Reuters"}\NormalTok{) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{rename}\NormalTok{(}\AttributeTok{source\_name =}\NormalTok{ L1) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{select}\NormalTok{(}\SpecialCharTok{{-}}\NormalTok{L2) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{ggplot}\NormalTok{(}\FunctionTok{aes}\NormalTok{(}\FunctionTok{fct\_reorder}\NormalTok{(source\_name, value),value)) }\SpecialCharTok{+}
\FunctionTok{geom\_col}\NormalTok{() }\SpecialCharTok{+}
\FunctionTok{coord\_flip}\NormalTok{() }\SpecialCharTok{+}
\FunctionTok{xlab}\NormalTok{(}\ConstantTok{NULL}\NormalTok{) }\SpecialCharTok{+}
\FunctionTok{ylab}\NormalTok{(}\StringTok{""}\NormalTok{)}
\NormalTok{tidy\_correlations}
\end{Highlighting}
\end{Shaded}
\begin{figure}
{\centering \includegraphics{index_files/figure-latex/unnamed-chunk-15-1}
}
\caption{Figure 3 - Reuters text correlation with other News sources}\label{fig:unnamed-chunk-15}
\end{figure}
It is interesting to see \texttt{ABC\ News} with an even lower correlation
than \texttt{ESPN}'s. A closer look at the plot above shows the words
\emph{world}, \emph{news}, \emph{video}, and \emph{coverage} as clear
outliers, which cause ABC News to have this unusually low correlation even
though \texttt{ESPN}'s content is much more diverse.
\textbf{Note}: Our results would have been quite different had we used the
\texttt{content} column of the dataset. However, because its values are
truncated to 260 characters, we believe it would be very difficult to infer
meaningful relationships from such incomplete text. We have therefore based
our analysis on the \texttt{description} column, which provides an excellent
summary of each article's events and is a lot more detailed than the
\texttt{title} column.
\hypertarget{topic-modeling}{%
\subsubsection{\texorpdfstring{\textbf{Topic
modeling}}{Topic modeling}}\label{topic-modeling}}
Now we move on to \texttt{topic\ modeling}, where our goal is to show the
various topic clusters within the \texttt{articles} dataset at the article
level. We start by arranging our dataset into a tidy format.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{wordsbyArticle }\OtherTok{\textless{}{-}}\NormalTok{ tidy\_articles }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{count}\NormalTok{(article\_id, word, }\AttributeTok{sort =} \ConstantTok{TRUE}\NormalTok{) }
\NormalTok{wordsbyArticle}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## # A tibble: 142,737 x 3
## article_id word n
## <chr> <chr> <int>
## 1 Business Insider_688 iphone 6
## 2 Business Insider_461 iphone 5
## 3 Business Insider_477 iphone 5
## 4 Business Insider_486 iphone 5
## 5 Business Insider_561 11 5
## 6 Business Insider_561 iphone 5
## 7 Business Insider_640 litter 5
## 8 Business Insider_805 coca 5
## 9 Business Insider_805 cola 5
## 10 CNN_621 iphone 5
## # ... with 142,727 more rows
\end{verbatim}
We will now transform this into a dfm object using the
\texttt{cast\_dfm()} function from the \texttt{tidytext} package
(Silge and Robinson). A dfm-class object is a sparse matrix
representation of the counts of features by document, and it is needed to
apply the \texttt{stm} function, an unsupervised classification method
widely accepted as suitable for topic modeling.
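To make the structure concrete, here is the corner of such a matrix built
from three of the rows shown above (only the listed counts come from the
data; the zero entries are assumed purely for illustration). Each row is an
\texttt{article\_id}, each column a word, and since almost all entries are
zero a sparse representation is used.
\[
\begin{array}{l|ccc}
 & \textrm{iphone} & \textrm{litter} & \textrm{cola} \\
\hline
\textrm{Business Insider\_688} & 6 & 0 & 0 \\
\textrm{Business Insider\_640} & 0 & 5 & 0 \\
\textrm{Business Insider\_805} & 0 & 0 & 5
\end{array}
\]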
Because fitting the \texttt{stm} model takes an excruciating amount of time
when knitting the document, we have opted to save the fitted model to our
workspace, which makes the knitting process much smoother and quicker! We
have also set the seed for reproducibility.
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{library}\NormalTok{(tm)}
\FunctionTok{library}\NormalTok{(quanteda)}
\FunctionTok{library}\NormalTok{(stm)}
\NormalTok{articles\_dfm }\OtherTok{\textless{}{-}}\NormalTok{ wordsbyArticle }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{cast\_dfm}\NormalTok{(article\_id, word, n)}
\FunctionTok{set.seed}\NormalTok{(}\DecValTok{2021}\NormalTok{)}
\NormalTok{articles\_lda }\OtherTok{\textless{}{-}} \FunctionTok{stm}\NormalTok{(articles\_dfm, }\AttributeTok{K=}\DecValTok{6}\NormalTok{, }\AttributeTok{init.type =} \StringTok{"LDA"}\NormalTok{)}
\FunctionTok{saveRDS}\NormalTok{(articles\_lda, }\AttributeTok{file =} \StringTok{"articles\_lda.RDS"}\NormalTok{)}
\NormalTok{rownames\_dfm }\OtherTok{\textless{}{-}} \FunctionTok{rownames}\NormalTok{(articles\_dfm)}
\FunctionTok{saveRDS}\NormalTok{(rownames\_dfm, }\AttributeTok{file =} \StringTok{"rownames\_dfm.RDS"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{articles\_lda }\OtherTok{\textless{}{-}} \FunctionTok{readRDS}\NormalTok{(}\AttributeTok{file =} \StringTok{"articles\_lda.RDS"}\NormalTok{)}
\NormalTok{rownames\_dfm }\OtherTok{\textless{}{-}} \FunctionTok{readRDS}\NormalTok{(}\AttributeTok{file =} \StringTok{"rownames\_dfm.RDS"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
Now that we've successfully run the topic model, we will group the topics
into different categories. To avoid plotting the graph twice, we will label
them before visualising the top 10 words from each of the 6 topics.
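For reference, the \texttt{beta} matrix extracted below contains the
per-topic word probabilities (this is the standard meaning of \texttt{beta}
in tidied topic models, stated here for clarity): for topic $k$ and term $w$,
\[
\beta_{k,w} = P(\textrm{term } w \mid \textrm{topic } k),
\qquad \sum_{w} \beta_{k,w} = 1 ,
\]
so the bars in the next figure show, for each topic, the ten terms that the
topic is most likely to generate.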
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{article\_topics }\OtherTok{\textless{}{-}} \FunctionTok{tidy}\NormalTok{(articles\_lda, }\AttributeTok{matrix =} \StringTok{"beta"}\NormalTok{)}
\NormalTok{topic\_theme }\OtherTok{\textless{}{-}}\NormalTok{ article\_topics }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{group\_by}\NormalTok{(topic) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{slice\_max}\NormalTok{(beta, }\AttributeTok{n =} \DecValTok{10}\NormalTok{) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{ungroup}\NormalTok{() }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{arrange}\NormalTok{(topic, }\SpecialCharTok{{-}}\NormalTok{beta)}
\NormalTok{topic\_categories }\OtherTok{\textless{}{-}} \FunctionTok{tibble}\NormalTok{(}\AttributeTok{topic =} \DecValTok{1}\SpecialCharTok{:}\DecValTok{6}\NormalTok{, }\AttributeTok{topic\_category =} \FunctionTok{c}\NormalTok{(}\StringTok{"Exclusive content"}\NormalTok{, }\StringTok{"Economy and Trade"}\NormalTok{, }\StringTok{"Climate change"}\NormalTok{, }\StringTok{"US coverage"}\NormalTok{, }\StringTok{"International coverage"}\NormalTok{, }\StringTok{"UK coverage"}\NormalTok{))}
\NormalTok{topic\_theme }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{inner\_join}\NormalTok{(topic\_categories, }\AttributeTok{by =} \FunctionTok{c}\NormalTok{(}\StringTok{"topic"}\NormalTok{)) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{ggplot}\NormalTok{(}\FunctionTok{aes}\NormalTok{(}\FunctionTok{fct\_reorder}\NormalTok{(term, beta), beta, }\AttributeTok{fill =} \FunctionTok{factor}\NormalTok{(topic\_category))) }\SpecialCharTok{+}
\FunctionTok{geom\_col}\NormalTok{(}\AttributeTok{show.legend =} \ConstantTok{FALSE}\NormalTok{) }\SpecialCharTok{+}
\FunctionTok{facet\_wrap}\NormalTok{(}\SpecialCharTok{\textasciitilde{}}\NormalTok{topic\_category, }\AttributeTok{scales =} \StringTok{"free"}\NormalTok{) }\SpecialCharTok{+}
\FunctionTok{coord\_flip}\NormalTok{() }\SpecialCharTok{+}
\FunctionTok{ylab}\NormalTok{(}\StringTok{""}\NormalTok{) }\SpecialCharTok{+}
\FunctionTok{xlab}\NormalTok{(}\StringTok{""}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{figure}
{\centering \includegraphics{index_files/figure-latex/unnamed-chunk-19-1}
}
\caption{Figure 4 - Terms that are most common within each of the six topics}\label{fig:unnamed-chunk-19}
\end{figure}
Even though some judgement is involved in naming these topic categories, we
can clearly distinguish certain topic areas such as \emph{Economy and
Trade}, \emph{Climate change}, \emph{US coverage}, and \emph{UK coverage}.
Other areas such as \emph{International coverage} are more debatable and
probably absorb a range of other topics, which opens up the possibility of
performing topic modelling with k \textgreater{} 6. Also, \emph{Exclusive
content} sounds a bit vague and not well defined, but we believe it is
driven by the \texttt{description} column in the dataset being limited to a
certain number of characters. For our purposes, however, we will keep it so
that we can examine later on whether exclusive content has a significant
impact on popularity.
Now we'll turn our focus to \texttt{gamma}, which moves away from
individual words and instead describes the allocation of the various topics
within a single document (Silge and Robinson). In our case, this is the
topic allocation within each article of our articles dataset
(i.e.~the allocation by \texttt{article\_id}).
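In other words, for each article $d$ the model returns a proportion for
every topic $k$ (again, this is the standard interpretation of
\texttt{gamma}, noted here for clarity),
\[
\gamma_{d,k} \ge 0, \qquad \sum_{k=1}^{6} \gamma_{d,k} = 1 ,
\]
and the \texttt{slice\_max()} step below simply keeps, for every article,
the single topic with the largest $\gamma$.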
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{articles\_gamma }\OtherTok{\textless{}{-}} \FunctionTok{tidy}\NormalTok{(articles\_lda, }\AttributeTok{matrix =} \StringTok{"gamma"}\NormalTok{, }\AttributeTok{document\_names =}\NormalTok{ rownames\_dfm)}
\NormalTok{articles\_gamma\_sliced }\OtherTok{\textless{}{-}}\NormalTok{ articles\_gamma }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{group\_by}\NormalTok{(document) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{slice\_max}\NormalTok{(}\AttributeTok{order\_by =}\NormalTok{ gamma, }\AttributeTok{n=}\DecValTok{1}\NormalTok{) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{ungroup}\NormalTok{() }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{arrange}\NormalTok{(}\SpecialCharTok{{-}}\NormalTok{gamma) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{inner\_join}\NormalTok{(topic\_categories) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{rename}\NormalTok{(}\AttributeTok{article\_id =}\NormalTok{ document)}
\NormalTok{articles\_gamma\_sliced }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{separate}\NormalTok{(article\_id, }\FunctionTok{c}\NormalTok{(}\StringTok{\textquotesingle{}source\_name\textquotesingle{}}\NormalTok{, }\StringTok{\textquotesingle{}article\_number\textquotesingle{}}\NormalTok{), }\AttributeTok{sep=}\StringTok{"\_"}\NormalTok{) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{count}\NormalTok{(source\_name, topic, topic\_category) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{ggplot}\NormalTok{(}\FunctionTok{aes}\NormalTok{(source\_name, n, }\AttributeTok{fill =} \FunctionTok{factor}\NormalTok{(topic\_category))) }\SpecialCharTok{+}
\FunctionTok{geom\_col}\NormalTok{(}\AttributeTok{position =} \StringTok{"fill"}\NormalTok{) }\SpecialCharTok{+}
\FunctionTok{coord\_flip}\NormalTok{() }\SpecialCharTok{+}
\FunctionTok{guides}\NormalTok{(}\AttributeTok{fill=}\FunctionTok{guide\_legend}\NormalTok{(}\AttributeTok{title=}\StringTok{"Topic categories"}\NormalTok{)) }\SpecialCharTok{+}
\FunctionTok{ylab}\NormalTok{(}\StringTok{""}\NormalTok{) }\SpecialCharTok{+}
\FunctionTok{xlab}\NormalTok{(}\StringTok{""}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{figure}
{\centering \includegraphics{index_files/figure-latex/unnamed-chunk-20-1}
}
\caption{Figure 5 - Weighting of our clustered topic categories amongst the various News providers}\label{fig:unnamed-chunk-20}
\end{figure}
The results above make sense. \texttt{ESPN}'s UK coverage stems from its
coverage of the UK Premier League. \texttt{BBC} obviously has higher UK
coverage than its counterparts. \texttt{WSJ} and \texttt{Business\ Insider}
are mostly focused on Economy and Trade, as would be expected. \texttt{CNN}
understandably has much higher US coverage than any of its counterparts.
\hypertarget{sentiment-analysis}{%
\subsubsection{Sentiment Analysis}\label{sentiment-analysis}}
Now we'll perform sentiment analysis using the \texttt{bing} lexicon
(Silge and Robinson). This lexicon provides a two-category
(positive/negative) sentiment classification, which will be useful for our
analysis. Using the \texttt{tidytext} package, we can obtain it by calling
\texttt{get\_sentiments()}.
\begin{verbatim}
## # A tibble: 6,786 x 2
## word sentiment
## <chr> <chr>
## 1 2-faces negative
## 2 abnormal negative
## 3 abolish negative
## 4 abominable negative
## 5 abominably negative
## 6 abominate negative
## 7 abomination negative
## 8 abort negative
## 9 aborted negative
## 10 aborts negative
## # ... with 6,776 more rows
\end{verbatim}
Here, we'll \texttt{inner\_join} the \texttt{bing} lexicon to our tidied
set, thereby dropping any words that do not appear in both datasets. We
will also assign a \texttt{sentiment} score based on the frequency of
positive and negative words within each \texttt{article\_id}.
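The score is simply the difference between the two counts,
\[
\textrm{sentiment}_{d} = \textrm{positive}_{d} - \textrm{negative}_{d},
\]
so, for example, the first article in the output below
(\texttt{ABC News\_1}) has one positive and four negative words, giving a
score of $-3$.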
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{sentiment\_bing }\OtherTok{\textless{}{-}}\NormalTok{ tidy\_articles }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{inner\_join}\NormalTok{(}\FunctionTok{get\_sentiments}\NormalTok{(}\StringTok{"bing"}\NormalTok{)) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{count}\NormalTok{(article\_id, source\_name, sentiment) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{spread}\NormalTok{(sentiment, n, }\AttributeTok{fill =} \DecValTok{0}\NormalTok{) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{mutate}\NormalTok{(}\AttributeTok{sentiment =}\NormalTok{ positive }\SpecialCharTok{{-}}\NormalTok{ negative)}
\NormalTok{sentiment\_bing}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## # A tibble: 7,515 x 5
## article_id source_name negative positive sentiment
## <chr> <fct> <dbl> <dbl> <dbl>
## 1 ABC News_1 ABC News 4 1 -3
## 2 ABC News_10 ABC News 1 1 0
## 3 ABC News_1001 ABC News 2 2 0
## 4 ABC News_1002 ABC News 1 1 0
## 5 ABC News_1004 ABC News 0 2 2
## 6 ABC News_1005 ABC News 2 0 -2
## 7 ABC News_1006 ABC News 1 0 -1
## 8 ABC News_1007 ABC News 0 1 1
## 9 ABC News_1008 ABC News 1 1 0
## 10 ABC News_1009 ABC News 1 1 0
## # ... with 7,505 more rows
\end{verbatim}
\hypertarget{wordcloud}{%
\subsubsection{Wordcloud}\label{wordcloud}}
Now that we have a tidied dataset, we can plot a wordcloud with the
\texttt{reshape2} and \texttt{wordcloud} packages. We can try to
understand the main sentiment drivers (either positive or negative)
within the whole dataset.
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{library}\NormalTok{(reshape2)}
\FunctionTok{library}\NormalTok{(wordcloud)}
\NormalTok{tidy\_articles }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{inner\_join}\NormalTok{(}\FunctionTok{get\_sentiments}\NormalTok{(}\StringTok{"bing"}\NormalTok{)) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{count}\NormalTok{(word, sentiment, }\AttributeTok{sort =} \ConstantTok{TRUE}\NormalTok{) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{acast}\NormalTok{(word }\SpecialCharTok{\textasciitilde{}}\NormalTok{ sentiment, }\AttributeTok{value.var =} \StringTok{"n"}\NormalTok{, }\AttributeTok{fill =} \DecValTok{0}\NormalTok{) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{comparison.cloud}\NormalTok{(}\AttributeTok{colors =} \FunctionTok{c}\NormalTok{(}\StringTok{"gray20"}\NormalTok{,}\StringTok{"gray80"}\NormalTok{), }\AttributeTok{max.words =} \DecValTok{75}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{figure}
{\centering \includegraphics{index_files/figure-latex/unnamed-chunk-23-1}
}
\caption{Figure 6 - Most common positive and negative words in all articles}\label{fig:unnamed-chunk-23}
\end{figure}
Some of these negative words are distressing, so we can filter our
dataset to \texttt{ESPN} to find more lighthearted indicators of either
negative or positive sentiment.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{tidy\_articles }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{inner\_join}\NormalTok{(}\FunctionTok{get\_sentiments}\NormalTok{(}\StringTok{"bing"}\NormalTok{)) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{count}\NormalTok{(word, sentiment, source\_name, }\AttributeTok{sort =} \ConstantTok{TRUE}\NormalTok{) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{filter}\NormalTok{(source\_name }\SpecialCharTok{==} \StringTok{"ESPN"}\NormalTok{) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{acast}\NormalTok{(word }\SpecialCharTok{\textasciitilde{}}\NormalTok{ sentiment, }\AttributeTok{value.var =} \StringTok{"n"}\NormalTok{, }\AttributeTok{fill =} \DecValTok{0}\NormalTok{) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{comparison.cloud}\NormalTok{(}\AttributeTok{colors =} \FunctionTok{c}\NormalTok{(}\StringTok{"gray20"}\NormalTok{,}\StringTok{"gray80"}\NormalTok{), }\AttributeTok{max.words =} \DecValTok{120}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{figure}
{\centering \includegraphics{index_files/figure-latex/unnamed-chunk-24-1}
}
\caption{Figure 7 - Most common positive and negative words in ESPN articles}\label{fig:unnamed-chunk-24}
\end{figure}
This is more like it, and why people love watching sports!
\hypertarget{term-frequency}{%
\subsubsection{Term Frequency}\label{term-frequency}}
We can also obtain a deeper understanding of the topic areas covered by
each news agency by calculating term frequency, the number of times a word
appears within a document, weighted by how rare that word is across
sources. We will use this to depict the 15 most characteristic words for
each news provider, which can be computed with the \texttt{bind\_tf\_idf}
function.
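Concretely, \texttt{bind\_tf\_idf} uses the standard tf-idf weighting
(stated here for reference):
\[
\textrm{tf-idf}(w,d) = \textrm{tf}(w,d) \times \ln\frac{N}{n_w},
\]
where $\textrm{tf}(w,d)$ is the share of document $d$'s words accounted for
by $w$, $N$ is the number of documents (here, one document per news source)
and $n_w$ is the number of documents containing $w$; words that are frequent
in one source but rare elsewhere therefore receive the highest scores.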
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{article\_words }\OtherTok{\textless{}{-}}\NormalTok{ tidy\_articles }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{count}\NormalTok{(source\_name, word, }\AttributeTok{sort =} \ConstantTok{TRUE}\NormalTok{)}
\NormalTok{article\_words }\OtherTok{\textless{}{-}}\NormalTok{ article\_words }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{bind\_tf\_idf}\NormalTok{(word, source\_name, n) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{arrange}\NormalTok{(}\SpecialCharTok{{-}}\NormalTok{tf\_idf)}
\NormalTok{article\_words }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{group\_by}\NormalTok{(source\_name) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{slice\_max}\NormalTok{(tf\_idf, }\AttributeTok{n =} \DecValTok{15}\NormalTok{) }\SpecialCharTok{\%\textgreater{}\%}
\NormalTok{ ungroup }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{ggplot}\NormalTok{(}\FunctionTok{aes}\NormalTok{(word, tf\_idf, }\AttributeTok{fill =}\NormalTok{ source\_name)) }\SpecialCharTok{+}
\FunctionTok{geom\_col}\NormalTok{(}\AttributeTok{show.legend =} \ConstantTok{FALSE}\NormalTok{) }\SpecialCharTok{+}
\FunctionTok{facet\_wrap}\NormalTok{(}\SpecialCharTok{\textasciitilde{}}\NormalTok{source\_name, }\AttributeTok{ncol =} \DecValTok{3}\NormalTok{, }\AttributeTok{scales =} \StringTok{"free"}\NormalTok{) }\SpecialCharTok{+}
\FunctionTok{labs}\NormalTok{(}\AttributeTok{x =} \ConstantTok{NULL}\NormalTok{, }\AttributeTok{y =} \StringTok{""}\NormalTok{) }\SpecialCharTok{+}
\FunctionTok{coord\_flip}\NormalTok{()}
\end{Highlighting}
\end{Shaded}
\begin{figure}
{\centering \includegraphics{index_files/figure-latex/unnamed-chunk-25-1}
}
\caption{Figure 8 - Highest tf-idf words amongst the dataset's news sources}\label{fig:unnamed-chunk-25}
\end{figure}
It is interesting to see \emph{48}, which refers to the 48 Hours show on
CBS; each source's own name and website showing up is also obvious and
expected. The words \emph{trafford} and \emph{tottenham} point to more
football coverage on BBC News, although we would probably have needed a
higher k to arrive at a dedicated sports cluster. The main limitation here
could well be the diverse topics covered by ESPN, such as the NFL and
basketball, versus the mainly Olympics and football coverage by the BBC.
\hypertarget{n-grams-and-correlation}{%
\subsubsection{N-grams and correlation}\label{n-grams-and-correlation}}
Our focus so far has been on \emph{unigrams}; however, we can certainly
derive a lot more from \emph{bigrams}, i.e.~pairs of consecutive words.
Bigrams allow us to visualise the connectivity between words using the
\texttt{igraph} package.
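As a quick illustration of what this bigram tokenisation produces (the
sentence below is made up and the snippet is only a sketch, not part of the
analysis above):
\begin{verbatim}
# toy illustration only; not run as part of this report
tibble(description = "vice president joe biden") %>%
  unnest_tokens(bigrams, description, token = "ngrams", n = 2)
# -> bigrams: "vice president", "president joe", "joe biden"
\end{verbatim}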
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{article\_bigrams }\OtherTok{\textless{}{-}}\NormalTok{ clean\_articles }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{unnest\_tokens}\NormalTok{(bigrams, description, }\AttributeTok{token =} \StringTok{"ngrams"}\NormalTok{, }\AttributeTok{n =} \DecValTok{2}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
Now that we have unnested the \texttt{description} column into bigrams, we
can count the frequency of each unique combination.
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{data}\NormalTok{(stop\_words)}
\NormalTok{article\_bigrams }\OtherTok{\textless{}{-}}\NormalTok{ article\_bigrams }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{count}\NormalTok{(bigrams, }\AttributeTok{sort =} \ConstantTok{TRUE}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
We will now filter out any stop-word occurrences after splitting each
bigram into two columns, one for each of its two words.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{bigrams\_split }\OtherTok{\textless{}{-}}\NormalTok{article\_bigrams }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{separate}\NormalTok{(bigrams, }\FunctionTok{c}\NormalTok{(}\StringTok{"word1"}\NormalTok{,}\StringTok{"word2"}\NormalTok{), }\AttributeTok{sep=} \StringTok{" "}\NormalTok{) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{filter}\NormalTok{(}\SpecialCharTok{!}\NormalTok{word1 }\SpecialCharTok{\%in\%}\NormalTok{ stop\_words}\SpecialCharTok{$}\NormalTok{word) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{filter}\NormalTok{(}\SpecialCharTok{!}\NormalTok{word2 }\SpecialCharTok{\%in\%}\NormalTok{ stop\_words}\SpecialCharTok{$}\NormalTok{word) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{arrange}\NormalTok{(}\FunctionTok{desc}\NormalTok{(n))}
\end{Highlighting}
\end{Shaded}
Using the \texttt{igraph} and \texttt{ggraph} packages we can now visualise
the connectivity between words, keeping only bigrams that have occurred
more than 15 times. We will also scale the thickness of the links with the
frequency of these bigrams by mapping \texttt{edge\_width\ =\ n} within
\texttt{geom\_edge\_link()}. \emph{Really cool stuff!}
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{library}\NormalTok{(ggraph)}
\FunctionTok{library}\NormalTok{(igraph)}
\FunctionTok{library}\NormalTok{(grid)}
\NormalTok{bigram\_igraph }\OtherTok{\textless{}{-}}\NormalTok{ bigrams\_split }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{filter}\NormalTok{(n }\SpecialCharTok{\textgreater{}} \DecValTok{15}\NormalTok{) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{filter}\NormalTok{(}\SpecialCharTok{!}\FunctionTok{is.na}\NormalTok{(word1)) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{filter}\NormalTok{(}\SpecialCharTok{!}\FunctionTok{is.na}\NormalTok{(word2)) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{graph\_from\_data\_frame}\NormalTok{() }
\FunctionTok{set.seed}\NormalTok{(}\DecValTok{1234}\NormalTok{)}
\FunctionTok{ggraph}\NormalTok{(bigram\_igraph, }\AttributeTok{layout =} \StringTok{"fr"}\NormalTok{) }\SpecialCharTok{+}
\FunctionTok{geom\_edge\_link}\NormalTok{(}\FunctionTok{aes}\NormalTok{(}\AttributeTok{edge\_alpha =}\NormalTok{ n, }\AttributeTok{edge\_width =}\NormalTok{ n), }\AttributeTok{edge\_colour =} \StringTok{"cyan4"}\NormalTok{, }\AttributeTok{end\_cap =} \FunctionTok{circle}\NormalTok{(}\FloatTok{0.07}\NormalTok{, }\StringTok{\textquotesingle{}inches\textquotesingle{}}\NormalTok{)) }\SpecialCharTok{+}
\FunctionTok{geom\_node\_point}\NormalTok{(}\AttributeTok{color =} \StringTok{"lightblue"}\NormalTok{, }\AttributeTok{size =} \DecValTok{5}\NormalTok{) }\SpecialCharTok{+}
\FunctionTok{geom\_node\_text}\NormalTok{(}\FunctionTok{aes}\NormalTok{(}\AttributeTok{label =}\NormalTok{ name), }\AttributeTok{vjust =} \DecValTok{1}\NormalTok{, }\AttributeTok{hjust =} \DecValTok{1}\NormalTok{) }\SpecialCharTok{+}
\FunctionTok{theme\_void}\NormalTok{()}
\end{Highlighting}
\end{Shaded}
\begin{figure}
{\centering \includegraphics{index_files/figure-latex/unnamed-chunk-29-1}
}
\caption{Figure 9 - Most recurring bigrams in all articles that have occurred at least 15 times}\label{fig:unnamed-chunk-29}
\end{figure}
We can clearly tell a story now. For example, one particular theme
surrounding US coverage centred on former President Donald Trump and his
administration, whilst former Vice President \emph{(now President)} Joe
Biden was the runner-up.
\hypertarget{putting-the-final-pieces-together}{%
\subsubsection{Putting the final pieces
together}\label{putting-the-final-pieces-together}}
We will now combine the various pieces from our work above into a linear
regression model. Our aim is to explain \emph{popularity} in terms of the
\textbf{source name}, \textbf{sentiment}, \textbf{topic category} and
\textbf{image status}, which we construct further below.
\hypertarget{popularity-by-source-name}{%
\paragraph{Popularity by source name}\label{popularity-by-source-name}}
We finally attempt to visualise popularity by the various sources.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{clean\_articles }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{mutate}\NormalTok{(}\AttributeTok{popularity =}\NormalTok{ engagement\_reaction\_count }\SpecialCharTok{+}\NormalTok{ engagement\_comment\_count }\SpecialCharTok{+}\NormalTok{ engagement\_share\_count }\SpecialCharTok{+}\NormalTok{ engagement\_comment\_plugin\_count) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{group\_by}\NormalTok{(source\_name) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{summarise}\NormalTok{(}\AttributeTok{popularity =} \FunctionTok{mean}\NormalTok{(popularity, }\AttributeTok{na.rm =}\ConstantTok{TRUE}\NormalTok{)) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{ggplot}\NormalTok{(}\FunctionTok{aes}\NormalTok{(}\FunctionTok{fct\_reorder}\NormalTok{(source\_name, popularity), popularity, }\AttributeTok{fill =}\NormalTok{ source\_name)) }\SpecialCharTok{+}
\FunctionTok{geom\_col}\NormalTok{(}\AttributeTok{show.legend =} \ConstantTok{FALSE}\NormalTok{) }\SpecialCharTok{+}
\FunctionTok{coord\_flip}\NormalTok{() }\SpecialCharTok{+}
\FunctionTok{ylab}\NormalTok{(}\StringTok{""}\NormalTok{) }\SpecialCharTok{+}
\FunctionTok{xlab}\NormalTok{(}\StringTok{""}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{figure}
{\centering \includegraphics{index_files/figure-latex/unnamed-chunk-30-1}
}
\caption{Figure 10 - Average popularity amongst the different news service providers}\label{fig:unnamed-chunk-30}
\end{figure}
This clearly shows that The New York Times and CNN are the clear high
flyers in terms of popularity, with both The Irish Times and ESPN well
below average. This lets us regroup the middle sources into a single
category, as they all fall within a similar popularity range
(i.e.~\emph{CBS News}, \emph{Al Jazeera English}, \emph{Newsweek},
\emph{Business Insider}, \emph{BBC News}, \emph{ABC News}, \emph{The Wall
Street Journal}, and \emph{Reuters}).
\hypertarget{sentiment}{%
\subparagraph{Sentiment}\label{sentiment}}
We will also have a look at sentiment across the board.
\begin{Shaded}
\begin{Highlighting}[]
\FunctionTok{library}\NormalTok{(lubridate)}
\NormalTok{clean\_articles }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{left\_join}\NormalTok{(}
\FunctionTok{select}\NormalTok{(sentiment\_bing, }\FunctionTok{c}\NormalTok{(}\StringTok{"article\_id"}\NormalTok{, }\StringTok{"sentiment"}\NormalTok{)}
\NormalTok{ )) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{ggplot}\NormalTok{(}\FunctionTok{aes}\NormalTok{(sentiment, }\AttributeTok{fill =}\NormalTok{ source\_name)) }\SpecialCharTok{+}
\FunctionTok{geom\_density}\NormalTok{(}\AttributeTok{show.legend =} \ConstantTok{FALSE}\NormalTok{) }\SpecialCharTok{+}
\FunctionTok{facet\_wrap}\NormalTok{(}\SpecialCharTok{\textasciitilde{}}\NormalTok{source\_name) }\SpecialCharTok{+}
\FunctionTok{xlab}\NormalTok{(}\StringTok{""}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{figure}
{\centering \includegraphics{index_files/figure-latex/unnamed-chunk-31-1}
}
\caption{Figure 11 - Sentiment score distribution by News provider}\label{fig:unnamed-chunk-31}
\end{figure}
Overall sentiment looks balanced, so for now we will take a shortcut and
assume that sentiment is fairly similar across sources. Now it's time for
some data wrangling before we apply our \texttt{lm} model.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{model\_set }\OtherTok{\textless{}{-}}\NormalTok{ clean\_articles }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{mutate}\NormalTok{(}\AttributeTok{popularity =}\NormalTok{ engagement\_reaction\_count }\SpecialCharTok{+}\NormalTok{ engagement\_comment\_count }\SpecialCharTok{+}\NormalTok{ engagement\_share\_count }\SpecialCharTok{+}\NormalTok{ engagement\_comment\_plugin\_count) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{left\_join}\NormalTok{(}
\FunctionTok{select}\NormalTok{(sentiment\_bing, }\FunctionTok{c}\NormalTok{(}\StringTok{"article\_id"}\NormalTok{, }\StringTok{"sentiment"}\NormalTok{)}
\NormalTok{ )) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{mutate}\NormalTok{(}\AttributeTok{source\_name =} \FunctionTok{case\_when}\NormalTok{(}
\NormalTok{ source\_name }\SpecialCharTok{==} \StringTok{"CNN"} \SpecialCharTok{\textasciitilde{}} \StringTok{"CNN"}\NormalTok{,}
\NormalTok{ source\_name }\SpecialCharTok{==} \StringTok{"The Irish Times"} \SpecialCharTok{\textasciitilde{}} \StringTok{"The Irish Times"}\NormalTok{,}
\NormalTok{ source\_name }\SpecialCharTok{==} \StringTok{"The New York Times"} \SpecialCharTok{\textasciitilde{}} \StringTok{"The New York Times"}\NormalTok{,}
\NormalTok{ source\_name }\SpecialCharTok{==} \StringTok{"ESPN"} \SpecialCharTok{\textasciitilde{}} \StringTok{"ESPN"}\NormalTok{,}
\ConstantTok{TRUE} \SpecialCharTok{\textasciitilde{}} \StringTok{"Other"}\NormalTok{)) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{mutate}\NormalTok{(}\AttributeTok{sentiment\_cat =} \FunctionTok{case\_when}\NormalTok{(}
\NormalTok{ sentiment }\SpecialCharTok{\textgreater{}} \DecValTok{0} \SpecialCharTok{\textasciitilde{}} \StringTok{"positive"}\NormalTok{,}
\NormalTok{ sentiment }\SpecialCharTok{\textless{}} \DecValTok{0} \SpecialCharTok{\textasciitilde{}} \StringTok{"negative"}\NormalTok{,}
\ConstantTok{TRUE} \SpecialCharTok{\textasciitilde{}} \StringTok{"neutral"}\NormalTok{)) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{mutate}\NormalTok{(}\AttributeTok{image\_status =} \FunctionTok{if\_else}\NormalTok{(}\FunctionTok{is.na}\NormalTok{(url\_to\_image), }\DecValTok{0}\NormalTok{, }\DecValTok{1}\NormalTok{)) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{left\_join}\NormalTok{(articles\_gamma\_sliced)}
\NormalTok{model\_lm }\OtherTok{\textless{}{-}} \FunctionTok{lm}\NormalTok{(}\AttributeTok{formula =}\NormalTok{ popularity }\SpecialCharTok{\textasciitilde{}}\NormalTok{ sentiment\_cat }\SpecialCharTok{+}\NormalTok{ topic\_category }\SpecialCharTok{+}\NormalTok{ source\_name }\SpecialCharTok{+}\NormalTok{ image\_status, }\AttributeTok{data =}\NormalTok{ model\_set)}
\FunctionTok{summary}\NormalTok{(model\_lm)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
##
## Call:
## lm(formula = popularity ~ sentiment_cat + topic_category + source_name +
## image_status, data = model_set)
##
## Residuals:
## Min 1Q Median 3Q Max
## -2211 -742 -475 -72 432644
##
## Coefficients:
## Estimate Std. Error
## (Intercept) 1074.09 346.25
## sentiment_catneutral 40.51 138.39
## sentiment_catpositive -165.15 161.50
## topic_categoryEconomy and Trade -138.35 202.16
## topic_categoryExclusive content -322.35 238.58
## topic_categoryInternational coverage 100.59 203.45
## topic_categoryUK coverage -89.16 210.20
## topic_categoryUS coverage 600.01 208.24
## source_nameESPN -1494.24 692.71
## source_nameOther -957.37 195.31
## source_nameThe Irish Times -1505.12 257.40
## source_nameThe New York Times -380.09 262.83
## image_status 496.51 254.12
## t value Pr(>|t|)
## (Intercept) 3.102 0.00193
## sentiment_catneutral 0.293 0.76974
## sentiment_catpositive -1.023 0.30653
## topic_categoryEconomy and Trade -0.684 0.49376
## topic_categoryExclusive content -1.351 0.17669
## topic_categoryInternational coverage 0.494 0.62100
## topic_categoryUK coverage -0.424 0.67145
## topic_categoryUS coverage 2.881 0.00397
## source_nameESPN -2.157 0.03102
## source_nameOther -4.902 9.65e-07
## source_nameThe Irish Times -5.847 5.14e-09
## source_nameThe New York Times -1.446 0.14817
## image_status 1.954 0.05074
##
## (Intercept) **
## sentiment_catneutral
## sentiment_catpositive
## topic_categoryEconomy and Trade
## topic_categoryExclusive content
## topic_categoryInternational coverage
## topic_categoryUK coverage
## topic_categoryUS coverage **
## source_nameESPN *
## source_nameOther ***
## source_nameThe Irish Times ***
## source_nameThe New York Times
## image_status .
## ---
## Signif. codes:
## 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 6027 on 10306 degrees of freedom
## (117 observations deleted due to missingness)
## Multiple R-squared: 0.008083, Adjusted R-squared: 0.006928
## F-statistic: 6.999 on 12 and 10306 DF, p-value: 8.14e-13
\end{verbatim}
The New York Times didn't turn out to be significant, so we will regroup
it with \emph{Other}. We will also recategorise content as being either US
coverage or other coverage.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{final\_model\_set }\OtherTok{\textless{}{-}}\NormalTok{ clean\_articles }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{mutate}\NormalTok{(}\AttributeTok{popularity =}\NormalTok{ engagement\_reaction\_count }\SpecialCharTok{+}\NormalTok{ engagement\_comment\_count }\SpecialCharTok{+}\NormalTok{ engagement\_share\_count }\SpecialCharTok{+}\NormalTok{ engagement\_comment\_plugin\_count) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{left\_join}\NormalTok{(}
\FunctionTok{select}\NormalTok{(sentiment\_bing, }\FunctionTok{c}\NormalTok{(}\StringTok{"article\_id"}\NormalTok{, }\StringTok{"sentiment"}\NormalTok{)}
\NormalTok{ )) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{mutate}\NormalTok{(}\AttributeTok{source\_name =} \FunctionTok{case\_when}\NormalTok{(}
\NormalTok{ source\_name }\SpecialCharTok{==} \StringTok{"CNN"} \SpecialCharTok{\textasciitilde{}} \StringTok{"CNN"}\NormalTok{,}
\NormalTok{ source\_name }\SpecialCharTok{==} \StringTok{"The Irish Times"} \SpecialCharTok{\textasciitilde{}} \StringTok{"The Irish Times"}\NormalTok{,}
\NormalTok{ source\_name }\SpecialCharTok{==} \StringTok{"ESPN"} \SpecialCharTok{\textasciitilde{}} \StringTok{"ESPN"}\NormalTok{,}
\ConstantTok{TRUE} \SpecialCharTok{\textasciitilde{}} \StringTok{"Other"}\NormalTok{)) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{mutate}\NormalTok{(}\AttributeTok{image\_status =} \FunctionTok{if\_else}\NormalTok{(}\FunctionTok{is.na}\NormalTok{(url\_to\_image), }\DecValTok{0}\NormalTok{, }\DecValTok{1}\NormalTok{)) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{left\_join}\NormalTok{(articles\_gamma\_sliced) }\SpecialCharTok{\%\textgreater{}\%}
\FunctionTok{mutate}\NormalTok{(}\AttributeTok{topic\_category =} \FunctionTok{case\_when}\NormalTok{(}
\NormalTok{ topic\_category }\SpecialCharTok{==} \StringTok{"US coverage"} \SpecialCharTok{\textasciitilde{}} \StringTok{"US coverage"}\NormalTok{,}
\ConstantTok{TRUE} \SpecialCharTok{\textasciitilde{}} \StringTok{"Other coverage"}\NormalTok{))}
\NormalTok{final\_model\_lm }\OtherTok{\textless{}{-}} \FunctionTok{lm}\NormalTok{(}\AttributeTok{formula =}\NormalTok{ popularity }\SpecialCharTok{\textasciitilde{}} \SpecialCharTok{+} \FunctionTok{factor}\NormalTok{(topic\_category) }\SpecialCharTok{+}\NormalTok{ source\_name }\SpecialCharTok{+}\NormalTok{ image\_status, }\AttributeTok{data =}\NormalTok{ final\_model\_set)}
\FunctionTok{summary}\NormalTok{(final\_model\_lm)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
##
## Call:
## lm(formula = popularity ~ +factor(topic_category) + source_name +
## image_status, data = final_model_set)
##
## Residuals:
## Min 1Q Median 3Q Max
## -2167 -593 -577 -8 432688
##
## Coefficients:
## Estimate Std. Error
## (Intercept) 895.9 303.7
## factor(topic_category)US coverage 688.8 158.0
## source_nameESPN -1553.9 690.0
## source_nameOther -884.8 193.0
## source_nameThe Irish Times -1498.3 256.1
## image_status 582.4 245.5
## t value Pr(>|t|)
## (Intercept) 2.951 0.00318 **
## factor(topic_category)US coverage 4.361 1.31e-05 ***
## source_nameESPN -2.252 0.02435 *
## source_nameOther -4.584 4.62e-06 ***
## source_nameThe Irish Times -5.850 5.06e-09 ***
## image_status 2.373 0.01768 *
## ---
## Signif. codes:
## 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 6029 on 10313 degrees of freedom
## (117 observations deleted due to missingness)
## Multiple R-squared: 0.006721, Adjusted R-squared: 0.00624
## F-statistic: 13.96 on 5 and 10313 DF, p-value: 1.265e-13
\end{verbatim}
Finally, we can formulate our statistically significant formula as
follows:
\textbf{Popularity = 895.9 + 688.8 US\_Coverage - 884.8
Other\_than\_CNN\_Source - 1498.3 The\_Irish\_Times\_Source - 1553.9
ESPN\_Source + 582.4 With\_Image}
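To read the fitted equation, one simply adds the applicable terms; the
following is only an illustration of how the coefficients combine, using
the estimates above. An article from CNN (the baseline source) classified
as US coverage and published with an image is predicted at roughly
$895.9 + 688.8 + 582.4 \approx 2167$ engagements, whereas an ESPN article
with non-US coverage and no image comes out at roughly
$895.9 - 1553.9 \approx -658$, a negative value that mainly highlights the
limits of a linear fit on such heavily skewed engagement counts.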
\textbf{Interesting indeed, with all results now significant!} We can now
fairly conclude that US topics are, on average, more popular than any other
topic category, thanks to our unsupervised classification and the amazing
tidytext and stm packages.
CNN is also largely superior in terms of popularity, sitting on average
about 885 reactions/comments/shares above its peers.
ESPN and The Irish Times aren't so popular on Facebook. Their audiences
probably rely mostly on other platforms such as Twitter or Instagram, which
makes a lot of sense for ESPN given its likely younger generation of
followers.
\emph{What's good to know is that having an image boosts popularity by
about 582 engagements, so it is definitely worth adding an image before you
publish your article!}
\end{document}
| {
"alphanum_fraction": 0.7083441643,
"avg_line_length": 50.5484094053,
"ext": "tex",
"hexsha": "88e2e268f4ae1dc675c52a66723e20daa83fc53a",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "62b97393a1321bd4a135f1fe5f3f16c6bde1b391",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "moedaaboul/Internet_News_and_Consumer_Engagement",
"max_forks_repo_path": "index.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "62b97393a1321bd4a135f1fe5f3f16c6bde1b391",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "moedaaboul/Internet_News_and_Consumer_Engagement",
"max_issues_repo_path": "index.tex",
"max_line_length": 491,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "62b97393a1321bd4a135f1fe5f3f16c6bde1b391",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "moedaaboul/Internet_News_and_Consumer_Engagement",
"max_stars_repo_path": "index.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 21547,
"size": 73093
} |
\documentclass{article}
\usepackage[hidelinks]{hyperref}
\title{Math 15 Project - An algorithmic approach to Nonhomogeneous equation solving}
\date{2021-06-04}
\author{Elias Schablowski}
\begin{document}
\pagenumbering{gobble}
\maketitle
\newpage
\tableofcontents
\pagebreak
\pagenumbering{arabic}
\section{Abstract}
\paragraph{Differential Equations} are a cornerstone of the physical world. Many processes can be measured and modeled most accurately with ODEs.
\paragraph{Non-homogeneous Differential Equations} are a family of equations that is more difficult to solve because the solution depends on two parts: the solution of the homogeneous equation and a particular solution driven by the forcing function. It is also a very broad range of equations; for example, this category includes forced oscillators, double pendulums, and more.
\paragraph{Methods to solve} There are multiple methods for solving non-homogeneous equations, each with its own advantages and drawbacks. The most basic is the method of undetermined coefficients.
\section{Theory}
\subsection{Undetermined Coefficients}
\paragraph{The Idea} behind undetermined coefficients is to take a linear combination of all linearly independent derivatives of the forcing function, so that the terms can cancel each other out when differentiated and thus form the final equation.
$$y_p=\sum_{n=0}^{N} a_n f^{(n)},$$ where $N$ is the number of linearly independent derivatives and $a_n$ is a scaling factor.
\paragraph{The advantage} of undetermined coefficients is that it is capable of turning a differential equation into a system of equations in O(N) time.
\paragraph{The disadvantage} of undetermined coefficients is the requirement for a finite set of linearly independent derivatives. This excludes many common functions, such as $\tan(x)$ and $\frac{a}{x^n}$.
\subsection{Variation of Parameters}
\paragraph{The Idea} behind variation of parameters is to modify the complementary solution in such a way as to create the forcing function.
\paragraph{The advantage} of variation of parameters is that it can solve a wide array of problems.
\paragraph{The disadvantages} of variation of parameters are that it often does not result in a system of equations with a single solution and often requires additional constraints, so it does not lend itself to a computerized approach without bias. Furthermore, it also requires the solutions of the complementary equation to be known, which further adds to the complexity.
\subsection{The Laplace Transform}
\paragraph{The Idea} behind using the Laplace transform is to replace a problematic function with one that is simpler to work with and that can be transformed back without information loss (a one-to-one transformation).
\paragraph{Why the Laplace Transform?} The Laplace transform has multiple properties that make it an ideal candidate for solving ODEs and PDEs, namely linearity and uniqueness. Furthermore, differentiation in the original domain corresponds to simple algebraic operations in the transformed domain.
\paragraph{The disadvantage} of the Laplace transform is that computing it requires evaluating an integral, which can be complex.
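For reference (this is the textbook definition rather than anything specific to this project), the Laplace transform of $f(t)$ and its key differentiation property are
$$F(s)=\mathcal{L}\{f(t)\}=\int_0^\infty e^{-st}f(t)\,dt, \qquad \mathcal{L}\{f'(t)\}=sF(s)-f(0),$$
which is what turns a linear ODE in $t$ into an algebraic equation in $s$.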
\section{Algorithm}
\subsection{Inputs}
\paragraph{The Equation} is input as two expression trees, with one representing the ODE/PDE and the other representing the function.
\paragraph{The Preferred Method} is an optional parameter which specifies the preferred method to solve the ODE/PDE.
\subsection{Selection of Method}
This process is one of elimination, namely finding whether the faster methods work.
\subsubsection{Undetermined Coefficients}
To determine whether the ODE can be solved using undetermined coefficients, we check whether the function has a finite number of linearly independent derivatives.
The most efficient way to check this is to look for sub-expressions that rule it out: a function with an infinite number of linearly independent derivatives, division by a variable, or exponentiation with a variable base and exponent,
as per the \hyperref[sec:derivative_proofs]{Proofs}.
\subsubsection{Variation of Parameters}
This method is not used, because proving its applicability is more expensive than the efficiency gain over the Laplace transform.
\subsubsection{The Laplace Transform}
This method is used when Undetermined Coefficients cannot be used.
\subsection{Undetermined Coefficients}
\begin{enumerate}
\item Find All linearly independent derivatives
\item Sum all linearly independent derivatives together
\item Remove all constant coefficients and shifts.
\item Inject variables into all critical locations (coefficients of functions and variables as well as shifts)
\item Plug this new function into the ODE
\item Solve the equation for the values of the injected variables \footnote{In my implementation, this was done using a system of equations (which limits this implementation to polynomials)}
\item Substitute the found values in for the injected variables
\end{enumerate}
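As a brief illustration of these steps (this example is worked by hand here and is not part of the program's output): for $y'' + y = e^{2x}$ the forcing function $e^{2x}$ has a single linearly independent derivative (every derivative is a multiple of $e^{2x}$), so we inject one coefficient and try $y_p = A e^{2x}$. Substituting into the ODE gives
$$4Ae^{2x} + Ae^{2x} = e^{2x} \quad\Rightarrow\quad 5A = 1 \quad\Rightarrow\quad A = \frac{1}{5},$$
so $y_p = \frac{1}{5}e^{2x}$.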
\subsection{Laplace Transform}
\begin{enumerate}
\item Compute the Laplace Transform of the ODE.
\item Query for the initial conditions.
    \item Solve for $X(s)$\footnote{I used Wolfram Alpha to do this step due to the complexities of symbolic solving.}
\item Compute the Inverse Laplace Transform
\end{enumerate}
\footnotetext{See the implementation (as well as the tex document for this paper on \hyperlink{https://github.com/eschablowski/}{})}
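To make the pipeline concrete (again, a hand-worked example rather than program output): for $y' + y = 1$ with $y(0)=0$, step 1 gives $sX(s) + X(s) = \frac{1}{s}$, step 3 gives $X(s) = \frac{1}{s(s+1)} = \frac{1}{s} - \frac{1}{s+1}$, and step 4 returns $y(t) = 1 - e^{-t}$.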
\newpage
\section{Proofs}
\subsection{Finiteness of Derivatives}
\label{sec:derivative_proofs}
\paragraph{Note:} I made these proofs myself, as I did not find any general method for checking whether a function has infinitely many linearly independent derivatives.
\subsubsection{Addition}
\paragraph{Let} $f$ and $g$ be any functions.
\paragraph {Let} $y = f(x) + g(x)$
\paragraph{Let} $N_f$ be the number of linearly independent derivatives of $f$, $N_g$ be the number of linearly independent derivatives of $g$.
$$y^{(n)}=f^{(n)}(x)+g^{(n)}(x)$$
\paragraph{Let} $L_f = \sum_{n=0}^{N_f} f^{(n)}$ and $L_g = \sum_{n=0}^{N_g} g^{(n)}$
\paragraph{Since} $L_f$ and $L_g$ are the sums of the linearly independent derivatives of $f$ and $g$ respectively,
the sum of the linearly independent derivatives of $y$ is $L_f + L_g$.
\paragraph{Therefore} the set of linearly independent derivatives of $y$ is finite as long as $N_f \neq \infty$ and $N_g \neq \infty$.
\subsubsection{Multiplication}
\paragraph{Let} $f$ and $g$ be any functions.
\paragraph {Let} $y = f(x) * g(x)$
\paragraph{Let} $N_f$ be the number of linearly independent derivatives of $f$, $N_g$ be the number of linearly independent derivatives of $g$, and $N$ be $N_f$ if $N_f > N_g$ or $N_g$ otherwise.
$$y^{(n)}(x)=\sum_{i=0}^n {n \choose i} f^{(i)}(x) \, g^{(n-i)}(x)$$
\paragraph{Therefore,} if $f$ and $g$ have finite numbers of linearly independent derivatives, $y$ must also have a finite number of linearly independent derivatives.
\pagebreak
\section{Further items for Consideration}
\subsection{The Gaver/Stehfest Algorithm}
The Gaver/Stehfest Algorithm is a method for approximating the value of the inverse Laplace transform~\cite{stehfest_1970}.
This can be useful for solving ODEs inside a program and/or for turning an ODE into an evaluable function.
It was not used in this program because the goal of the program was to compute $y_p$ symbolically for output, not to evaluate $y(x)$ at particular points.
\subsection{Further proofs}
The following rules of differentiation still require proofs that they preserve the finiteness of linearly independent derivatives:
\begin{enumerate}
\item Chain rule
\item Division (probably most difficult)
        \item Exponentiation
\end{enumerate}
\subsection{Improvements of the program}
The following improvements can still be made to the program:
\begin{enumerate}
\item Add a CLI.
\item Implement a better symbolic equation solver.
\end{enumerate}
\pagebreak\nocite{*} % nocite is used since tex is removing the \cite commands for some reason.
\bibliography{paper}
\bibliographystyle{ieeetr}
\end{document}
| {
"alphanum_fraction": 0.7863161866,
"avg_line_length": 67.3813559322,
"ext": "tex",
"hexsha": "6097a381f5114656a5985bef3d224e6bf0848d40",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "c84d4c663cf6b20915e4c634783a903a340a417c",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "eschablowski/ODE-solver",
"max_forks_repo_path": "paper/paper.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "c84d4c663cf6b20915e4c634783a903a340a417c",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "eschablowski/ODE-solver",
"max_issues_repo_path": "paper/paper.tex",
"max_line_length": 394,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "c84d4c663cf6b20915e4c634783a903a340a417c",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "eschablowski/ODE-solver",
"max_stars_repo_path": "paper/paper.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1925,
"size": 7951
} |
\documentclass[master]{NTHUthesis}
\usepackage[utf8]{inputenc}
%%% Customization %%%
\usepackage{zhlipsum, lipsum} % dummy text
\usepackage{graphicx} % figures
\usepackage{booktabs} % tables
\usepackage[ruled]{algorithm2e} % algorithms, list of algorithms
\usepackage{cite} % bibliography
%%% Necessary %%%
\input{thesis_info}
\begin{document}
\makecover
\pagenumbering{Roman}
%%% Necessary %%%
\begin{abstractZH}
\input{contents/abstractZH}
\end{abstractZH}
%%% Optional %%%
\begin{acknowledgementsZH}
\input{contents/acknowledgementsZH}
\end{acknowledgementsZH}
%%% Necessary %%%
\begin{abstractEN}
\input{contents/abstractEN}
\end{abstractEN}
%%% Optional %%%
\begin{acknowledgementsEN}
\input{contents/acknowledgementsEN}
\end{acknowledgementsEN}
%%% Necessary %%%
\maketoc
%%% Optional %%%
\phantomsection
\listofalgorithms
\addcontentsline{toc}{chapter}{List of Algorithms}
\clearpage
\pagenumbering{arabic}
\chapter{Introduction}
\lipsum[1-3]
\begin{table}[t]
\centering
\begin{tabular}{ c | c c }
\toprule
cell1 & cell2 & cell3 \\
\midrule
cell4 & cell5 & cell6 \\
cell7 & cell8 & cell9 \\
\bottomrule
\end{tabular}
\caption{An example table}
\label{tab:my_label}
\end{table}
\chapter{Section Title}
\lipsum[1-3]
\begin{figure}
\centering
\includegraphics[width=.6\linewidth]{example-image-a}
\caption{An example figure.}
\label{fig:example-a}
\end{figure}
\section{Section Title}
\lipsum[4-5]
\subsection{Subsection Title}
\lipsum[6]
\chapter{Methodology}
\lipsum[1-3]
\begin{algorithm}[t]
\SetAlgoLined
\KwResult{Write here the result }
initialization\;
\While{While condition}{
instructions\;
\eIf{condition}{
instructions1\;
instructions2\;
}{
instructions3\;
}
}
\caption{How to write algorithms}
\end{algorithm}
\chapter{Related Works}
This is a dummy sentence~\cite{Alpher02}. This is a dummy sentence~\cite{Alpher03}. This is a dummy sentence~\cite{Alpher04}. A dummy Text\footnote{A dummy footnote.}.
\bibliographystyle{plain}
\bibliography{references}
\end{document}
| {
"alphanum_fraction": 0.7023474178,
"avg_line_length": 19.7222222222,
"ext": "tex",
"hexsha": "ced84c68a789a80fd419fcfb5f72ff59c02d2fa2",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "3290c1a20653b5c41b5e715ced80d776e8e82bb7",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "elsa-lab/NTHUthesis",
"max_forks_repo_path": "thesis_main.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "3290c1a20653b5c41b5e715ced80d776e8e82bb7",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "elsa-lab/NTHUthesis",
"max_issues_repo_path": "thesis_main.tex",
"max_line_length": 167,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "3290c1a20653b5c41b5e715ced80d776e8e82bb7",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "elsa-lab/NTHUthesis",
"max_stars_repo_path": "thesis_main.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 647,
"size": 2130
} |
%%
%% Chapter: 6
%%
\cleardoublepage % flush all material and start a new page, start new odd numbered page.
\addchap{Appendices} %% Add chapter without number
\label{cha:Appendices} %% No special characters, no space
%%% DELETE EVERYTHING BETWEEN %%%%%%%%%%%%%
\emph{This section contains all
\begin{itemize}
\item questionnaires,
\item interview transcripts,
\item pilot reports,
\item detailed tables, etc.
\end{itemize}
This section should contain material that supplements the main text of the report (spreadsheets, detailed experimental results, details of equation derivation, program listings, etc.).
If an equation is included in the body of the report, the derivation of that equation could be shown in the Appendix, or if you have a graph from a spreadsheet or program calculation, you could include the spreadsheet data or a program listing in the Appendix.
The key point is that the Appendix is supplemental information.
Depending on what was requested in the problem statement, detailed drawings might also go in the Appendix and only the overall assembly drawing would go in the body.
}
\clearpage
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section*{Appendix I: Work plan}
\addcontentsline{toc}{section}{Appendix I: Work plan}%
\begin{figure}[h]
\begin{ganttchart}[
hgrid,
%vgrid,
expand chart=\textwidth, % shrink/expand chart to available width
milestone/.append style={inner ysep=3mm}, % make milestones more visible
bar/.append style={fill=red!50}, % make bars red
bar label node/.style={text width=2.5cm,
align=right,
anchor=east,
font=\footnotesize\raggedleft},
%link mid=1, link bulge=1,
%link/.style={-to, rounded corners = 3pt},
%link tolerance=0,
calendar week text={W\currentweek}, % Change from 'Week 5' to 'W5'
time slot format=isodate
]{2021-02-01}{2021-04-30}
%\gantttitlecalendar{year, month=shortname, week=5} \\ % Set week=5 if chart start at week 5
\gantttitlecalendar{year, month=shortname} \\ % no week shown
\ganttbar{Task with \\longer text}{2021-02-15}{2021-02-20} \\ % One week for Task 1
\ganttbar{Task 2 also has long text}{2021-02-22}{2021-03-06} \\ % two week for Task 2
\ganttbar{Task 3}{2021-03-08}{2021-03-27} \\ % three week for Task 3
\ganttbar{Task 4}{2021-03-29}{2021-04-03} \\ % One week for Task 4
\ganttmilestone{Milestone 1}{2021-04-05}
\end{ganttchart}
\caption{Gantt chart for project activities.}
\end{figure}
\clearpage % have next section on separate page
%\cleardoublepage % have next section on new odd numbered page (right)
\section*{Appendix II: Budget}
\addcontentsline{toc}{section}{Appendix II: Budget}%
\begin{center}% insert some space before and after the table
\begin{minipage}{\columnwidth}% ensure that there is no page or column break
\centering% center the table
    \captionof{table}{Proposed budget}
    \label{tbl:Budget}
\begin{tabular}{llccrr}
\hline \\
Item & Description & Unit & Quantity & Rate (Shs.) & Amount (Shs.) \\
\\
\hline \\
1 & Food & [1] & 2 & 10,000 & 20,000 \\
2 & Drinks & [1] & 2 & 1,000 & 2,000 \\
3 & Ghost writer & [1] & 1 & 478,000 & 478,000 \\
\\
& \textbf{TOTAL} & & & & \textbf{500,000}\\
\hline \\
\end{tabular}
\end{minipage}
\end{center}
\clearpage % have next section on separate page
%\cleardoublepage % have next section on new odd numbered page (right)
\section*{Appendix III: Questionnaire}
\addcontentsline{toc}{section}{Appendix III: Questionnaire}%
\begin{center}
\begin{minipage}{\columnwidth}% ensure that there is no page or column break
\centering
\fbox{
	\includegraphics[scale=0.62]{600-Appendices/Questionnaire/Questionnaire}
	}
\captionof{figure}[Questionnaire]{Questionnaire to survey something.\textsuperscript{a}}
\label{fig:Questionnaire}
\small\textsuperscript{a}{SDAPS for optical mark recognition available at \url{https://sdaps.org/}}
\end{minipage}
\end{center}
\clearpage % have next section on separate page
\section*{Appendix IV: Some \LaTeX{} examples}
This section is to be deleted/commented by the author.
\input{600-Appendices/Examples/examples} | {
"alphanum_fraction": 0.6841614182,
"avg_line_length": 42.0294117647,
"ext": "tex",
"hexsha": "e8d7ed96b994033c8de2c3550259e14606d6b457",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "752fa78fb75b0c30af0be2bf04a72d9563e3f507",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "NDUWRDC/NDU-Thesis-Template",
"max_forks_repo_path": "600-Appendices/Appendices.tex",
"max_issues_count": 11,
"max_issues_repo_head_hexsha": "5245b8eff80b18cbc0f3443817fa62de05a40355",
"max_issues_repo_issues_event_max_datetime": "2021-05-08T12:32:17.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-11-17T12:17:29.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "davidNDU/test",
"max_issues_repo_path": "600-Appendices/Appendices.tex",
"max_line_length": 262,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "5245b8eff80b18cbc0f3443817fa62de05a40355",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "davidNDU/test",
"max_stars_repo_path": "600-Appendices/Appendices.tex",
"max_stars_repo_stars_event_max_datetime": "2021-03-17T13:57:10.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-03-17T13:57:10.000Z",
"num_tokens": 1301,
"size": 4287
} |
\documentclass[11pt,oneside%,draft%
]{memoir}
% --- Packages ----------------------------------------------
\usepackage[USenglish]{babel}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{textcomp}
\usepackage{color}
\usepackage{graphicx}
\usepackage{IEEEtrantools}
\usepackage{verbatim}
\usepackage{tocloft}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{braket}
\usepackage[hyphens]{url}
\usepackage{makeidx}
\usepackage[colorlinks=true,urlcolor=blue,linkcolor=blue,linktocpage=true]{hyperref}
% --- Book appearance ---------------------------------------
%\setstocksize{57em}{37em}
%\settrimmedsize{57em}{37em}{*}
%\setlrmarginsandblock{5em}{*}{1}
%\setulmarginsandblock{5em}{*}{1}
%\setlength{\headsep}{1.33em}
%\setlength{\footskip}{2.5em}
%\setlength{\parindent}{0em}
%\setlength{\parskip}{0.6em}
%\fixpdflayout
%\checkandfixthelayout
\setstocksize{6in}{6in}
\settrimmedsize{6in}{6in}{*}
%\setstocksize{8.7in}{6in}
%\settrimmedsize{8.7in}{6in}{*}
\setlrmarginsandblock{0.8in}{*}{1}
\setulmarginsandblock{0.8in}{*}{1}
\setlength{\headsep}{0.215in}
\setlength{\footskip}{2.5em}
\setlength{\parindent}{0em}
\setlength{\parskip}{0.6em}
\fixpdflayout
\checkandfixthelayout
\makepagestyle{thphp}
\makeevenhead{thphp}{\thepage}{\rightmark}{\thepage}
\makeoddhead{thphp}{\thepage}{\rightmark}{\thepage}
\makeoddfoot{thphp}{}{{}}{}
\makeevenfoot{thphp}{}{{}}{}
\pagestyle{thphp}
\renewcommand{\cftdot}{}
\setlength\cftparskip{1pt}
% --- Mathematical notation ---------------------------------
% Environments
\newenvironment{eqna}{\begin{IEEEeqnarray*}{c}}{\end{IEEEeqnarray*}\ignorespacesafterend}
\newenvironment{eqnb}{\begin{IEEEeqnarray*}{rCl}}{\end{IEEEeqnarray*}\ignorespacesafterend}
\newenvironment{narration}{\begin{em}}{\end{em}}
\newcommand{\nimi}[1]{\IEEEyesnumber\label{#1}}
% The plain equation and align environments are deliberately disabled;
% use the eqna/eqnb environments defined above instead.
\renewenvironment{equation}{\errmessage{Use eqna/eqnb instead of equation}}{}
\renewenvironment{align}{\errmessage{Use eqna/eqnb instead of align}}{}
% Derivatives
\newcommand{\der}[2]{\frac{\dd#1}{\dd#2}}
\newcommand{\pder}[2]{\frac{\partial#1}{\partial#2}}
\newcommand{\cder}[2]{\frac{D #1}{D #2}}
% Symbols
\newcommand{\puoli}{\frac{1}{2}}
\newcommand{\yksi}{\mathfrak{1}}
\newcommand{\andd}{\qquad\textrm{and}\qquad}
\newcommand{\orr}{\qquad\textrm{or}\qquad}
\newcommand{\wheree}{\qquad\textrm{where}\qquad}
\newcommand{\dd}{\mathrm{d}}
\newcommand{\ii}{\mathrm{i}}
\newcommand{\ee}{\mathrm{e}}
\newcommand{\circc}{\tau}
\newcommand{\paika}{\mathfrak{s}}
\renewcommand{\vec}[1]{\mathbf{#1}}
\newcommand{\dvec}[1]{\dot{\vec{#1}}}
\newcommand{\ddvec}[1]{\ddot{\vec{#1}}}
\newcommand{\pvec}[1]{\primed{\vec{#1}}}
% Operators
\DeclareMathOperator{\diag}{diag}
\DeclareMathOperator{\Det}{Det}
\DeclareMathOperator{\Tr}{Tr}
\DeclareMathOperator{\reaaliosa}{Re}
\DeclareMathOperator{\imaginaariosa}{Im}
\renewcommand{\Re}{\reaaliosa}
\renewcommand{\Im}{\imaginaariosa}
\newcommand{\primed}[1]{\hat{#1}}
\newcommand{\ind}[1]{\mathfrak{#1}}
\newcommand{\arxivreference}[1]{\url{#1}}
% Differential geometry
\newcommand{\chris}[3]{\{{_{#1}}\!{^{#2}}\!{_{#3}}\}}
\newcommand{\tensy}[2]{#1^{#2}}
\newcommand{\tensa}[2]{#1_{#2}}
\newcommand{\tensay}[3]{#1_{#2}^{\phantom{#2}#3}}
\newcommand{\tensya}[3]{#1^{#2}_{\phantom{#2}#3}}
% Dialog characters
\newcommand{\hea}{\(\blacklozenge\)\;}
\newcommand{\heb}{\(\Game\)\;}
% Colors and indices
\definecolor{safi}{RGB}{0,100,100}
\definecolor{oranssi}{RGB}{255,128,0}
\newcommand{\coa}{{\color{black}\bullet}}
\newcommand{\cob}{{\color{oranssi}\bullet}}
\newcommand{\coc}{{\color{cyan}\bullet}}
\newcommand{\cod}{{\color{red}\bullet}}
\newcommand{\coe}{{\color{magenta}\bullet}}
\newcommand{\cof}{{\color{green}\bullet}}
\newcommand{\cog}{{\color{safi}\bullet}}
\newcommand{\coh}{{\color{yellow}\bullet}}
\newcommand{\coi}{{\color{blue}\bullet}}
% UNCOMMENT THE NEXT LINE FOR TRADITIONAL GREEK INDICES
%\renewcommand{\coa}{\alpha}\renewcommand{\cob}{\beta}\renewcommand{\coc}{\gamma}\renewcommand{\cod}{\delta}\renewcommand{\coe}{\mu}\renewcommand{\cof}{\nu}\renewcommand{\cog}{\rho}\renewcommand{\coh}{\sigma}\renewcommand{\coi}{\xi}
% UNCOMMENT THE NEXT LINE FOR THE TRADITIONAL CIRCLE CONSTANT 2 PI
%\renewcommand{\circc}{2\pi}
\begin{document}
\frontmatter
\begin{titlingpage}
\begin{centering}
\HUGE\textbf{Time}\\
\vspace{0.4em}
\normalsize\emph{by}\\
\vspace{0.4em}
\textsc{Konsta Kurki}\\
\vspace{0.4em}
\textsc{\today}\\
%\vspace{5em}
%\vfill
\vspace{5em}
\textbf{---work in progress---}\\
\end{centering}
\vfill
Copyright {\textcopyright} 2015 Konsta Kustaa Kurki\par
This work is licensed under a Creative Commons Attribution 4.0 International License. See \url{http://creativecommons.org/licenses/by/4.0/} for the license and \url{http://github.com/konstakurki/time} for the source material.
\end{titlingpage}
\chapter{Change log}
\begin{narration}
This is an incomplete list of additions and changes made since the beginning of the writing.
\end{narration}
\textsc{June 28, 2015}\, Initial commit to \url{github.com/konstakurki/time}
\newpage
\tableofcontents
\mainmatter
%\book{The classical picture}
%\part{The very elements}
\chapter{The beginning}
\hea Excuse me.
\heb Yes?
\hea Could you tell me about time?
\heb Hmm. You mean the stuff you read from the upper right corner of your iPhone?
\hea I do not have an iPhone. But if I had I think it would be that.
\heb Why would you like to hear about it?
\hea I know how to read a clock but that's where my understanding ends and wondering begins. Sometimes I think I flow in time. Next moment I think time flows around me. And then I think nothing flows---or everything.
\heb I certainly feel you! Time's a peculiar fellow. Did you notice what kind of words you just used? Like `sometimes' and `moment'.
\hea Yeah, I know, it's embarrassing.
\heb You cannot escape him! Or her.
\hea I know. It's just hard to communicate without referring to time. Does it mean that I don't understand my own words?
\heb Well, it may be so. But don't worry---you're no worse than any one of us.
\hea You mean there's no person on Earth who understands his own words?
\heb I think understanding is just a feeling. Peace, acceptance. But it may be just a flash in time. In \emph{time!}
\hea For me it is not so easy to accept things like the fact that now it is a different moment than a couple of minutes ago. And now it is again different.
\heb It surely is.
\heb But see, there is no brief answer to your question.
\hea I think I can take a long one.
\heb You also think I would give you such?
\hea Wouldn't you?
\heb Your eyes really seem to be searching for something. Maybe you really want to think about these things. Am I right or are you just fooling me?
\hea I would really, really want to hear.
\heb All right, here's the deal: as long as you want to keep going, I will guide you through the universe, towards time. Our path will be long and difficult. But if I see you're not really interested, then I'll leave you just there. Or maybe we can do something else. But time needs your perfect focus. Understand?
\hea Understand.
\heb And one more thing. We probably won't make it to the end of the path. That's because I haven't reached it. As far as I know no one has. All we can do is just wander around and build some theory. Beautiful, but imperfect theory. Can you accept that?
\hea Just give me something. I must get somewhere with this question. Please. I've asked many people and all they say is something really silly like `time does not exist'. If something makes me that anxious it definitely must be there!
\heb I like you. I can feel the pain inside you with all my neurons. Not that I like it when someone does not feel good, but you seem to care about things that people usually do not even notice.
\heb Do you have any background?
\hea Actually I do have something. I've studied the foundations of mechanics. I know Newton's laws, the action principle and Hamilton's equations. And I also know some quantum mechanics.
\hea But it's all really on the level of doing calculations. In university they don't really care about understanding; it's just all about graduating and becoming a cool superstring theorist or something.
\heb Good! Then we can fly over some less essential stuff. I promise we'll be careful with the important things.
\subsection{Sharpening the question}
\heb Tell me some important things of your life.
\hea My family. And friends. Freedom.
\heb Something more primitive.
\hea Hmmm \ldots birth.
\heb Excellent! The primitive elements we use in construing our lives are events. How would you order your birth, the day you learned to speak and the cutting of your umbilical cord?
\hea I guess I could order them in many ways. Of course there is the one way of ordering them, but probably you don't mean that.
\heb Very often the right direction is so obvious and ridiculously simple that you do not even dare to think of it. So go on.
\hea Ok. So first I was born and then my umbilical cord was cut. Then, after an exhausting sequence of trials and errors, I said my first word. Am I right?
\heb Yes you are! What I mean by `time' is the affair---whatever it is in its deepest---that creates this order. Or at least I don't know how to describe it more precisely. Do you agree?
\hea I guess I do.
\hea We can measure it easily, right? For example by watching the corner of the stupid iPhone you mentioned.
\heb Yes we can. But there is some pretty complicated technology inside that device. There are also simpler methods.
\hea Of course. I think people have investigated how much time there is between two events for generations by counting for example how many summers or perhaps sunsets fit in between the events. I guess any repeating phenomenon would do it.
\heb That is indeed how each and every clock works. The `time that fits in between the events' is called the time interval of the events and is usually denoted by \(\Delta t\), the big triangle, the Greek capital delta meaning interval and \(t\) time.
\heb If we count sunsets we get a bigger number than if we counted summers. But what is beautiful is that the relation between the frequencies of sunsets and summers never changes! It's always something like 365. If we take any two good clocks, by which I mean a device built for measuring time intervals, their relative paces remain always the same.
\hea That definitely makes sense. If your clock has the same kind of second today as mine, I assume without a second thought that the same is true tomorrow. Does this mean that the time interval of two events is in some sense an absolute concept?
\heb Up to the time scale fixed by the clock, yes. We can of course convert time intervals read from different clocks to each other by knowing the relative paces of the clocks.
\heb It is useful to choose some reference time interval, for example the day, which is the interval between two successive midnights, or the second you mentioned, and express all intervals as multiples of it. Then each and every human being gets the same number of days for the same two events.
\hea So the time interval of two arbitrary events measured in days---or in seconds---is an absolute, observer--independent number. Sounds very reasonable.
\heb Now we can form a mathematical model of time. Take the real number line \(\mathbb{R}\) and attach events on it in such a way that bigger numbers correspond to later times and so that the time interval of two events corresponds to the difference in the numbers attached to the events. A real number \(t_E\) attached to an event \(E\) is called a time coordinate.
\heb A time coordinate is of course not unique. It depends on the unit of time we choose. We can also choose any event to label with \(t=0\), that is, we are free to choose the origin of time. The numbers zero and one are algebraically special, but they do not have any physical significance; they are just artifacts of our choice to use \(\mathbb{R}\) as our mathematical model.
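\heb Concretely (the symbols \(a\) and \(b\) below are just labels for this illustration): if you count time in days and I count it in seconds, and we picked different origin events, then our coordinates for the same event \(E\) are related by
\begin{eqna}
t'_E=a\,t_E+b,
\end{eqna}
where \(a\) is the fixed ratio of our units and \(b\) the offset between our origins. Intervals then transform as \(\Delta t'=a\,\Delta t\), so up to the choice of unit everyone agrees, just as we wanted.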
\hea Interesting.
\heb The real number line is topologically a continuum, which means that we can sensibly think of points as close as we wish. Physically that means that we can talk about time intervals as short as we want.
\hea Is that realistic? I mean that I can easily think of a year, a day, a second and even a blink of an eye, but where do we know that we can divide the blink as sharply as we wish?
\heb We don't. If nothing more, the real number line is a very convenient way to label events. And I guess we really have a certain feeling of time being continuous; at least it does not feel granular but smooth.
\hea That's true.
\hea But wait a second. I watched \emph{Interstellar} a couple of weeks ago. In one scene the main character Cooper gives a wrist watch to his daughter Murph and explains that when he returns from his space voyage and they compare their watches, they will find out that the clocks are no longer in sync, even though the clocks are identical. Was it crap or what?
\heb No it wasn't. Cooper is referring to a concept called proper time. It means the personal time experienced by an observer, which in fact is different for Cooper and Murph. But Cooper flies in very exotic circumstances. Here on Earth, where we cannot move as fast as Cooper, the proper times of us all are pretty much the same. That effectively universal proper time is called Galilean or Newtonian time and is the time which we described with a real number \(t\).
\hea So now you tell me that the time interval was not absolute. Why did you try to lie to me?
\heb In our everyday regime it really is absolute. We do not notice any deviations, and for hundreds of years people really believed in its strict absoluteness. If you are patient, we will get to relativity. Right now it would probably be too complicated to think of time intervals as subjective, but later we will see that actually the relativistic picture is astonishingly simple and beautiful.
\hea Okay. I accept that, so let's keep \(t\).
\section{Space}
\heb Could we measure intervals of some other physical, perhaps even more concrete, thing than time?
\hea You must mean distances in space.
\heb Yes I do. If you were about to measure say the distance between two stones, how would you do that?
\hea I would take a rope, tie some knots equidistantly on it, tighten it between the stones and count the knots in between.
\heb Excellent! I would have taken a meter stick but this is much more elegant. The process is very similar to measuring time; here the rope plays the role of a clock and the knot spacing sets up a distance scale.
\hea And like time intervals, also spatial distances are absolute up to a distance scale!
\heb Yes, at least in the regime of everyday phenomena.
\heb Space is very much like time, but there is a difference: in time you can only imagine to go backwards and forward; in space you can also go left, right, up and down.
\hea This sounds a bit stupid to me. I think that in time I cannot move back nor forward; time just passes. In space I can move, but it takes some time. I mean that `moving' means that location changes with time. It is ridiculous to say that time changes with time.
\heb Yeah, yeah. That's why I said that you can imagine. It is somewhat sloppy language. But the point is that space is three--dimensional while time has only one dimension.
\hea Yes. That's a difference.
\subsection{Some properties}
\heb What is the shortest path between two points in space?
\hea If I tighten a rope between the points, it of course settles down to a configuration in which there is as little rope as possible between the points. That's what tightening means. So the path the rope takes is the shortest path between the two points. The shortest path is a straight path.
\heb Excellent. This is one of the elementary properties of our space. The triangle inequality of elementary geometry can be seen as a special case of this fact.
\heb Let me take a stone and throw it in the air. Or you can do it.
\hea Ok.
\heb And watch carefully.
% figure: the stone flies
\hea I can imagine that the rock draws a curve in space.
\heb Yes. If you wait for a time \(\Delta t\), the curve really is curved, but if we now decrease \(\Delta t\) down to an almost vanishing time interval \(\dd t\), we get only a small portion of the original curve which looks practically straight. Let me denote that small arrow by \(\dd\vec{x}\).
\heb Such an arrow is called a vector. A vector is characterized by its length and the direction it points to.
\heb Now we can divide \(\dd\vec{x}\) by \(\dd t\) and get another vector
\begin{eqna}
\der{\vec{x}}{t}\doteq\dvec{x},
\end{eqna}
the velocity. If we used \(\dd t/2\) instead of \(\dd t\), the particle would have made only half of \(\dd\vec{x}\). The velocity does not depend on the particular value of \(\dd t\) if it is small enough.
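\heb A simple illustration of my own: suppose the stone happened to move along a straight line at a steady rate, so that in every little \(\dd t\) it makes the same small step \(\dd\vec{x}=\vec{u}\,\dd t\), with \(\vec{u}\) a fixed vector. Then
\begin{eqna}
\dvec{x}=\der{\vec{x}}{t}=\frac{\vec{u}\,\dd t}{\dd t}=\vec{u},
\end{eqna}
the same velocity vector at every moment, whatever small \(\dd t\) we happen to use.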
\heb You can think of the vectors \(\dd\vec{x}\) and \(\dvec{x}\) as being localized somewhere on the trajectory of the stone, but right now it is not important.
\hea Why does the location not matter?
\heb It is because we can parallel transport a vector, that is, move it while keeping its length and direction unchanged. If you don't trust your sense of direction, you can do it for example with the aid of a compass.
\hea Wait a minute. If I go around the magnetic north pole, the needle of the compass turns around. I wouldn't really trust even a compass.
\heb True. But there is also another elegant and physically interesting way of doing the parallel transport.
\hea What is it?
\heb Take a wheel of a bicycle and put it to spin. You will notice that the spinning makes it difficult to change the direction of the axis. With a fast enough spin and a good suspension the wheel can be used to keep track of a direction.
\hea Oh, you are talking about a gyroscope. They're interesting devices.
\heb We can add two vectors by moving, of course by parallel transport, one arrow so that it starts from the end of the other. The sum is the arrow starting from the beginning of the first vector and ending at the end of the second. It is obvious that vectors can also be multiplied by numbers.
\heb Vector spaces are well--known objects in mathematics, and if you are interested, you can find some details in, for example, the book by Riley, Hobson and Bence (2006). Vectors are quite intuitive fellows, perhaps just because we see them in the physical world everywhere around us.
\section{Tensors}
\heb If there is a lamp in a room, the air is hot near the lamp. The temperature, let me denote it by \(\phi\), varies through space. Such a thing, a thing describable by a number at every point in space, is called a scalar field.
\heb We may imagine building surfaces of equal temperature in space. Can you imagine what a surface of thirty degrees Celsius looks like?
\hea The room temperature is less than thirty degrees, and the lamp is probably hotter. I guess the temperature increases somewhat smoothly, so if we approach the lamp from any direction, at some point we must pass a point at which the temperature is exactly thirty degrees.
\hea Those points form the surface of thirty degrees. It probably looks more or less like a sphere with the lamp at its center.
\heb Exactly. If we look closely enough, the surfaces of equal temperature look flat, and are spaced equidistantly.
\heb Now think about two points close to each other, separated by \(\dd\vec{x}\). The temperature difference between them is \(\dd\phi\). If you draw a picture of the surfaces of equal temperatures and \(\dd\vec{x}\), it is clear that \(\dd\phi\) is equal to the number of the surfaces pierced by \(\dd\vec{x}\). We may write it as
\begin{eqna}
\dd\phi=\nabla\phi\cdot\dd\vec{x}.
\end{eqna}
\hea I see.
\heb So the temperature field \(\phi(\vec{x})\) defines a function at each point in space. That function, called the gradient of the temperature and denoted by \(\nabla\phi\), takes \(\dd\vec{x}\) in and produces the difference \(\dd\phi\) between the points separated by \(\dd\vec{x}\).
% figure: a vector piercing the gradient surfaces
\heb If you watch the picture, you see that the gradient is linear in \(\dd\vec{x}\), that is,
\begin{eqna}
\nabla\phi\cdot(a\,\dd\vec{x}+b\,\dd\vec{y})=a\nabla\phi\cdot\dd\vec{x}+b\nabla\phi\cdot\dd\vec{y}
\end{eqna}
with \(a\) and \(b\) numbers and \(\dd\vec{x}\) and \(\dd\vec{y}\) vectors.
\heb A function like \(\nabla\phi\), that is, a linear function that takes a vector and produces a number, is called a one--form. If we have two one--forms \(\omega\) and \(\sigma\), we may define their linear combination as
\begin{eqna}
(a\,\omega+b\,\sigma)\cdot\vec{v}=a\,\omega\cdot\vec{v}+b\,\sigma\cdot\vec{v}
\end{eqna}
for any numbers \(a\) and \(b\) and any vector \(\vec{v}\).
\hea So the one--forms also form a vector space.
\heb Yes. It is called the dual vector space. Sometimes one--forms are called dual vectors.
\heb We can also consider linear maps, or tensors as they are usually called, which take any number of vectors as inputs and are linear in every input vector.
\heb The notation becomes a bit complex if a tensor has many inputs. That is why I think we should adopt a funny but practical notation. If we have for example a tensor \(T\) which takes three vectors and the input vectors are \(\vec{a}\), \(\vec{b}\) and \(\vec{c}\), we could write
\begin{eqna}
T(\vec{a},\vec{b},\vec{c})\doteq\tensa{T}{\coa\cob\coc}\vec{a}^\coa\vec{b}^\cob\vec{c}^\coc.
\end{eqna}
\hea Looks hilarious. What are those colored balls?
\heb They are wireless connectors that plug the vectors into the tensor. People usually prefer Greek letters like \(\alpha\) and \(\mu\), but I think the colored balls are clearer. If you would like to see letters instead of colors, you must go to the source code of our lives and uncomment one line.
\hea I like color.
\heb Me too! The order in which the vectors and the tensor are written does not matter; the colors keep track of which connector, or index as they are often called, is connected to which. A single vector \(\vec{v}\) mapped by a dual vector \(\omega\) can therefore be written as
\begin{eqna}
\omega\cdot\vec{v}=\omega_\coa\vec{v}^\coa=\vec{v}^\coa\omega_\coa.
\end{eqna}
This suggests that we can think of \(\vec{v}\) as a tensor which takes the one--form \(\omega\) as its input! It does not really matter how we choose to think of it; we just connect an upper index to a lower and that's it.
\hea So we could have tensors with lower and upper indices, for example \(\tensya{K}{\coa\cob\coc}{\cod\coe\cof}\), taking vectors and dual vectors as inputs.
\heb Yes, we very well could. And as you probably guess, we can write any number of tensors in a row and then connect, or contract as it is often said, any lower index with any upper one.
\hea Actually it is useful to think that we first form an outer product, for example
\begin{eqna}
\tensya{(K\otimes T)}{\coa\cob\coc}{\cod\coe\cof\cog\coh\coi}\doteq\tensya{K}{\coa\cob\coc}{\cod\coe\cof}\tensa{T}{\cog\coh\coi}.
\end{eqna}
It is a tensor which takes vectors and dual vectors just as you would guess from the indices. Its value is the value of \(T\) fed with three vectors, multiplied by the value of \(K\) fed with three vectors and three one--forms. It is easy to verify that it is linear in every index. Then we contract any upper index--lower index pairs we want.
\heb We can for example contract all three upper indices with the last three lower indices of the above outer product and get
\begin{eqna}
\tensya{(K\otimes T)}{\coa\cob\coc}{\cod\coe\cof\coa\cob\coc}\doteq\tensa{(KT)}{\cod\coe\cof}
\end{eqna}
which is a tensor with three lower indices.
\hea How elegant is that! I just take some tensors, form an outer product of them by writing them in a row in whichever order I want, and then contract the indices I want. Every contraction removes one upper and one lower index. If there are equal numbers of upper and lower indices and I contract them all, I am left with a number, right?
\heb It is just that simple. Vectors are tensors with one upper index and one--forms tensors with one lower index. A number can be thought of as a tensor with no indices at all.
\hea Can I take a linear combination of any two tensors?
\heb If they have the same index structure, then yes. Tensors of a specific type form a vector space. So there is a vector space of tensors of type \(T_{\coa\cob}\), of type \(\tensya{T}{\coa}{\cob}\), of type \(\tensay{T}{\coa}{\cob\coc}\) and so on.
\hea This is very cool. I think I'm gonna read something about vector spaces and tensors.
%
%\heb Yeah, do that! One
\subsection{Inner product}
\hea I read that a vector space is often equipped with an inner product. An inner product of two vectors is linear and symmetric in both of the vectors. I'm a bit confused, because Wikipedia denoted the inner product of two vectors \(\vec{v}\) and \(\vec{w}\) by \(\vec{v}\cdot\vec{w}\), that is, exactly the same way as we denoted a vector mapped by a one--form.
\hea I guess that since the inner product is linear, we could write
\begin{eqna}
\vec{v}\cdot\vec{w}=g_{\coa\cob}\vec{v}^\coa\vec{w}^\cob
\end{eqna}
with \(g\) a tensor.
\heb Great! Because the inner product is symmetric, we have \(g_{\coa\cob}=g_{\cob\coa}\). This may seem a bit sloppy, but it means that
\begin{eqna}
g_{\coa\cob}\vec{v}^\coa\vec{w}^\cob=g_{\cob\coa}\vec{v}^\coa\vec{w}^\cob
\end{eqna}
for any vectors \(\vec{v}\) and \(\vec{w}\).
\heb An inner product describes the overlap of two vectors. If we consider the overlap of a vector, for example a separation \(\dd\vec{x}\), with itself, we get a number which is proportional to the square of the length of \(\dd\vec{x}\). So there is a natural way to choose the tensor \(g\).
\hea Let me guess: we choose \(g\) so that
\begin{eqna}
|\dd\vec{x}|^2=g_{\coa\cob}\dd\vec{x}^\coa\dd\vec{x}^\cob
\end{eqna}
exactly?
\heb Yes. The tensor \(g\) is called the metric tensor of space, since it measures the distance between points separated by \(\dd\vec{x}\). For \(\dd\vec{x}\) one usually writes \(|\dd\vec{x}|^2\doteq\dd s^2\) and for any other vector \(\vec{v}\) that \(|\vec{v}|^2\doteq\vec{v}^2\). It is a bit inelegant to have different conventions for different vectors but this notation is standard and widespread.
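\heb A small check, just an illustration of my own: bilinearity already encodes how lengths scale. If we stretch the separation to \(2\,\dd\vec{x}\), then
\begin{eqna}
g_{\coa\cob}(2\dd\vec{x})^\coa(2\dd\vec{x})^\cob=4\,g_{\coa\cob}\dd\vec{x}^\coa\dd\vec{x}^\cob=4\,\dd s^2,
\end{eqna}
so the squared distance quadruples when the separation doubles, exactly as a squared length should.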
\subsection{Raising and lowering}
\heb The metric is so important that for each vector \(\vec{v}^\coa\) it is useful to define a dual vector \(g_{\coa\cob}\vec{v}^\cob\doteq\vec{v}_\coa\). That is, we really do not bother to think of vectors and dual vectors as different kinds of things; we only have vectors, whose index can be written upstairs or downstairs.
\hea Okay, now I see why the inner product was written in the same way as a vector mapped by a one--form.
\heb We can actually lower any upper index of any tensor in the same way, for example \(g_{\coa\cob}T^{\cob\coc}\doteq\tensay{T}{\coa}{\coc}\).
\hea You said that we don't bother to fundamentally distinguish vectors and dual vectors. So we can raise an index back up as easily as we lowered it?
\heb Yes we can. For this purpose we define an inverse metric \(g^{\coa\cob}\) by
\begin{eqna}
g^{\coa\cob}g_{\cob\coc}=1^{\coa}_{\coc},
\end{eqna}
in which \(1\) is the unit tensor which does absolutely nothing. We could also think of \(1\) as the metric \(g\) with the other index raised. So first lowering and then raising is doing nothing, as it should be. And the same holds of course for first raising and then lowering.
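\heb Here is a one--line check of my own: lower an index and then raise it back,
\begin{eqna}
g^{\coa\cob}\vec{v}_\cob=g^{\coa\cob}g_{\cob\coc}\vec{v}^\coc=1^{\coa}_{\coc}\vec{v}^\coc=\vec{v}^\coa,
\end{eqna}
and we really do get the original vector back.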
\hea So the picture simplifies even more. We write tensors with indices placed wherever it is practical and then contract. The metric never needs to be written explicitly. Damn this is elegant.
\heb Yes; though of course we may sometimes want to write it out explicitly, for example when I write
\begin{eqna}
\dd s^2=g_{\coa\cob}\dd\vec{x}^\coa\dd\vec{x}^\cob
\end{eqna}
I want to emphasize that the metric defines the distance between two nearby points.
\heb Note that raising and lowering were made possible by the existence of the metric. But our familiar space is just one example of a manifold, a highly useful mathematical concept. A manifold is kind of a smooth space with a definite dimension. Not every manifold has a distinguished metric; if there is none, we may really need to distinguish between vectors and dual vectors.
\heb I should probably mention that sometimes we also do not have gyroscopes at hand and it is not necessarily possible to parallel transport vectors far away. However, vectors located at the same point can still be added. This is because a manifold---actually a differentiable manifold, to be precise---is smooth enough to allow parallel transporting vectors for small distances and therefore make moving short arrows one after another possible.
% bold used in space
\subsection{Length of a curve}
\heb Now we can calculate the length of any path or curve in space by
\begin{eqna}
s=\int\dd s=\int\sqrt{g_{\coa\cob}\dd\vec{x}^\coa\dd\vec{x}^\cob}.
\end{eqna}
We can also parametrize the path for example by the elapsed time \(t\) as a body moves along the path. Then we have
\begin{eqna}
s=\int\sqrt{g_{\coa\cob}\der{\vec{x}^\coa}{t}\der{\vec{x}^\cob}{t}}\,\dd t=\int\sqrt{g_{\coa\cob}\dvec{x}^\coa\dvec{x}^\cob}\,\dd t=\int|\dvec{x}|\,\dd t.
\end{eqna}
The first equality comes simply from the fact that
\begin{eqna}
\frac{\dd t}{\sqrt{\dd t\,\dd t}}=1.
\end{eqna}
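\heb As a quick check, with an example of my own choosing: if a body moves along any path with a constant speed \(|\dvec{x}|=v\), the formula gives
\begin{eqna}
s=\int|\dvec{x}|\,\dd t=v\,\Delta t,
\end{eqna}
distance equals speed times elapsed time. And for one full turn around a circle of radius \(R\) at unit angular rate, the speed is \(|\dvec{x}|=R\) and the elapsed time is \(\circc\), so \(s=\circc R\), the familiar circumference.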
\subsection{Differentiating tensors}
\heb Vectors can be added together and multiplied by numbers, so we can differentiate a vector \(\vec{v}\) that depends for example on \(t\) as we differentiate any quantity,
\begin{eqna}
\dot{\vec{v}}=\frac{\vec{v}(t+\dd t)-\vec{v}(t)}{\dd t},
\end{eqna}
and the result is a new vector. The variable \(t\) does not need to be time; it can be for example the position on some curve in space, if there is a vector at every point on the curve.
\heb We can do the same for any tensor. Like for almost any product, the Leibniz rule holds also for outer products of tensors. For example
\begin{eqnb}
\der{}{t}(\vec{v}\cdot\vec{w})&=&\der{}{t}\left(g_{\coa\cob}\vec{v}^\coa\vec{w}^\cob\right)\\
&=&\dot{g}_{\coa\cob}\vec{v}^\coa\vec{w}^\cob+g_{\coa\cob}\dvec{v}^\coa\vec{w}^\cob+g_{\coa\cob}\vec{v}^\coa\dvec{w}^\cob.
\end{eqnb}
If \(\vec{v}\) and \(\vec{w}\) are any unchanging vectors, their inner product also remains unchanged. Therefore \(\dot{g}\equiv0\). The metric tensor is constant and the Leibniz rule for the inner product reads
\begin{eqna}
\der{}{t}(\vec{v}\cdot\vec{w})=\dvec{v}\cdot\vec{w}+\vec{v}\cdot\dvec{w}.
\end{eqna}
\heb Because the metric is constant, it holds that
\begin{eqna}
\dvec{v}_\coa=\der{}{t}(g_{\coa\cob}\vec{v}^\cob)=g_{\coa\cob}\dvec{v}^\cob,
\end{eqna}
that is, the derivative of a contraction is the contraction of the derivative. This makes things even easier: we can just think of the derivative of a vector---or any other tensor---with its indices placed anywhere we want.
\hea All right. Sounds reasonable. But why is the Leibniz rule true in the first place? I mean if we have for example functions \(f(t)\) and \(g(t)\), then why
\begin{eqna}
\der{fg}{t}=\dot{f}\,g+f\,\dot{g}?
\end{eqna}
\heb That's a good question. For the change \(\dd(fg)\) it holds that
\begin{eqna}
\dd(fg)=(f+\dd f)(g+\dd g)-fg=\dd f\,g+f\,\dd g+\dd f\,\dd g.
\end{eqna}
Because the changes are small, the last term is negligible. After dividing by \(\dd t\) we get just what you wrote. As you can see, this reasoning is quite general and does not really care whether \(f(t)\) and \(g(t)\) are numbers, tensors or whatever.
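\heb For instance, a check with ordinary functions of my own choosing: take \(f=t^2\) and \(g=t^3\), so that \(fg=t^5\). Then
\begin{eqna}
\der{(fg)}{t}=5t^4\andd\dot{f}\,g+f\,\dot{g}=2t\cdot t^3+t^2\cdot3t^2=5t^4,
\end{eqna}
and the two sides agree, as they should.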
\heb If we have a tensor field, for example a vector \(\vec{v}(\vec{x})\) at every point in space, then the difference \(D\vec{v}\) of vectors at points separated by \(\dd\vec{x}\) is clearly linear in \(\dd\vec{x}\). We can therefore define a tensor \(\nabla\vec{v}\), called the covariant derivative, in such a way that
\begin{eqna}
D\vec{v}^\coa=\nabla_\cob\vec{v}^\coa\dd\vec{x}^\cob
\end{eqna}
for any \(\dd\vec{x}\).
% note: x is not a vector
\hea Why did you write \(D\vec{v}\) instead of \(\dd\vec{v}\)?
\heb It is due to subtleties that become important when we use numbers to represent tensors. We may talk about it later.
\hea Ok. I presume that we can do the same for any tensor field and write for example
\begin{eqna}
DT_{\coa\cob\coc}=\nabla_\cod T_{\coa\cob\coc}\dd x^\cod.
\end{eqna}
\heb Yes we can. If the point we consider moves with time, we can divide by \(\dd t\) and get
\begin{eqna}
\dot{T}_{\coa\cob\coc}=\frac{DT_{\coa\cob\coc}}{\dd t}=\frac{\nabla_\cod T_{\coa\cob\coc}\dd\vec{x}^\cod}{\dd t}=\nabla_\cod T_{\coa\cob\coc}\dvec{x}^\cod.
\end{eqna}
\heb It looks stupid to have different kinds of d's in the same expression.
\hea Yeah, maybe we should just write
\begin{eqna}
\dot{T}=\frac{D T}{Dt}.
\end{eqna}
\heb Note that differentiating a tensor field requires comparing tensors located at nearby points, which can only be achieved by parallel transport. If the manifold does not have a parallel transport, then the covariant derivative cannot be formed.
\hea But we just added vectors by parallel transporting them short distances. You said that it can be done because the manifold is smooth.
\heb Yeah, but if we differentiate tensors, the nearby tensors are nearly the same, and their difference is already small. It is of the same order of smallness as the error that comes from parallel transporting by just relying on smoothness, so that error can no longer be neglected.
\section{Perspectives}
\heb Things don't look the same from the perspectives of different persons, right?
\hea Definitely not. A capitalist feels envy for a poor guy getting an unemployment compensation. The capitalist thinks that the poor guy hasn't done anything to earn it. On the other hand the poor guy thinks that the capitalist is acting selfishly since he has so much money that he could save many from starvation but is not willing to do that.
\heb Yeah, that's so very absurd. Even though we have all the technology to give everyone a good life and make hating old--fashioned, we're not willing to pursue that opportunity.
\heb But there are also other kinds of differences of perspective that are more relevant for us here.
\hea I know, I just wanted to make an important remark. The motion of a rollercoaster looks different when observed from different locations.
\heb Also the look changes if the observer turns his eyes and looks to different locations. But we are smart enough to view space as a somewhat objective thing and can think of things like locations and vectors without choosing any particular observer positioned and oriented in a particular way, right?
\hea Yeah.
\heb But there is an affair that matters.
\hea What is it?
\heb Motion. The differences in the motions of observers cannot be dealt with as easily as the differences in positions and orientations.
\hea Why is that? We just have to take into account that locations change with time. If I have two points and their separation \(\dd\vec{x}\), a moving observer sees both points moving and understands that the separation \(\dd\vec{x}\) does not change at all. Both observers can understand it as the same separation vector. I would like to think that vectors and tensors are the same even though people may move with respect to each other.
\heb Well, for the separation vector of two fixed points, it really is so. Also the metric, which represents the overlap of two vectors, can be seen as the same, because the overlap of two vectors must remain unchanged if the vectors themselves remain unchanged.
\heb But think about the velocity of a particle. One observer sees its position changing by \(\dd\vec{x}\) in a time interval \(\dd t\). But another observer, moving with respect to the first, sees a change \(\dd\pvec{x}=\dd\vec{x}+\dd\vec{o}\) in which \(\dd\vec{o}\) is the motion of the first observer from the perspective of the second. For the velocity vectors \(\dvec{x}\) and \(\dot{\pvec{x}}\) it holds that
\begin{eqna}
\dot{\pvec{x}}=\der{\pvec{x}}{t}=\frac{\dd\vec{x}+\dd\vec{o}}{\dd t}=\dvec{x}+\dvec{o}.
\end{eqna}
We cannot think of the velocity vector objectively, unless we assume that there is some natural state of `resting'.
\hea Do we?
\heb Aristotle and company assumed that natural resting means not moving with respect to the Earth.
\heb But think about some simple phenomena, for example a swinging pendulum. Does the swinging care about motion?
\hea Well, if I put the pendulum on top of a car the air probably disturbs it, but yeah, I get the point. Inside the car it swings just like inside a house standing firmly on rock, at least if the ride is not so bumpy.
\heb Correct. If we eliminate disturbances like wind, the pendulum always swings the same way when the lab is moving with a constant velocity. A house, a uniformly moving car and an elevator are all equally good labs.
\heb The idea that there is a natural equality of observers that move uniformly with respect to each other is called the principle of relativity. It seems to be very generally true.
\heb However, accelerating observers, I mean those that are moving nonuniformly with respect to each other, are not equal. A pendulum swings differently on a carousel and in a car driven by a madman.
\hea Interesting. And sensible.
\heb If two observers move uniformly, with a constant velocity \(\dvec{o}\) with respect to each other, we have \(\dot{\pvec{x}}=\dvec{x}+\dvec{o}\) as we noted. But for the acceleration \(\ddot{\vec{x}}\) it holds that
\begin{eqna}
\ddot{\pvec{x}}=\der{}{t}\dot{\pvec{x}}=\der{}{t}(\dvec{x}+\dvec{o})=\ddot{\vec{x}}
\end{eqna}
since \(\ddot{\vec{o}}\equiv 0\). The principle of relativity could be loosely formulated by saying that \emph{acceleration is absolute, velocity is not.}
\subsection{Newton's first law}
\heb Now we are ready to formulate an equation describing a law of nature.
\hea Cool!
\heb Consider a body, for example the stone, isolated as well as we can from the rest of the world. For simplicity let us neglect the possible rotation of the stone, that is, we think of it as being a point particle.
\hea Well, I think we need to eliminate the resistance of air. But rocks are quite heavy objects compared to their size, so the air resistance really does not matter. Another thing we should eliminate is gravity. But I don't know how we could do that. Do you?
\heb Yes and no! You asked a question that will take us very far. Really. But let us not go there yet. If we cannot eliminate gravity, the stone makes an arc. But if the stone moves really fast and we consider only a very small time scale, then gravity does not really have time to have any effect and we may neglect it. Or, we may imagine somehow compensating the gravitational force with some other force. Or maybe we could just remove gravity somehow.
\hea Okay, let us assume there is no gravity. Then the stone would probably go just straight.
\heb Yes it would. According to the principle of relativity, the equation describing the motion of the stone, or any free particle, cannot involve the velocity directly. If we have neglected the effect of the environment, even gravity, then the law describing the stone cannot involve the location of the particle directly and must be independent of the direction where the stone is going. These notions are usually called the homogeneity and isotropy of space.
\heb So, write on the blackboard the simplest law, consistent with these requirements, that you can ever imagine!
\heb Come on, be brave.
\hea Hmm.
\begin{eqna}
\ddot{\vec{x}}=0
\end{eqna}
\heb \emph{Voil\`a!} You just invented Newton's first law: free particles move straight, with unchanging velocity.
\hea There are also other equations consistent with our requirements, for example \(\dddot{\vec{x}}=0\). But that would mean that the particle somehow remembered its acceleration. It would also mean that even acceleration was not absolute, only the change of acceleration, or jerk. But our experience tells us that acceleration is absolute. Newton's first law is in great agreement with experiments.
\hea How could it be that the simplest answer tends to be the correct one? I mean, I do really feel that `If I know nothing about it, what else would I guess', but it still feels a bit, or not even just a bit, muddy.
\heb Say no more. In philosophy it's called \emph{Occam's razor} and in some areas of physics the \emph{minimal substitution principle}. I think it is just an integral part of common sense. Impossible to prove or argue for, but also impossible to live without.
\heb It is a somewhat similar issue to the one in probability theory. Why do you expect that out of very many throws of dice about one sixth are sixes?
\hea I have no idea. Or I would say it's because of symmetry. But I cannot say why symmetry of the dice implies symmetry of the probabilities.
\heb Exactly. It just feels obvious. Even the rigorous mathematical theory of probabilities and measures and stochastics just leans on that assumption.
\hea But it is a very good assumption. What are the chances that it is wrong?
\hea Wait. What did I just say? Probabilities seem to be as enigmatic as time.
\heb Yeah, they do. But if we want to focus on time, let's move on. Maybe we can come back to probabilities at some point?
\hea Yeah, let's do that.
\chapter{Some physics}
\heb If you had to affect the motion of the stone somehow, what would you do?
\hea I could for example push it.
\heb Yeah. You could generate force with your muscles and direct it with your limbs. Let us denote the force you exert on the stone by \(\vec{F}\). It is pretty obvious that force is a vector: it has a direction and a magnitude. If the stone is otherwise free, how does it react to the force?
\hea If Newton's first law says that the velocity of the free particle remains constant, maybe the force changes it.
\heb Yes, yes, yes! Here it comes again, Occam's razor or just plain common sense, and writes
\begin{eqna}
\vec{F}=\ddot{\vec{x}}.
\end{eqna}
Is this good? At least it reduces to the first law if \(\vec{F}=0\).
\section{Newton's second law}
\hea Different stones, for example a small and a big one, react differently to a force.
% motion is continuous and smooth
\heb Right, so let us add a constant \(m\), called mass, which quantifies that and write
\begin{eqna}
\vec{F}=m\ddot{\vec{x}}.
\end{eqna}
This equation is called Newton's second law. The greater the mass of the particle, the more resistant it is to changes in its motion.
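\heb To get a feel for what the mass does (a small check of my own): for one and the same force the acceleration is inversely proportional to the mass,
\begin{eqna}
\ddot{\vec{x}}=\frac{\vec{F}}{m},
\end{eqna}
so a stone of twice the mass gets only half the acceleration from the same push.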
\hea That seems reasonable. But what about \(\vec{F}\), is it clear that it is absolute in the sense in which the acceleration is but the velocity is not?
\heb No. But it is also not perfectly clear how to attach an arrow describing the force that comes out of your muscles. At least it is not as easy as drawing an arrow between two fixed points in space.
\heb Here we need to decide how to define things. We need to guess what would be a reasonable definition, and here it is not perfectly clear. Let us assume that the force \emph{is} absolute in the sense in which the acceleration is, and let us use the second law as a definition of force.
\hea Could different definitions lead to different physical results?
\heb Yes they could. That is because the force is something concrete, something that you really produce with your body. Newton's second law really says that the mass is the only way in which bodies' reactions to force differ.
\heb We could for example let the mass depend on the acceleration. And that dependence could be different for different particles. We just guess Newton's second law.
\hea I'm not perfectly comfortable with this reasoning.
\heb I understand you, but the situation could be so much worse! Think about some theories of psychology.
\hea Are you dissing psychology?
\heb No, I'm just pointing out how lucky we are. Our task is so much easier.
\heb And I can tell you that Newton's second law is a pretty good guess---it is valid over a very large range of phenomena.
\subsection{Particle in a gravitational field}
\heb We already considered a simple example, the stone thrown up in the air. If we throw the stone straight up, its motion is vertical and we need only one number, say \(h\), to describe the location. We can take \(h\) to be the distance from the stone to the ground.
\heb Could you write down the equation of motion?
\hea Well, I guess the only relevant force in the problem is the gravitational force. It points straight down. Let me call it \(G\). The equation of motion thus reads
\begin{eqna}
G=m\ddot{h}.
\end{eqna}
\heb Yes. Let us scale the unit of force so that \(G\rightarrow mG\) and the equation of motion becomes just \(G=\ddot{h}\).
\heb Does \(G\) depend on anything?
\hea Gravity feels the same at sea level and on top of a mountain. So \(G\) probably doesn't depend on \(h\) in any significant way. But I don't know why it couldn't depend on for example \(\dot{h}\).
\heb If you think about it, gravity feels the same in a steady and in a moving elevator, at least if the elevator is not accelerating. So the gravitational force seems to respect the principle of relativity.
\hea Okay, I see. If \(G\) depended on \(\dot{h}\), the equation of motion would involve the velocity directly and would not be consistent with the principle of relativity. So \(G\) is constant.
% need to explain what mass is
\heb Now we have here a very simple differential equation: the second derivative of \(h\) is constant. A second degree polynomial has exactly this property. So what do you do?
\hea I take
\begin{eqna}
h(t)=at^2+bt+c
\end{eqna}
and substitute it into the equation. I get \(G=2a\). So \(a\) must be equal to \(G/2\) and \(b\) and \(c\) can be anything. This fits my intuition according to which we need to specify the initial velocity and location to know the future motion.
\heb Perfect!
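\heb For instance, if we label the height and the velocity at \(t=0\) by \(h_0\) and \(v_0\) (labels of my own, just for this example), then \(c=h_0\) and \(b=v_0\), and the motion is
\begin{eqna}
h(t)=\puoli G\,t^2+v_0\,t+h_0.
\end{eqna}
With \(G\) negative, since gravity pulls the stone down while \(h\) grows upwards, this is the familiar parabola: the stone rises, slows down and falls back.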
\subsection{Kinetic energy}
\heb In physics there is a concept called work, denoted by \(W\). What does work bring into your mind?
\hea Thinking. Pain, tiredness, but also satisfaction. But yeah, that is probably not relevant here. I also recognize the old--fashioned `blood, sweat and tears' style of work, for example lifting heavy stones.
\heb That old--fashioned work is physical work. If you for example drag a heavy stone on asphalt, the work done is proportional to the distance the stone moves and to the force needed for dragging. So, at least if the particle moves in the same direction as the force points, we have \(\dd W\propto\vec{F}\cdot\dd\vec{x}\).
\hea If the particle moves perpendicularly to the force, then I don't really do work; I just maintain the force. For example if I stand beside a railroad and push with a skateboard a train that passes along, the wheels of the skateboard making sure I don't get disturbed by the motion of the train, I don't really do anything. I just push. So what you wrote seems reasonable even if the force and motion are not parallel to each other.
\heb Yes. Let us define \(\dd W=\vec{F}\cdot\dd\vec{x}\).
\heb If you push some object with a constant force and it moves with a constant velocity, we have
\begin{eqna}
\dot{W}=\frac{\vec{F}\cdot\dd\vec{x}}{\dd t}=\vec{F}\cdot\der{\vec{x}}{t}=\vec{F}\cdot\dvec{x},
\end{eqna}
so the greater the velocity, the greater \(\dot{W}\), or power, is needed. This is in accordance with everyday observations: the faster you drag the stone, the more you sweat.
\heb Now think of accelerating a particle of mass \(m\) from rest to the velocity \(\dvec{x}=\vec{v}\), for example giving a stone some velocity when throwing it. The work that we need to do, denoted in this context by \(T\), is
\begin{eqna}
T=\int_0^\vec{v}\dd W=\int_0^\vec{v}\vec{F}\cdot\dd\vec{x}.
\end{eqna}
Newton's second law tells that \(\vec{F}=m\ddot{\vec{x}}\), so
\begin{eqna}
T=\int_0^\vec{v} m\der{\dvec{x}}{t}\cdot\dd\vec{x}=m\int_0^\vec{v}\dd\dvec{x}\cdot\der{\vec{x}}{t}=m\int_0^\vec{v}\dvec{x}\cdot\dd\dvec{x}.
\end{eqna}
Leibniz rule tells us that
\begin{eqna}
\dd(\dvec{x}^2)=\dd(\dvec{x}\cdot\dvec{x})=\dd\dvec{x}\cdot\dvec{x}+\dvec{x}\cdot\dd\dvec{x}=2\dvec{x}\cdot\dd\dvec{x}
\end{eqna}
so we have
\begin{eqna}
T=\puoli m\int_0^\vec{v}\dd(\dvec{x}^2)=\puoli m \vec{v}^2.
\end{eqna}
This is called the kinetic energy of the particle. We can also write it as
\begin{eqna}
T=\puoli mg_{\coa\cob}\vec{v}^\coa \vec{v}^\cob.
\end{eqna}
For the particle in the gravitational field this is just \(\puoli\dot{h}^2\) since we chose the unit of mass so that for that particle it is one.
\heb Obviously, if we have many particles the total kinetic energy is just the sum of individual kinetic energies, since we must accelerate each particle independently of the others. Labeling particles with \(i\), we have
\begin{eqna}
T=\puoli\sum_i m_ig_{\coa\cob}\vec{v}_i^{\coa}\vec{v}_i^{\cob}
\end{eqna}
Now, we can take advantage of the tensor formalism we developed. The above sum is clearly linear in each of the velocities in it. If we have for example three particles, the sum is a linear function of six velocity vectors, that is, a tensor with six lower indices. We can therefore write
\begin{eqna}
T=\puoli m_{\coa\cob\coc\cod\coe\cof}\vec{v}_1^{\coa}\vec{v}_1^{\cob}\vec{v}_2^{\coc}\vec{v}_2^{\cod}\vec{v}_3^{\coe}\vec{v}_3^{\cof}
\end{eqna}
where \(m\) is a tensor, built from all the \(m_i g\)'s, which takes care of calculating the kinetic energies of all the particles.
\heb And we can go even further! As you can see, we probably want to contract the velocities of all particles or none of them. Therefore we denote the three balls with just one and write
\begin{eqna}
T=\puoli m_{\coa\cob}\dot{q}^\coa\dot{q}^\cob.
\end{eqna}
Here \(\dot{q}\) denotes collectively the velocities of all particles.
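\heb To see that nothing was lost, here is a small check of my own with just two particles: \(\dot{q}\) then collects \(\vec{v}_1\) and \(\vec{v}_2\), the tensor \(m\) is built from \(m_1g\) and \(m_2g\), and the single expression unpacks into
\begin{eqna}
T=\puoli m_{\coa\cob}\dot{q}^\coa\dot{q}^\cob=\puoli m_1\vec{v}_1^2+\puoli m_2\vec{v}_2^2,
\end{eqna}
exactly the sum of the individual kinetic energies.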
\hea Damn this formalism is beautiful.
\section{Configuration space}
\hea It looks like we have only one particle now. Funny.
\heb We can think that we really have only one particle, the `system particle', instead of the original \(N\) particles. The system particle moves in the \(3N\)--dimensional so--called configuration space. Position in configuration space is often denoted by \(q\) and the velocity naturally by \(\dot{q}\).
\heb Configuration space is a manifold, but unlike the ordinary space, it does not have gyroscopes nor a natural metric. So we cannot parallel transport or change index positions, but that is not a great difficulty.
\hea Can we write Newton's second law for the system particle?
\heb Surely we can. For one particle it can be written as
\begin{eqna}
\vec{F}=m\ddot{\vec{x}}\orr\vec{F}_\coa=mg_{\coa\cob}\ddot{\vec{x}}^\cob.
\end{eqna}
For the system particle we should obviously write
\begin{eqna}
F_\coa=m_{\coa\cob}\ddot{q}^\cob,
\end{eqna}
where \(F\) contains all the forces of the individual particles.
We can also write
\begin{eqna}
F=\dot{p}\wheree p_\coa=m_{\coa\cob}\dot{q}^\cob,
\end{eqna}
since the tensor \(m\) is constant, at least if the masses of the particles are constants.
\heb If we have only one particle, we have
\begin{eqna}
\vec{p}_\coa=mg_{\coa\cob}\dvec{x}^\cob\orr\vec{p}=m\dvec{x}
\end{eqna}
and Newton's second law reads
\begin{eqna}
\vec{F}=\dvec{p}.
\end{eqna}
\heb The quantity \(p\) is called momentum. It in some sense quantifies the amount of motion in the system: the greater the mass and the greater the velocity, the greater the momentum is. It is proportional to the velocity, and therefore is not an absolute vector like the acceleration is.% various things to improve here
\heb In terms of momentum the second law can be read as `Force equals the rate of change of momentum,' that is, force changes the amount of motion.
\hea Sounds very reasonable.% a force in motion could perhaps be thought of as a force at rest? maybe?
\subsection{Potential energy}
\heb We are not the only ones who are able to produce force and do work. Mutual interactions of particles can often be thought of as arising from forces they exert on each other.
\heb Almost any large body can be thought of as consisting of a large number of point particles exerting forces on each other. The forces keep the body in shape, but may also give rise, for example, to vibrations.
\hea So a complicated body can be represented as a point particle in a large configuration space!
\heb Yes. The mutual forces of the particles appear as an external force acting on the system particle, like the gravitational force for the stone.
\heb The force in the configuration space depends on the state of motion of the system. It could for example depend on \(q\), \(\dot{q}\) and \(\ddot{q}\). However, here we have to restrict ourselves to a narrow class of forces, but don't worry, the forces we restrict to are actually the most interesting ones.
\hea What forces do we abandon?
\heb Right now let us abandon forces that depend on the rate of change of \(q\), that is, we consider forces that only depend on \(q\).
\heb For such a force \(F\) we can calculate how much work must be done to change the configuration from \(q=a\) to \(q=b\). The work \(W\) is
\begin{eqna}
W(a\rightarrow b)=\int_{a}^{b}F\cdot\dd q.
\end{eqna}
This may in general depend on the path taken, but if it does not, we can form a function \(V\) of the configuration \(q\) by setting
\begin{eqna}
V(q)=W(a\rightarrow q).
\end{eqna}
Now we have% need to check the signs
\begin{eqna}
F\cdot\dd q=-\dd V=-\nabla V\cdot\dd q,
\end{eqna}
where \(\nabla V\) is the gradient of \(V\), a vector that points in the direction in which \(V\) grows fastest, its length of course depending on this rate.
\heb Newton's second law can therefore be written as
\begin{eqna}
\dot{p}+\nabla V=0\orr m_{\coa\cob}\ddot{q}^\cob+\nabla_\coa V=0.
\end{eqna}
Looks simple, doesn't it!
\heb Now the momentum can be written in a similar form, as a gradient, this time of the kinetic energy with respect to the velocity:
\begin{eqna}
p_\coa=m_{\coa\cob}\dot{q}^\cob=\dot{\nabla}_\coa\left(\puoli m_{\cob\coc}\dot{q}^\cob\dot{q}^\coc\right)=\dot{\nabla}_\coa T.
\end{eqna}
Newton's second law becomes
\begin{eqna}
\der{}{t}\dot{\nabla}T+\nabla V=0
\end{eqna}
or, after defining a new function \(L(q,\dot{q})=T-V\), the Lagrangian of the system (note that \(T\) depends only on \(\dot{q}\) and \(V\) only on \(q\)),
\begin{eqna}
\der{}{t}\dot{\nabla}L-\nabla L=0.
\end{eqna}
The equation of motion in this form is called Lagrange's equation of motion.
\hea What can you say about the potential energy of our particle in a gravitational field?
\heb The force is constant, so the potential is almost trivially just some constant \(\lambda\) times \(h\). The Lagrangian of the stone is therefore
\begin{eqna}
L=\puoli\dot{h}^2-\lambda\,h.
\end{eqna}
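\heb Let us quickly verify, as a check of my own, that Lagrange's equation reproduces what we already know. Here \(\dot{\nabla}L=\dot{h}\) and \(\nabla L=-\lambda\), so
\begin{eqna}
\der{}{t}\dot{\nabla}L-\nabla L=\ddot{h}+\lambda=0,
\end{eqna}
that is, \(\ddot{h}=-\lambda\): constant acceleration, just what Newton's second law gave for the stone with \(G=-\lambda\).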
\section{Action}
\heb The Lagrangian is actually a very cool and useful function. Think for example about the vertically moving stone. If we neglect gravity, the Lagrangian is ridiculously simple, just
\begin{eqna}
L=\puoli\dot{h}^2.
\end{eqna}
The equation of motion is very simple, isn't it?
\hea Well yes, there are no forces so Newton's second law holds and \(\ddot{h}=0\). The speed of the stone is constant.
\heb Along the motion the Lagrangian has some value at every moment, so we can consider its time integral. It is called the action of the motion and is denoted by \(S\). Clearly \(S\) is the average value of the Lagrangian multiplied by the time of flight.
\heb Let's say we know something specific about the motion. We know the stone was at two particular moments \(t_a\) and \(t_b\) at the heights \(h_a\) and \(h_b\).
\hea Ok. So from this information we immediately can calculate the velocity of the stone since we know the stone moves with a constant speed. And then we know also the average value of the Lagrangian.
\heb Now let us imagine that the stone did not move with a constant speed, but the motion still satisfies our boundary conditions. Such a motion is of course fictitious, but we're equipped with an imagination. How does the action \(S\) change?
\hea Hmm. I don't know, it probably depends on our imagination in a very complicated way.
\heb But there is something that definitely happens. Think about the average of the speed.
\hea Aha! If the stone moves with a varying speed, it does some zig zag motion, and to get to the destination in time it must on average go faster than if it went uniformly. That means that the value of the action is bigger.
\heb So the realistic motion has the smallest \(S\) compared to the fictitious motions.
\heb If we now turn gravity on, the Lagrangian gets a new term \(-\lambda h\). For simplicity now assume that \(h_a=h_b\). What can you say about the motion?
\hea It must first go up and then down, all in a smooth fashion. We solved this and the answer was a parabola.
\heb Now the average speed is no longer the smallest among all imaginable motions. But the potential energy term is negative and compensates.
\hea Do you mean that by touring high in the potential the stone again manages to make the value of the action the lowest?
\heb At least that's what it looks like! If the stone visited even higher altitudes than it actually does, it would probably make the average speed too high.
\hea I see, the parabola is probably the best compromise of having a fairly low average speed and a fairly high average potential at the same time.
\heb We can put this so--called principle of least action on firm ground.
\subsection{Variation of the action}
\heb Let us consider any system with a Lagrangian \(L(q,\dot{q})\). The action of any motion \(q(t)\) is
\begin{eqna}
S[q(t)]=\int L(q,\dot{q})\,\dd t.
\end{eqna}
The action can be thought of as a real--valued function in the space of all possible and impossible motions. We take a motion, divide it into small segments \(\dd t\), evaluate the Lagrangian on each segment and add up the contributions \(L\,\dd t\).
\heb Now compare this action to the action of a slightly deformed path \(q(t)+\delta q(t)\). The action changes by
\begin{eqna}
\delta S=\int_a^b\delta L\,\dd t.
\end{eqna}
The change in the Lagrangian of course depends on the moment of consideration \(t\), but I will not bother to always write the time dependence explicitly.
\heb Basic differential calculus tells us that
\begin{eqna}
\delta L=\nabla L\cdot\delta q+\dot{\nabla}L\cdot\delta\dot{q}.
\end{eqna}
All these are of course functions of time.
\hea What is \(\delta\dot{q}\)? I guess we can only specify \(\delta q\).
\heb It is of course determined by \(\delta q\). See
\begin{eqna}
\delta\dot{q}=\der{(q+\delta q)}{t}-\der{q}{t}=\der{\delta q}{t}.
\end{eqna}
\hea Oh yes.
\heb It is exactly the same thing as the commuting of partial differentiation.
\heb We defined \(p=\dot{\nabla}L\), so now we have
\begin{eqnb}
\delta L&=&\nabla L\cdot\delta q+p\cdot\der{}{t}\delta q\\
&=&\nabla L\cdot\delta q+\der{}{t}(p\cdot\delta q)-\dot{p}\cdot\delta q\\
&=&\der{}{t}(p\cdot\delta q)-(\dot{p}-\nabla L)\cdot\delta q.
\end{eqnb}
This holds for any motion \(q(t)\), be it realistic or not, and any small variation \(\delta q(t)\). But if \(q(t)\) is a real motion, it satisfies the equation of motion \(\dot{p}-\nabla L =0\) and therefore
\begin{eqna}
\delta L=\der{}{t}(p\cdot\delta q).
\end{eqna}
This is a time derivative, so the change in \(S\) is just
\begin{eqna}
\delta S=[p\cdot\delta q]_a^b.
\end{eqna}
\heb When we talked about the stone, we considered only motions with the same boundary conditions, right?
\hea Right.% Interaction terms, equivalent Lagrangians, using the action principle,
\heb If we compare the realistic motion only to motions that begin and end at exactly same configuration, then \(\delta q(t_a)=\delta q(t_b)=0\) and therefore
\begin{eqna}
\delta S=0.
\end{eqna}
So for a realistic motion that satisfies the equation of motion the action has a stationary value with respect to all small variations that vanish at the endpoints.
\hea Does this work also in the other direction?
\heb Yes it does, as can be easily seen. For any path and any small variation that vanishes at the endpoints we have
\begin{eqna}
\delta S=-\int_a^b(\dot{p}-\nabla L)\cdot\delta q\,\dd t.
\end{eqna}
If we know this vanishes, the stuff in the parentheses must vanish at all times. If it did not, we could perform a variation which is nonzero only for a short period at the right time and get \(\delta S\neq 0\). But the vanishing of the stuff in the parentheses is just the equation of motion.
\hea So we have two equivalent characterizations of a realistic motion, that it satisfies the equation of motion and that it has a stationary value of the action with respect to small variations that vanish at the endpoints. Interesting.
\hea Why is this called the principle of \emph{least} action?
\heb Think of an ordinary real valued function which has a minimum point, for example \(y=x^2\). If we sit at \(x=0\) and perform a small variation \(\delta x\), the value of the function does not really change. In contrast, if we were sitting at \(x=1\), the value would have changed by \(\delta y=2\,\delta x\). The \(2\) is the derivative of \(x^2\) at \(x=1\).
\heb You can think of the action as a real--valued function in the space of all possible and impossible motions of the system. A real motion is the bottom of a well of \(S\) in the space of motions, like \(x=0\) is the bottom of the \(x^2\) well on the real number line.
\hea Yes, I see, but if I had \(y=-x^2\), then I would also have \(\delta y=0\) even though there is no hole.
\heb That is an excellent observation. The stationary value can be a local minimum, a local maximum or a saddle point, in the fashion of \(y=x^3\). However, if you think of a realistic motion, you can always add a little zig zag to it, without really changing the potential energy but increasing the speed. So a real motion cannot be a maximum, at least if we have a Lagrangian of the form \(L=T-V\).
\heb So the word `least' in the name of the principle is a small misnomer. A better name would be `the principle of stationary action'. But it really doesn't matter whether we have a minimum or a saddle point; the essential thing is that the value is stationary.
\hea Okay. What if the Lagrangian depended also for example on \(\ddot{q}\)?
\heb If we had \(L=L(q,\dot{q},\ddot{q})\) and assumed that the equation of motion was still the usual Lagrange's equation, the action would not have a stationary value. If we instead vary such an action, we find that the fourth--order differential equation
\begin{eqna}
\nabla L-\der{}{t}\dot{\nabla}L+\der{^2}{t^2}\ddot{\nabla}L=0
\end{eqna}
must be satisfied by the motion. The last term is new compared to the usual Lagrange's equation, to which the equation reduces if \(\ddot{\nabla}L=0\). Moreover, this equation is equivalent to the condition \(\delta S=0\) only with respect to small variations that have not just fixed endpoints but also fixed first time derivatives at the endpoints.
\heb This is because a fourth--order differential equation needs four boundary conditions, here the configurations and the first derivatives at the endpoints. The usual Lagrange's equation is of second order and therefore needs two boundary conditions, reflected in the action principle as the two fixed endpoints.
\subsection{Let us appreciate the action principle}
\heb The two equivalent views, the equation of motion and the action principle, are in some sense local and global perspectives on the realistic motion of the system.
\hea Yeah, if I think of the equation of motion, I think that something happens at a specific instant of time. But the action principle evidently sees the motion in some sense as a whole.
\heb They are both good perspectives. If you need to actually find a concrete motion of a system, you had better solve the equation of motion. But many theoretical aspects are more transparent when analysed in terms of the action. The principle of least action gives some invaluable insights.
\heb It is good to pause and think of what we've done. You remember?
\hea In short, we tried by common sense to sort out some simple and reasonable equations that could describe the motion of particles. With pretty good success we ended up with Newton's law, Lagrange's equation and the action principle.
\heb Essentially we noticed that the motion of particles is smooth and therefore describable by differential equations. Our experience is that the future of a system depends on its initial velocity and configuration.
\hea So we ended up with a configuration space in which the motion is a function of time, a function which obeys a second order differential equation.
\heb Then we restricted the possible forces by assuming that the equation of motion can be written as
\begin{eqna}
\der{}{t}\dot{\nabla}L-\nabla L=0.
\end{eqna}
That equation is equivalent to the action principle.
\heb The restriction forces us to abandon for example frictional forces, but I promise that very many interesting systems---I really mean that---can be described with an action.
\hea So if I'm trying to find a description for some physical system, you encourage me to look for a Lagrangian?
\heb Yes. If we want to describe a system, usually the best approach we can take is to look for a Lagrangian. In some situations it may be easier to find the action directly.
\heb Sometimes boundary conditions may be different and sometimes \(L\) may depend on \(\ddot{q}\). Or only on \(q\).
\hea Your instructions sound a bit muddy.
\heb There is no prescription. There is no road guaranteed to get you where you want. All I can say is that it is probably a good idea to look for some kind of action.
\subsection{Noether's theorem}
\heb If we can change the motion of a physical system in such a way that the changed motion is also realistic, the system is said to possess a symmetry. We can for example think of the motion of a thrown rock. We can move the throw to another city, or perhaps a hundred meters up, and get another possible motion of the rock.
\hea So you are trying to say that the rock flies in the same way independently of the city and altitude where it has been thrown? I know that. You managed to choose pretty complicated words for expressing such a simple fact.
\heb Please excuse me. The flying rock is said to possess translation invariance. But the flight of the rock is an arc. If you turn it on its side, you get a flight which turns left or right but not down---the rock clearly does not possess rotational invariance around an axis parallel to the ground.
\heb The translational invariance of the rock is a continuous symmetry, meaning that if we wish, we can do only a very small translation.
\heb A realistic motion, for example an arc of the rock, has two significant, though equivalent, properties: it satisfies the equation of motion and the value of the action is stationary for it. So, what characterizes our symmetry--transformed motion?
\hea Obviously the same two properties.
\heb Exactly. Say we perform a small symmetry transformation \(\delta q\) on the motion. The value of the action remains stationary with respect to variations which vanish at the endpoints if the change in the action depends only on the endpoints, that is,
\begin{eqna}
\delta S=[f]_a^b
\end{eqna}
for some \(f(q,\dot{q})\). This means that it must hold that \(\delta L=\dot{f}\). On the other hand, for any small change in a realistic motion we got
\begin{eqna}
\delta L=\der{}{t}(p\cdot\delta q).
\end{eqna}
In totality we have
\begin{eqna}
\dot{f}=\der{}{t}(p\cdot\delta q),
\end{eqna}
in other words the quantity
\begin{eqna}
p\cdot\delta q-f
\end{eqna}
is constant in time. Every continuous symmetry thus hands us a conserved quantity; this is Noether's theorem.
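As a quick check with our falling stone: with \(L=\puoli\dot{h}^2-\lambda h\), a small constant shift \(\delta h=\epsilon\) of the whole motion gives \(\delta L=-\lambda\epsilon=\der{}{t}(-\lambda\epsilon t)\), so \(f=-\lambda\epsilon t\) and the conserved quantity is
\begin{eqna}
p\,\delta h-f=\epsilon(\dot{h}+\lambda t),
\end{eqna}
which indeed stays constant, since \(\ddot{h}=-\lambda\).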
\subsection{Looking for an action}
\hea Well, if I'm after a Lagrangian, or an action, do you have any tips?
\heb First of all, if you take any action and multiply it by a number the stationary points obviously do not change. Also, if you take a function \(f=f(q)\) and add its time derivative to a Lagrangian, the stationary points remain unchanged.
\hea Why?
\heb The action changes by
\begin{eqna}
S\rightarrow S+\int_a^b\der{f}{t}\,\dd t=S+[f]_a^b,
\end{eqna}
which depends on the path only through the fixed endpoints, so the stationary points do not move.
% Multiplying by a number or adding a total derivative changes nothing
% Coupling systems together
% Exploiting symmetries
% Perturbation theory
\heb If you have two independent systems, you can obviously describe them together by simply adding their Lagrangians, so that the total action is the sum of the two actions.
\subsection{Constrained systems}
\hea I was thinking that if my system consists of, for example, two rocks connected by a rigid rod of length \(l\), then the rod constrains the motion. Is there a way to deal with the problem without giving any thought to the possibly complicated forces that keep the rod together?
\heb Yes there is. But let's first think about the actual forces that keep the rocks at a fixed distance from each other. The rod is probably not perfectly rigid, but rather acts like a very, very stiff spring.
\hea Yeah, you're right. An infinitely stiff spring is a rigid rod.
\heb A spring has an equilibrium length, and if it is stretched or compressed, it generates a force pushing or pulling towards the equilibrium.
\hea Such a force can be obviously described by a potential which is zero at the equilibrium length and higher elsewhere.
\hea So let us subtract from the original Lagrangian of the two rocks a potential function which is zero when the distance of rocks is \(l\), and rises steeply when the distance gets longer or shorter.
\heb I see. Now the Lagrangian implements the constraining effect of the rod. If the potential is chosen to rise very steeply, the stationary value of the action can only be achieved if the distance between the rocks is \(l\). Nice.
\hea Now, the variation of the action vanishes with respect to all variations with fixed endpoints, including the ones that do not change the distance \(l\) between the rocks.
\heb So we can forget the potential describing the forces of the rod and just find a stationary value of the original action, not among all motions but among those that keep the distance between the rocks equal to \(l\). This is what I was after.
\hea But how can we achieve that?
\heb There is a trick for doing that. If we have a constraint like the rod in your example, it can be expressed as a function \(g(q)\) of the coordinates in such a way that \(g(q)=0\) exactly when the constraint is satisfied. In the case of the rocks we could choose \(g\) to be the distance between the rocks minus \(l\).
\heb Now take the original Lagrangian and define a new one \(\primed{L}\) by \(\primed{L}=L+\lambda g(q)\) with \(\lambda\) an additional function of time. Now we perform the usual variation \(\delta q(t)\) \emph{and} a variation \(\delta \lambda(t)\). We get
\begin{eqna}
\delta\primed{L}=\delta L+\lambda\,\nabla g\cdot\delta q+g\,\delta\lambda.
\end{eqna}
The change in the action \(\delta\primed{S}\) is just the time integral of this.
If we vary only \(\lambda\), we get \(g=0\), which is just the constraint. If we vary \(q\) we get
\begin{eqna}
\dot{p}-\nabla L-\lambda\,\nabla g=0,
\end{eqna}
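which is just the old equation of motion with the extra term \(\lambda\nabla g\) playing the role of the force exerted by the rod. To make this concrete, a minimal sketch for your two rocks, with unit masses and no other potential energy, would be \(g(q)=|q_1-q_2|-l\) and
\begin{eqna}
\primed{L}=\puoli\,\dot{q}_1\cdot\dot{q}_1+\puoli\,\dot{q}_2\cdot\dot{q}_2+\lambda\,(|q_1-q_2|-l),
\end{eqna}
where varying \(\lambda\) forces the distance to stay at \(l\) and varying the positions gives the constrained equations of motion.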
% ALL symmetries of the flying stone
% Constrained motion / Lagrange's multipliers
% The range of applicability of the Lagrangian method
%In one dimension \(\partial_x f(x)\) is a dual vector, because \(\partial_x f\dd x=\dd f\) is a number.
\end{document}
\chapter{Coordinating things}
\section{Basis}
\section{Isometries}
\section{Groups}
\section{The Galilean group}
\section{A handful of particles}
\chapter{Vibrations}
\section{The simple harmonic oscillator}
\section{Vibrations of several degrees of freedom}
\section{Fourier transformation}
\section{Two particles}
\chapter{Ridiculously many particles}
\section{Phase space}
\newpage
\heb Let us consider some observers like you and me. Let's assume that the observers do not move and that they all use the same units of time and distance. How can these observers differ from each other?
\hea Well, I think everybody is different. Even if two persons were cloned from the same DNA, they have necessarily faced different things in life and become different kinds of persons.
\heb Very clever. I wouldn't have known that! But you know that I meant something simpler.
\hea The observers are located at different places.
\heb And their noses may point in different directions. It is interesting to think of how the perspectives of these different observers are related to each other.
\heb We human beings have two eyes and therefore a stereo vision. We can locate things accurately in space by just watching. Say there are a couple of stones around and every observer sees them. The question is: how do their points of view, I mean literally the images that get projected on their retinas, relate to each other?
\hea Sounds like a complicated question.
\heb Well, the actual transformations of the view may be complicated, but we can say something about them. Let us denote the image on the retinas of some observer by \(\ket{\Psi}\). This notation is from quantum mechanics; I chose it because it looks so cool. Here it only means the view and nothing quantum mechanical.
\heb If we for example translate the viewer, that is change her location, the view changes. Let us denote the translation by \(\vec{T}\) and write
\begin{eqna}
\ket{\Psi}\rightarrow\vec{T}\ket{\Psi}.
\end{eqna}
\hea Sounds trivial.
\heb I know. Now we can make another translation \(\primed{\vec{T}}\). In totality we have
\begin{eqna}
\ket{\Psi}\rightarrow\vec{T}\ket{\Psi}\rightarrow\primed{\vec{T}}\vec{T}\ket{\Psi}.
\end{eqna}
Clearly both translations together form another translation which we may think of as a product \(\primed{\vec{T}}\vec{T}\). We can also think of doing nothing as a special kind of translation and denote it by \(1\).
\heb This is to say that mathematically the translations form a group. It is clear that the order of translations does not matter; we have \(\primed{\vec{T}}\vec{T}=\vec{T}\primed{\vec{T}}\) for any two translations. Translations are said to commute.
\hea I've heard of groups. If we translate by \(T\) we can always translate back, which I guess we could denote by \(T^{-1}\). It holds that \((\primed{T}T)^{-1}=T^{-1}\primed{T}^{-1}\) because
\begin{eqna}
T^{-1}\primed{T}^{-1}\primed{T}T=T^{-1}1T=1.
\end{eqna}
This is clear because translating and then translating back is doing nothing. I feel stupid when I say that out loud.
\heb Good. Things get more interesting when we consider rotations. Rotations also clearly form a group which in three dimensions is known as \(SO(3)\).
\heb The group of rotations is more interesting because it is not commutative. Look: If I take this chair and do two \(\circc/4\) rotations around vertical and horizontal axes, I get different results depending on the order.
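As a concrete check in coordinates, with \(z\) the vertical axis and \(x\) a horizontal one and with the usual counterclockwise convention, a quarter turn \(R_z\) acts as \((x,y,z)\mapsto(-y,x,z)\) and a quarter turn \(R_x\) as \((x,y,z)\mapsto(x,-z,y)\). Composing them in one order gives
\begin{eqna}
(x,y,z)\ \stackrel{R_x}{\longrightarrow}\ (x,-z,y)\ \stackrel{R_z}{\longrightarrow}\ (z,x,y),
\end{eqna}
whereas in the opposite order
\begin{eqna}
(x,y,z)\ \stackrel{R_z}{\longrightarrow}\ (-y,x,z)\ \stackrel{R_x}{\longrightarrow}\ (-y,-z,x),
\end{eqna}
which is clearly a different result.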
\hea I see, but what is \(\circc\)?
\heb It is the circle constant \(\circc\doteq2\pi\). \(\pi\) is used only for historical reasons; \(\circc\) is much more natural as it represents the whole circle. Check \url{tauday.com} for an exhaustive discussion. If you don't like this, just go to the source code of our lives and uncomment one line. The source can be found at \url{konstakurki.co/time}.
\hea Cool.
\chapter{Curvature}
\heb If we take two parallel straight lines in space and extend them, do they remain parallel?
\hea Of course.
\heb How do you know that?
\hea Well, hmm, it seems obvious, doesn't it? I can't see how else it could be. All straight lines that I've seen to be parallel at some point have always been parallel everywhere else too.
\heb OK, so you understand that it is an empirical observation. And then you of course also understand that it is not necessarily perfectly true, since we have not really seen \emph{very} long straight parallel lines. Our picture of our space may be a bit too ideal.
\hea Well, in principle yes, but this sounds like nitpicking to me. You said that usually the most simple guess is true, so I propose that parallel lines continue to be parallel, since I think that is simpler than `parallel lines may stop being parallel'.
\heb That's surely what I did say. But what really is simple does not necessarily look like that at first sight. This way or that, I really do have a point.
\heb Look at this globe. Take two straight routes from the equator to the north pole. In the beginning they are parallel, but they cross at the north pole and are definitely not parallel there.
\hea Ha ha. This really is stupid. The routes are of course really not straight; a straight route would go away from the surface of Earth to space. Earth is a ball in our three--dimensional space.
\heb Are you absolutely sure about that? Maybe we assume it is so because we have noticed that parallel lines do not continue as parallel, and in reality our space is more peculiar than we assume.
\hea I think we can safely assume that light goes truly straight, and we know that a ship sailing away will eventually sink under the horizon. Earth really is a ball.
\heb Well, that is solid I must admit. Earth is a ball. But let me ask you another question. If we take an arrow, parallel transport it, that is transport it without changing the direction it points to, and parallel transport it back, does it end up pointing in the same direction to which it pointed in the beginning?
\hea I could of course ask how you know that the direction does not change, but I guess a gyroscope can be used to handle that issue. If we transport a gyroscope, hell yeah it will come back pointing at the original direction.
\heb Again, look at this model of Earth. Take an arrow and start from the equator, and go up to the north pole. Then turn right and go straight until you are back on the equator. Then go back to the starting point along the equator. The arrow has turned a quarter of the full turn \(\circc\)!
\hea That's an interesting remark. But if you had really parallel transported the arrow, it would have pointed straight away from Earth at the north pole. You are again fooled by the fact that Earth is a ball.
\heb Yes I am. But now I tell you something that's gonna make you shit in your pants.
\heb Gyroscopes have actually been carried on satellites around Earth, and the results show that their directions \emph{do} change.
\hea What?
\heb Yes they do. If you don't believe me, check the study of this and that. It can be found on arXiv under the number \arxivreference{1234.5678}.
\hea Don't worry, I believe you. But how is that possible?
\heb Well, that is something that will make us wonder a lot. But we may think that our space is actually intrinsically curved like the surface of Earth is.
\hea Is our space actually floating in some higher dimensional space?
\heb Maybe yes, maybe not. But it is actually not relevant for us, just like the fact that Earth is embedded in a three--dimensional space is not relevant for a cartographer who just wants to draw useful two--dimensional pictures of land, seas and infrastructure built by human beings.
\heb We had therefore better get used to curved spaces, or manifolds as they are often called in mathematics.
\section{Riemannian manifolds}
\heb In mathematics a Riemannian manifold is a smooth space in which every curve has some length associated with it. Smooth means that the space is essentially flat if we look at only small portions of it.
\heb For example the surface of Earth is a two--dimensional Riemannian manifold. It is smooth; if we take a small piece of this globe, it definitely is flat like a piece of paper. We can measure the length of a curve for example by walking along it and counting the steps.
\heb But for example a cone is not smooth, because the tip is sharp no matter how close we look, and thus it is not a Riemannian manifold.
\hea Seems like an intuitive concept. I guess our space is a three--dimensional Riemannian manifold?
\heb The truth is actually even more fascinating, but right now we regard it as such. In any case manifolds will be vital for us if we want to understand time.
\subsection{Parallel transport}
\subsection{Vectors}
\heb Think of two points on a manifold so close to each other that the manifold is essentially flat on the scale of their separation. We can draw an arrow from the first to the second. Let me denote the arrow, which is naturally called the separation of the two points, by \(\dd x\).
\heb Now if there are another two points near the first ones, with a separation \(\dd y\), we can naturally add the separations by parallel transporting one to start from the end of the other. We get another vector. Because the manifold is essentially flat in the region containing the vectors, there is no ambiguity in carrying out the parallel transport.
\hea You're adding vectors!
\heb Yes I am!
\hea We could also multiply \(\dd x\) by a number \(a\), producing a separation of two points along the same line as the originals but \(a\) times further apart from each other. And so on, yes, I know about vectors.
\heb The separation \(\dd x\) is naturally a vector, but we can also have other vectors. For example if we have a particle moving in a manifold, we can consider its location at two different times separated by a small time interval \(\dd t\). If the particle does not move unreasonably fast, the locations at \(t\) and \(t+\dd t\) are close together. Dividing this separation \(\dd x\) by \(\dd t\) we get another arrow
\begin{eqna}
\der{x}{t}\doteq u,
\end{eqna}
the velocity, which is also clearly a vector.
\hea But now \(\dd t\) is very short and so \(u\) might stretch so far that the space cannot anymore be considered as flat?
\heb Well, for general vectors we imagine the small essentially flat region to extend to infinity as perfectly flat. Imagine gluing a flat piece of paper on the globe. In mathematics this flat piece is called a tangent space. If you are interested in the mathematical details, consider for example the excellent book \emph{Geometry, Topology and Physics} by Mikio Nakahara.
\hea Ok, I'll check it. Now, if I'm correct, each point has its own tangent space. We must think of a vector as actually being located somewhere in the manifold, and only vectors located at the same point, and belonging to the same tangent space, can be added, ultimately because it is not clear how to move vectors over long distances.
\heb Exactly, exactly. Now think of the inner product \(A\cdot B\) of two vectors \(A\) and \(B\).
\hea It measures how much \(A\) and \(B\) overlap. It is symmetric, \(A\cdot B=B\cdot A\), and linear in both of its arguments. If \(A\) and \(B\) are perpendicular to each other, we have \(A\cdot B=0\).
\heb Yes. Now it is interesting to take the inner product of \(\dd x\) with itself. We can also do the same for \(a\dd x\), which is \(a\) times longer separation. We get
\begin{eqna}
(a\dd x)\cdot(a\dd x)=a^2\,\dd x\cdot\dd x
\end{eqna}
because the inner product is linear in both of its arguments.
\hea I'm getting this! An inner product can be used to measure the distance of closely separated points.
\heb Yes. We can equip tangent spaces with any inner product we want. In Riemannian geometry the inner product is chosen so that
\begin{eqna}
\dd s^2=\dd x\cdot\dd x
\end{eqna}
where \(\dd s\) is the distance of two points separated by \(\dd x\).
\heb The inner product is a bilinear map which takes two vectors and produces a number. It is useful to consider linear maps from any number of vectors to a real number. Such objects are called tensors.
\heb The Riemannian inner product is usually denoted by \(g\). It is called the metric tensor, because it is used to measure distance. In mathematics literature one usually writes \(A\cdot B\doteq g(A,B)\), but this kind of notation gets complicated if we have tensors with more `inputs' for vectors. I like to write it as
\begin{eqna}
A\cdot B\doteq g_{\coa\cob}A^\coa B^\cob.
\end{eqna}
Because the inner product is symmetric in both of its arguments, we have \(g_{\coa\cob}=g_{\cob\coa}\).
\hea What are those colored balls?
\heb They are wireless connectors that connect the vectors to the inputs of the multilinear map \(g\). The position of the ball determines which input is used, and the color determines what it is connected to.
\hea We can also think of a tensor \(T\) which takes only one vector \(V\) and produces a number. That would be written as \(T_\coa V^\coa\). Tensors like \(T\) are called dual vectors.
\heb We can also think of the vector \(V\) as a linear map which takes \(T\) as its input. It really doesn't matter. That is why \(T\) is called a dual vector.
\heb We can also think of first taking an outer product \(T_\coa V^\cob\) and then connecting \(_\coa\) to \(^\cob\). It is easy to verify that the outer product is actually a bilinear map which takes one vector and one dual vector as its inputs.
\hea I see. We can take tensors with any number of balls upstairs and downstairs, for example \(\tensay{F}{\coa}{\cob}\) and \(\tensya{K}{\coc}{\cod}\). Then we can form the outer product \(\tensay{F}{\coa}{\cob}\tensya{K}{\coc}{\cod}\), which takes inputs exactly as you would expect from the balls.
\heb Yes.
\hea Then we can connect balls, for example \(\tensay{F}{\coa}{\cob}\tensya{K}{\coc}{\coa}\).
\heb Connecting the balls is often called contracting. Mathematically it means taking a trace. An upper ball must always be contracted with a lower one and vice versa.
\hea And the contracted indices are not anymore inputs! A contraction takes one input and one output off a tensor.
\heb Finally, when there are no inputs and outputs left, we have just a number. How simple is that?
\hea Very simple and elegant.
\heb In this notation we have
\begin{eqna}
\dd s^2=g_{\coa\cob}\dd x^\coa\dd x^\cob.
\end{eqna}
\subsection{Length of a curve}
Now, think of a curve \(x(t)\) on a manifold. We can calculate its length \(s\) by thinking of it as consisting of very many small segments. We have
\begin{eqna}
s=\int\dd s=\int\sqrt{g_{\coa\cob}\dd x^\coa\dd x^\cob}.
\end{eqna}
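As a small concrete example, the flat plane written in polar coordinates \(r,\varphi\) has
\begin{eqna}
\dd s^2=\dd r^2+r^2\,\dd\varphi^2,
\end{eqna}
and the length of a circle of radius \(R\) is \(s=\int_0^{\circc}R\,\dd\varphi=\circc R\), as it should be.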
\section{Differentiation}
\hea Could we differentiate things?
\heb If we have a scalar function \(\phi(x)\) on a manifold, that is, we attach a number to each point in the manifold, it is easy to compare neighboring values: just take the difference \(\dd\phi\). If we have a curve \(x(t)\), we can easily calculate the derivative of \(\phi\) with respect to \(t\); it is just \(\dd\phi/\dd t\).
\heb But differentiating a vector field is more difficult because we have to compare vectors belonging to different tangent spaces. How could we do it?
\hea Well, the only natural way of doing it that comes to my mind is to parallel transport one of the vectors near the other and then do the comparison.
\heb Damn you're clever. The difference of a vector field \(A(x)\) gotten that way is usually denoted by \(DA\). The capital D is used to emphasize that it is not a simple subtraction of two numbers. Now, if we have a curve \(x(t)\), we can differentiate \(A\) along it by forming the simple fraction
\begin{eqna}
\frac{DA}{\dd t}\doteq\cder{A}{t}.
\end{eqna}
It is also a vector. We usually write \(D\) instead of \(\dd\) also before \(t\) because mixing two different d's looks stupid.
\hea But what about other tensors?
\heb Oh yes. Actually they can also be naturally parallel transported, meaning that if the original tensor gives some number out of some arbitrary vectors, the parallel transported tensor gives the same number out of the same vectors, themselves parallel transported to the same location.
\hea Like always, the covariant derivative obeys the Leibniz rule.
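Spelled out for a dual vector \(T\) and a vector \(A\) along the curve, this means
\begin{eqna}
\der{(T_\coa A^\coa)}{t}=\cder{T_\coa}{t}\,A^\coa+T_\coa\,\cder{A^\coa}{t},
\end{eqna}
the left--hand side being an ordinary derivative, because \(T_\coa A^\coa\) is just a number at each point.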
\end{document}
| {
"alphanum_fraction": 0.7435368053,
"avg_line_length": 59.886548913,
"ext": "tex",
"hexsha": "d18cd4b9b5cd5a0e042b0596e2c6f84efd547c95",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "538949ca01dfcf237bdf3dbebf421a949c3e69a1",
"max_forks_repo_licenses": [
"CC-BY-4.0"
],
"max_forks_repo_name": "konstakurki/time",
"max_forks_repo_path": "time.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "538949ca01dfcf237bdf3dbebf421a949c3e69a1",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC-BY-4.0"
],
"max_issues_repo_name": "konstakurki/time",
"max_issues_repo_path": "time.tex",
"max_line_length": 469,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "538949ca01dfcf237bdf3dbebf421a949c3e69a1",
"max_stars_repo_licenses": [
"CC-BY-4.0"
],
"max_stars_repo_name": "konstakurki/time",
"max_stars_repo_path": "time.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 23651,
"size": 88153
} |
%-----------------------------------------------------------------------------%
% %
% K A P I T E L 7 %
% %
%-----------------------------------------------------------------------------%
\chapter{Conclusion and Outlook}\label{c7}
\section{Thesis Summary}
This master's thesis was motivated by the generation of physically consistent, efficient motion plans for legged robots.
The premise of the investigation was that whole-body planning leads to more efficient motions than a simple program such as an \gls{IK} solver. Therefore, the core algorithm of the proposed motion planning approach was a recently presented \gls{DDP}-based whole-body \gls{TO}. Building upon this, a generic method for constraining DDP-like solvers was presented to generate dynamically balanced motions. The results were integrated into the recently presented open-source framework Crocoddyl.
Following this, we investigated the \gls{CoP}-based contact stability of the proposed motion planning approach for a wide range of motions with the biologically inspired RH5 humanoid robot. We were able to demonstrate that the resulting motion plans for both dynamic bipedal walking and various jumping tasks are inherently balanced. Additionally, the analysis of highly-dynamic movements allowed the derivation of useful guidelines for future design iterations of the humanoid robot.
Although the focus of this thesis was on motion planning, we evaluated the feasibility of the generated trajectories with a simple online stabilizer. We demonstrated in a real-time physics simulator that the motion plans can be stabilized by a simple control architecture solely based on joint-space position control. Furthermore, it could be shown that for real-world experiments, a control in the task space is indispensable to compensate for deviations between the model and reality.
The final result of this thesis is an efficient motion planning approach that produces inherently balanced motions. This algorithm efficiently generates highly-dynamic movements with flight-phases for various legged systems.
\section{Future Directions}
%DDP is great
We see large potential in using \gls{DDP}-based whole-body \gls{TO} to generate motions for legged systems. Motion planning based on numerical optimization clearly reduces the number of hand-crafted components and allows the specification of high-level tasks directly in the operational space of the robot. This is likely to become even more important as legged robots tackle more difficult terrains that require higher dynamic motions.
%Algorithmic Improvements
From an algorithmic perspective, the formulation can still be improved in a number of ways. The goal of the algorithm is to efficiently and accurately generate physically consistent motions. A promising way of simultaneously improving both measures is to directly embed inequality constraints as strict bounds inside the \gls{DDP} algorithm, instead of enforcing them by penalization.
%Control Approaches: MPC + Online Stabilization
From a control perspective, two successive steps are of particular interest. First, working on an improved online stabilization is a worthwhile undertaking. As discussed in the last chapter, operational space control is indispensable for compensating modeling errors directly in the task space. Following up, it would be interesting to embed the motion planning inside a \gls{MPC} formulation. If the re-planning is quick enough, the robot will be able to react more robustly to unpredicted situations.
%Long-term vision
This direction for future research, namely improving the motion planning algorithm and embedding it in an \gls{MPC} formulation on a real system, seems promising. Intelligently combining optimization-based planning and control may be the key to robots interacting with both the environment and humans in a more natural, dynamic and autonomous way. With some work in this direction, we may soon find legged robots crossing our daily paths or intuitively collaborating with us when assembling infrastructures and exploring foreign planets. | {
"alphanum_fraction": 0.7510469986,
"avg_line_length": 138.6451612903,
"ext": "tex",
"hexsha": "aa4ae53c9da45d95cc5255bb4b677c8593a0fd88",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-03-26T14:30:37.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-03-26T14:30:37.000Z",
"max_forks_repo_head_hexsha": "29d00b315f5d502fd1378457be2f64cf74049ca0",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "julesser/ma-thesis",
"max_forks_repo_path": "tex/chapter7.tex",
"max_issues_count": 8,
"max_issues_repo_head_hexsha": "29d00b315f5d502fd1378457be2f64cf74049ca0",
"max_issues_repo_issues_event_max_datetime": "2020-04-18T12:43:52.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-04-18T12:28:21.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "julesser/ma-thesis",
"max_issues_repo_path": "tex/chapter7.tex",
"max_line_length": 538,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "29d00b315f5d502fd1378457be2f64cf74049ca0",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "julesser/ma-thesis",
"max_stars_repo_path": "tex/chapter7.tex",
"max_stars_repo_stars_event_max_datetime": "2020-09-28T08:48:54.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-09-28T08:48:54.000Z",
"num_tokens": 766,
"size": 4298
} |
% java code formatting
\definecolor{pblue}{rgb}{0.13,0.13,1}
\definecolor{pgreen}{rgb}{0,0.5,0}
\definecolor{pred}{rgb}{0.9,0,0}
\definecolor{pgrey}{rgb}{0.46,0.45,0.48}
\lstset{
tabsize = 4, %% set tab space width
showstringspaces = false, %% prevent space marking in strings, string is defined as the text that is generally printed directly to the console
commentstyle = \color{pgreen}, %% set comment color
keywordstyle = \color{pblue}, %% set keyword color
stringstyle = \color{pred}, %% set string color
rulecolor = \color{pgrey}, %% set frame color to avoid being affected by text color
basicstyle = \small \ttfamily , %% set listing font and size
breaklines = true, %% enable line breaking
numberstyle = \tiny,
}
\section{System Smart Contracts}
\label{appendix:contract}
Following are the AVM smart contracts (extracted as Java interfaces) for the staking and delegation contracts. Only the functions utilized in the major control flows (as described in appendix \ref{appendix:sequence_diagrams}) are provided. Note that access modifiers (e.g. public, private, etc.) at the class and method level have also been stripped for brevity.
\begin{lstlisting}[language = Java , escapeinside={(*@}{@*)}]
interface StakerRegistry {
void registerStaker(Address identityAddress, Address signingAddress,
Address coinbaseAddress);
void bond(Address staker);
long unbond(Address staker, BigInteger amount, BigInteger fee);
long unbondTo(Address staker, BigInteger amount, Address receiver,
BigInteger fee);
long transferStake(Address fromStaker, Address toStaker,
BigInteger amount, BigInteger fee);
void finalizeUnbond(long id);
void finalizeTransfer(long id);
    void setState(Address staker, boolean newState);
void setSigningAddress(Address newSigningAddress);
void setCoinbaseAddress(Address newCoinbaseAddress);
}
interface PoolRegistry {
Address registerPool(Address signingAddress, int commissionRate,
byte[] metaDataUrl, byte[] metaDataContentHash);
void delegate(Address pool);
long undelegate(Address pool, BigInteger amount, BigInteger fee);
void finalizeUndelegate(long undelegateId);
void redelegateRewards(Address pool);
long transferDelegation(Address fromPool, Address toPool,
BigInteger amount, BigInteger fee);
void finalizeTransfer(long transferId);
BigInteger withdrawRewards(Address pool);
void enableAutoRewardsDelegation(Address pool, int feePercentage);
void disableAutoRewardsDelegation(Address pool);
void autoDelegateRewards(Address pool, Address delegator);
long requestCommissionRateChange(int newCommissionRate);
void finalizeCommissionRateChange(long id);
void updateMetaData(byte[] newMetaDataUrl, byte[] newMetaDataContentHash);
void setSigningAddress(Address newAddress);
}
interface PoolCoinbase {
void transfer(BigInteger amount);
}
\end{lstlisting}
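For orientation, the fragment below is an illustrative sketch only and is not part of the system contracts: it shows how a basic delegation round trip could map onto the \texttt{PoolRegistry} interface above. The registry reference, the pool address and the argument values are placeholders, and the way value and permissions are attached to these calls is defined by the control flows in appendix \ref{appendix:sequence_diagrams} rather than by this snippet.
\begin{lstlisting}[language = Java]
// Illustrative sketch only; the registry reference, pool address and
// amounts are placeholders rather than part of the specification.
import java.math.BigInteger;
import avm.Address; // assumed AVM API import path

class DelegationFlowSketch {
    static void delegationCycle(PoolRegistry registry, Address pool) {
        // Delegate stake to the pool (in the actual flow the delegated
        // value accompanies the call).
        registry.delegate(pool);

        // Request to undelegate part of the stake; the call returns an id
        // identifying the pending undelegation.
        long undelegateId = registry.undelegate(pool, BigInteger.TEN, BigInteger.ZERO);

        // Once the pending undelegation becomes finalizable, release the funds.
        registry.finalizeUndelegate(undelegateId);

        // Accumulated rewards can be withdrawn (or re-delegated via
        // redelegateRewards instead).
        BigInteger rewards = registry.withdrawRewards(pool);
    }
}
\end{lstlisting}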
\clearpage | {
"alphanum_fraction": 0.7400924703,
"avg_line_length": 45.8787878788,
"ext": "tex",
"hexsha": "c8e435ecf8981eeb558c8c096e6f5286dc3bdb29",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2019-11-11T18:47:28.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-11-11T18:47:28.000Z",
"max_forks_repo_head_hexsha": "64516f23f9bfffc3ee50b67d278f3551d54cd6d2",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "aionnetwork/unity-engineering-spec",
"max_forks_repo_path": "appendices/contracts.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "64516f23f9bfffc3ee50b67d278f3551d54cd6d2",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "aionnetwork/unity-engineering-spec",
"max_issues_repo_path": "appendices/contracts.tex",
"max_line_length": 367,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "64516f23f9bfffc3ee50b67d278f3551d54cd6d2",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "aionnetwork/unity-engineering-spec",
"max_stars_repo_path": "appendices/contracts.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 680,
"size": 3028
} |
\section{Task Execution and Observation} \label{sec:results_game}
The game is played twice by the volunteers, once with some initial direction by the observing researcher, and then again without any aid, unless requested. During the game, the observer’s role is to record the volunteer’s performance in completing the tasks: tracking their total game time, their completion time per task, the number of errors they commit, when they request help, which tasks they could not solve correctly, and how often the system caused issues for them. However, the observer is also there to provide support if necessary, and to keep the volunteer sharing their impressions, as other valuable information may be better gathered during the play session itself.\\
\subsection{Movement and Time as a Measure } \label{sec:results_game_movementtime}
One of the elements that the observation was meant to register was the total time taken to complete the game, which would be a function of how fast the players managed to learn and recall the task completion gestures. However, this ended up being influenced more by the time taken moving around the virtual environment.\\
Both the times taken to complete tasks and the times taken to become accustomed to movement were predicted to be important values to take into consideration. However, the latter did not turn out to be a useful metric, as movement was handled the same way for both the Cultural and Non-Cultural Groups. It was going to be tracked in the interest of recording relevant observations, but it became clear that doing so as intended was impossible. Several volunteers never reached a moment where they said they had gotten used to the controls, right up to the very end of the experience, and even had trouble the second time. Meanwhile, other users were immediately proficient with them. Nevertheless, all of them still claimed that movement was the least enjoyable part of the game. The main complication stated was the transition between the left movement and the right movement, as transitioning between the two involved too wide a motion, and the camera would not pick up on it right away.\\
Moreover, the most impactful effect of movement on the data was that it made the total game time a noisy metric. Rather than being able to take that information and attribute a conclusion to the cultural impact, partial conclusions about technological impact needed to be made, as this provides a clear influence. Total times in the first trial were much slower for the Non-Cultural Group volunteers, but there is a lot of variance in the values, and for the second trial the values were all much too similar between the two groups, without it being possible to support any conclusion in regard to the change. Individual conclusions may be made, as the fastest times observed in the first trial belong to users reportedly with a high technological interest score (V1, V8, V15), and two of them proved to be outstandingly fast in the second trial again (V8, V15). Also, there is a user (V9) who was outstandingly slow in the second trial, who struggled with movement to a much larger extent than anyone in either group, and who coincidentally also reported a technological interest score lower than the median. In effect, this user may have struggled with retention of this aspect of the game. However, the lowest times on both trials do not have a definite pattern with respect to the technological interest scores, and even the volunteer who showcased the most trouble did not self-report the lowest of all scores. It is very possible that a correlation exists between higher user confidence with technology and gestural command retention, but, given that technological impact was not the focus of this study and that not enough rigor was employed to explore the matter and circumvent the potential issue of self-reporting biases, any further analysis was dropped. Reiterating, the thought given to this section is a justification of why movement timing observations were not entirely helpful in drawing meaningful conclusions.\\
For clarity, table \ref{tab:Table_TotalGameTimes} shows the Total Game Time data of every participant including averages and standard deviation on that data.\\
\begin{table}[ht]
\begin{tabular}{lllllllll}
\multicolumn{2}{l}{First Trial} & & & & \multicolumn{2}{l}{Second Trial} & & \\ \cline{2-2} \cline{4-4} \cline{7-7} \cline{9-9}
\multicolumn{1}{l|}{Cultural} & \multicolumn{1}{l|}{Total Time} & \multicolumn{1}{l|}{Non-Cultural} & \multicolumn{1}{l|}{Total Time} & & \multicolumn{1}{l|}{Cultural} & \multicolumn{1}{l|}{Total Time} & \multicolumn{1}{l|}{Non-Cultural} & \multicolumn{1}{l|}{Total Time} \\ \cline{1-4} \cline{6-9}
\multicolumn{1}{|l|}{V1} & \multicolumn{1}{l|}{322} & \multicolumn{1}{l|}{V2} & \multicolumn{1}{l|}{642} & \multicolumn{1}{l|}{} & \multicolumn{1}{l|}{V1} & \multicolumn{1}{l|}{357} & \multicolumn{1}{l|}{V2} & \multicolumn{1}{l|}{315} \\ \cline{1-4} \cline{6-9}
\multicolumn{1}{|l|}{V3} & \multicolumn{1}{l|}{348} & \multicolumn{1}{l|}{V4} & \multicolumn{1}{l|}{550} & \multicolumn{1}{l|}{} & \multicolumn{1}{l|}{V3} & \multicolumn{1}{l|}{329} & \multicolumn{1}{l|}{V4} & \multicolumn{1}{l|}{361} \\ \cline{1-4} \cline{6-9}
\multicolumn{1}{|l|}{V5} & \multicolumn{1}{l|}{355} & \multicolumn{1}{l|}{V7} & \multicolumn{1}{l|}{433} & \multicolumn{1}{l|}{} & \multicolumn{1}{l|}{V6} & \multicolumn{1}{l|}{372} & \multicolumn{1}{l|}{V7} & \multicolumn{1}{l|}{388} \\ \cline{1-4} \cline{6-9}
\multicolumn{1}{|l|}{V6} & \multicolumn{1}{l|}{405} & \multicolumn{1}{l|}{V8} & \multicolumn{1}{l|}{356} & \multicolumn{1}{l|}{} & \multicolumn{1}{l|}{V9} & \multicolumn{1}{l|}{530} & \multicolumn{1}{l|}{V8} & \multicolumn{1}{l|}{258} \\ \cline{1-4} \cline{6-9}
\multicolumn{1}{|l|}{V9} & \multicolumn{1}{l|}{415} & \multicolumn{1}{l|}{V10} & \multicolumn{1}{l|}{470} & \multicolumn{1}{l|}{} & \multicolumn{1}{l|}{V11} & \multicolumn{1}{l|}{327} & \multicolumn{1}{l|}{V10} & \multicolumn{1}{l|}{321} \\ \cline{1-4} \cline{6-9}
\multicolumn{1}{|l|}{V11} & \multicolumn{1}{l|}{336} & \multicolumn{1}{l|}{V12} & \multicolumn{1}{l|}{355} & \multicolumn{1}{l|}{} & \multicolumn{1}{l|}{V13} & \multicolumn{1}{l|}{307} & \multicolumn{1}{l|}{V12} & \multicolumn{1}{l|}{304} \\ \cline{1-4} \cline{6-9}
\multicolumn{1}{|l|}{V13} & \multicolumn{1}{l|}{446} & \multicolumn{1}{l|}{V14} & \multicolumn{1}{l|}{556} & \multicolumn{1}{l|}{} & \multicolumn{1}{l|}{V15} & \multicolumn{1}{l|}{267} & \multicolumn{1}{l|}{V14} & \multicolumn{1}{l|}{358} \\ \cline{1-4} \cline{6-9}
\multicolumn{1}{|l|}{V15} & \multicolumn{1}{l|}{322} & \multicolumn{1}{l|}{V16} & \multicolumn{1}{l|}{361} & \multicolumn{1}{l|}{} & \multicolumn{1}{l|}{V17} & \multicolumn{1}{l|}{347} & \multicolumn{1}{l|}{V16} & \multicolumn{1}{l|}{314} \\ \cline{1-4} \cline{6-9}
\multicolumn{1}{|l|}{V17} & \multicolumn{1}{l|}{425} & & & & & & & \\ \cline{1-2} \cline{6-9}
& & & & \multicolumn{1}{l|}{} & \multicolumn{1}{l|}{Average} & \multicolumn{1}{l|}{354.5} & \multicolumn{1}{l|}{Average} & \multicolumn{1}{l|}{327.38} \\ \cline{1-4} \cline{6-9}
\multicolumn{1}{|l|}{Average} & \multicolumn{1}{l|}{374.89} & \multicolumn{1}{l|}{Average} & \multicolumn{1}{l|}{465.38} & \multicolumn{1}{l|}{} & \multicolumn{1}{l|}{Std. Dev.} & \multicolumn{1}{l|}{77.91} & \multicolumn{1}{l|}{Std. Dev.} & \multicolumn{1}{l|}{40.49} \\ \cline{1-4} \cline{6-9}
\multicolumn{1}{|l|}{Std. Dev.} & \multicolumn{1}{l|}{47.84} & \multicolumn{1}{l|}{Std. Dev.} & \multicolumn{1}{l|}{108.68} & & & & & \\ \cline{1-4}
\end{tabular}
\caption{\label{tab:Table_TotalGameTimes}Total elapsed time between game start and completion for both Cultural and Non-Cultural Groups, for both the First and Follow-Up Trials' observations}
\end{table}
\subsection{Volunteer Number 8} \label{sec:results_game_volunteernumber8}
Volunteer number 8 stands out as an unexpected anomaly in the data. To better highlight why this is the case, the following table \ref{tab:Table_Volunteer 8} is included. It should be noted that, for the purposes of better showcasing the difference between this volunteer and the others in the Non-Cultural Group, data from the O2 and T2 tasks was not left out.\\
\begin{table}[ht]
\begin{tabular}{|l|l|l|l|}
\hline
& Total User Errors & Total Forgotten Gestures & Total Help Requests \\ \hline
V2 & 5 & 4 & 0 \\ \hline
V4 & 8 & 4 & 1 \\ \hline
V7 & 6 & 4 & 0 \\ \hline
\rowcolor[HTML]{e2ffe1}
V8 & 0 & 0 & 0 \\ \hline
V10 & 4 & 4 & 0 \\ \hline
V12 & 7 & 7 & 1 \\ \hline
V14 & 8 & 3 & 2 \\ \hline
V16 & 6 & 3 & 1 \\ \hline
\end{tabular}
\caption{\label{tab:Table_Volunteer 8}Count of focused performance failures committed by Non-Cultural Group members during the Second Trial}
\end{table}
Despite being in a group that consistently showcases struggles with the game, Volunteer number 8 made no observable mistakes. Additionally, they were also the fastest of all participants in completing the tasks and were one of the fastest users in getting used to the movement options. In the first trial they did commit user errors, but all of them were related to aid requests and to ensuring they were not about to perform an error ahead of time. Similarly, all delays in completing any of the Tasks by the user were due to them taking a moment to think and recall on their own, and they removed their hand from within the Leap Motion’s tracking radius to prevent any accidental reading. In other words, they learned the system’s functions and restrictions instantly.\\
Nonetheless, they claimed to have never experimented with systems entirely controlled by means of hand gestures, and to have a low amount of experience with other similar ones, such as Virtual Reality environments with hand tracking controllers. They did, however, state that they had a larger degree of experience with consoles using wide arm gestures, such as Nintendo’s Wiimote, but claimed to have never tried the more similar tracking approach used by the Sony Move. Thus, the major characteristic the user had going for them was their earlier self-reported high interest in the field of Human Interaction, and their efforts towards it given their study of technology at the same university where the trials were performed.\\
In terms of determining whether the user is a statistical outlier, the data changes slightly depending on whether or not the full sample of data from both trials is used. Assuming a Normal Distribution of users, the probability of observing the user’s Standard Score is below 3.44\% (-1.828) with data from both trials and below 1.70\% (-2.124) taking data from only the second trial. Additionally, the level of technological interest showcased is above 2.794\% (1.886). There is enough of a suspicion that the user lands two standard deviations above the median in performance; however, choosing a correct cut-off is a well-known matter prone to producing false positives in a test group with a low number of test items or a small population \cite{hambleton1978use} \cite{ingraham1996empirical}. With two oddities among the scores on a two-tailed cut-off, there is still at least a 5\% chance that the user was incorrectly flagged as an outlier, and the rigor self-employed in determining further measures beyond performance errors is in question, as are the potential misgivings about the sample size and methodology. As such, despite the tabled data crossing the 0.05 alpha level on a two-tailed cut-off (+/- 1.96) for the second trial on its own, there is a fear that eliminating the user may be a mistake on the part of the analysis.\\
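For reference, the standardization used in the preceding discussion is the usual Standard Score
\begin{equation}
z=\frac{x-\bar{x}}{s},
\end{equation}
where \(\bar{x}\) and \(s\) are the mean and standard deviation of the relevant group, and a two-tailed cut-off at the 0.05 alpha level corresponds to \(|z|>1.96\) under a Normal distribution.\\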
\subsection{Performances, Memory and User Errors} \label{sec:results_game_performance}
User performance was registered during the game; this was the focal point of the observation protocol. Odd behaviours, mistakes, failure at remembering the correct gesture and help requests were the primary measures employed, targeted primarily at moments during which Tasks were being completed. Table \ref{tab:Table_Performance} provides a summary of the data collected for both Cultural and Non-Cultural groups during both of the trials, with the dimensions: Average Task Time, Total Number of User Errors, Total Number of Failures (did not use the correct gesture), Total Number of Help Requests. The choice of using totals rather than converted scores here is apt because both groups have the same size, 8 participants, and thus any pre-processing of the data would just lead to the same results. It ought to be highlighted that additional time information was already noted in Table \ref{tab:Table_TotalGameTimes}.\\
\begin{table}[ht]
\begin{tabular}{lllllllllllll}
\cline{2-13}
\multicolumn{1}{l|}{} & \multicolumn{4}{l|}{O1} & \multicolumn{4}{l|}{T1} & \multicolumn{4}{l|}{O3} \\ \cline{2-13}
\multicolumn{1}{l|}{} & \multicolumn{1}{l|}{Time} & \multicolumn{1}{l|}{Error} & \multicolumn{1}{l|}{Fail} & \multicolumn{1}{l|}{Help} & \multicolumn{1}{l|}{Time} & \multicolumn{1}{l|}{Error} & \multicolumn{1}{l|}{Fail} & \multicolumn{1}{l|}{Help} & \multicolumn{1}{l|}{Time} & \multicolumn{1}{l|}{Error} & \multicolumn{1}{l|}{Fail} & \multicolumn{1}{l|}{Help} \\ \hline
\multicolumn{1}{|l|}{C1} & \multicolumn{1}{l|}{8.63} & \multicolumn{1}{l|}{2} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{1} & \multicolumn{1}{l|}{8.56} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{2} & \multicolumn{1}{l|}{6.22} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{0} \\ \hline
\multicolumn{1}{|l|}{C2} & \multicolumn{1}{l|}{7.25} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{10.88} & \multicolumn{1}{l|}{2} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{6.57} & \multicolumn{1}{l|}{2} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{0} \\ \hline
\multicolumn{1}{|l|}{NC1} & \multicolumn{1}{l|}{10.17} & \multicolumn{1}{l|}{7} & \multicolumn{1}{l|}{3} & \multicolumn{1}{l|}{2} & \multicolumn{1}{l|}{6.75} & \multicolumn{1}{l|}{1} & \multicolumn{1}{l|}{1} & \multicolumn{1}{l|}{1} & \multicolumn{1}{l|}{5.20} & \multicolumn{1}{l|}{3} & \multicolumn{1}{l|}{3} & \multicolumn{1}{l|}{1} \\ \hline
\multicolumn{1}{|l|}{NC2} & \multicolumn{1}{l|}{7.50} & \multicolumn{1}{l|}{6} & \multicolumn{1}{l|}{3} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{8.25} & \multicolumn{1}{l|}{5} & \multicolumn{1}{l|}{2} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{8.50} & \multicolumn{1}{l|}{8} & \multicolumn{1}{l|}{6} & \multicolumn{1}{l|}{2} \\ \hline
& & & & & & & & & & & & \\ \cline{2-13}
\multicolumn{1}{l|}{} & \multicolumn{4}{l|}{T3.1} & \multicolumn{4}{l|}{T3.2} & \multicolumn{4}{l|}{O4} \\ \cline{2-13}
\multicolumn{1}{l|}{} & \multicolumn{1}{l|}{Time} & \multicolumn{1}{l|}{Error} & \multicolumn{1}{l|}{Fail} & \multicolumn{1}{l|}{Help} & \multicolumn{1}{l|}{Time} & \multicolumn{1}{l|}{Error} & \multicolumn{1}{l|}{Fail} & \multicolumn{1}{l|}{Help} & \multicolumn{1}{l|}{Time} & \multicolumn{1}{l|}{Error} & \multicolumn{1}{l|}{Fail} & \multicolumn{1}{l|}{Help} \\ \hline
\multicolumn{1}{|l|}{C1} & \multicolumn{1}{l|}{6.33} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{6.89} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{6.33} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{0} \\ \hline
\multicolumn{1}{|l|}{C2} & \multicolumn{1}{l|}{6.25} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{8.88} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{6.00} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{0} \\ \hline
\multicolumn{1}{|l|}{NC1} & \multicolumn{1}{l|}{7.50} & \multicolumn{1}{l|}{5} & \multicolumn{1}{l|}{2} & \multicolumn{1}{l|}{2} & \multicolumn{1}{l|}{11.13} & \multicolumn{1}{l|}{6} & \multicolumn{1}{l|}{1} & \multicolumn{1}{l|}{3} & \multicolumn{1}{l|}{7.00} & \multicolumn{1}{l|}{2} & \multicolumn{1}{l|}{1} & \multicolumn{1}{l|}{1} \\ \hline
\multicolumn{1}{|l|}{NC2} & \multicolumn{1}{l|}{7.38} & \multicolumn{1}{l|}{7} & \multicolumn{1}{l|}{5} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{10.25} & \multicolumn{1}{l|}{5} & \multicolumn{1}{l|}{3} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{6.17} & \multicolumn{1}{l|}{5} & \multicolumn{1}{l|}{5} & \multicolumn{1}{l|}{0} \\ \hline
& & & & & & & & & & & & \\ \cline{2-5}
\multicolumn{1}{l|}{} & \multicolumn{4}{l|}{T4} & & & & & & & & \\ \cline{2-5}
\multicolumn{1}{l|}{} & \multicolumn{1}{l|}{Time} & \multicolumn{1}{l|}{Error} & \multicolumn{1}{l|}{Fail} & \multicolumn{1}{l|}{Help} & & & & & & & & \\ \cline{1-5}
\multicolumn{1}{|l|}{C1} & \multicolumn{1}{l|}{7.33} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{2} & & & & & & & & \\ \cline{1-5}
\multicolumn{1}{|l|}{C2} & \multicolumn{1}{l|}{11.75} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{0} & \multicolumn{1}{l|}{0} & & & & & & & & \\ \cline{1-5}
\multicolumn{1}{|l|}{NC1} & \multicolumn{1}{l|}{8.00} & \multicolumn{1}{l|}{2} & \multicolumn{1}{l|}{1} & \multicolumn{1}{l|}{2} & & & & & & & & \\ \cline{1-5}
\multicolumn{1}{|l|}{NC2} & \multicolumn{1}{l|}{9.25} & \multicolumn{1}{l|}{1} & \multicolumn{1}{l|}{1} & \multicolumn{1}{l|}{0} & & & & & & & & \\ \cline{1-5}
\end{tabular}
\caption{\label{tab:Table_Performance}User Performance Data during the First and Second Trials.}
\end{table}
Of the data displayed, time ended up being the least insightful. As discussed earlier, total game time did not yield many discernible results because of the influence movement had on the players, even though movement was not one of the aspects that differentiated the groups. Even after discounting all time spent moving the character, time still does not prove to be a conclusive metric. Organizing the data into a task-time comparison table between the Cultural and Non-Cultural Groups (Figure \ref{fig:FigureTimeComparisonTable}) may suggest that the Non-Cultural Group had more trouble solving each task quickly, but this is neither a consistent observation, nor was it possible to show with 95\% confidence that it was the case. Likewise, the data do not support the claim that the more user errors a group made on a task, the slower that group was: a regression analysis yields an R-squared of only 0.5688, which is too low to call a good fit between the two variables.\\
\begin{figure}[t]
\centering
\includegraphics[width=0.8\paperwidth]{figures/TimeComparisonTable.png}
\caption{\label{fig:FigureTimeComparisonTable}Task execution time comparison table between the Cultural and Non-Cultural Groups}
\end{figure}
Naturally, this does suggest that well-fitting Cultural Emblems are a beneficial way of approaching a gestural control scheme, as they appear to lower the negative impact seen in both aspects for the Non-Cultural Group. However, the data needs to show this clearly, so the mistakes committed by the Non-Cultural Group need to be examined. Were participants making mistakes because of the difficulty of the gestures themselves? Were they confusing the correct gesture for a task with the one belonging to another task? To answer this, all of the Non-Cultural Group's erroneous gestures were registered and reviewed, and from them the breakdown in Table \ref{tab:Table_ErrorBreakdown} was built, showing the most conspicuous types of error.\\
\begin{table}[t]
\begin{tabular}{llllllll}
\cline{2-8}
\multicolumn{1}{l|}{} & \multicolumn{1}{l|}{O1} & \multicolumn{1}{l|}{T1} & \multicolumn{1}{l|}{O3} & \multicolumn{1}{l|}{T3.1} & \multicolumn{1}{l|}{T3.2} & \multicolumn{1}{l|}{O4} & \multicolumn{1}{l|}{T4} \\ \hline
\multicolumn{1}{|l|}{Total Mistakes} & \multicolumn{1}{l|}{6} & \multicolumn{1}{l|}{5} & \multicolumn{1}{l|}{8} & \multicolumn{1}{l|}{7} & \multicolumn{1}{l|}{5} & \multicolumn{1}{l|}{5} & \multicolumn{1}{l|}{1} \\ \hline
\multicolumn{1}{|l|}{Gestural Mistakes} & \multicolumn{1}{l|}{6} & \multicolumn{1}{l|}{4} & \multicolumn{1}{l|}{3} & \multicolumn{1}{l|}{5} & \multicolumn{1}{l|}{4} & \multicolumn{1}{l|}{5} & \multicolumn{1}{l|}{1} \\ \hline
\multicolumn{1}{|l|}{Emblematic Substitutions} & \multicolumn{1}{l|}{6} & \multicolumn{1}{l|}{3} & \multicolumn{1}{l|}{2} & \multicolumn{1}{l|}{5} & \multicolumn{1}{l|}{4} & \multicolumn{1}{l|}{5} & \multicolumn{1}{l|}{1} \\ \hline
\multicolumn{1}{|l|}{External Emblematic Substitutions} & \multicolumn{1}{l|}{2} & \multicolumn{1}{l|}{1} & \multicolumn{1}{l|}{2} & \multicolumn{1}{l|}{0*} & \multicolumn{1}{l|}{4} & \multicolumn{1}{l|}{3} & \multicolumn{1}{l|}{1} \\ \hline
\multicolumn{8}{l}{* All 5 performed the same gesture (Wave) and did not realize it was wrong.}
\end{tabular}
\caption{\label{tab:Table_ErrorBreakdown}Breakdown of error types made by the Non-Cultural Group in the second trial.}
\end{table}
Of all the mistakes committed, roughly 75\% were Gestural Mistakes. These are cases where the user attempts a gesture that is wrong for the task and does not abort the attempt until after completing the gesture. The remaining 25\% are other types of mistakes, which may also involve gestures, but did not include a full attempt with a given wrong gesture. Of these 28 Gestural Mistakes, all except 2 were Emblematic Substitutions, i.e., mistakes where the volunteer performed an emblem belonging to their culture instead of the requested command. As would be expected, the emblems chosen for the task were, for the most part, gestures the volunteers had performed during their initial Cultural Survey in the pre-test. What this seems to imply is that, despite being told clearly not to use those emblems because they would be considered wrong, the participants still recalled their emblematic gestures over the gestures they had actually performed the first time they played the game. This is evidence that emblems are the more natural way of performing those tasks. For clarity, the 2 non-emblematic gestures involved pantomimes: one user attempted to convey ``here'' by pointing out proximity to themselves with their hand, and another performed outright mimicry, grabbing and dragging an invisible object.\\
The final row refers to External Emblematic Substitutions. Here, the word External refers to the game's gestural command set: the row breaks down how many of the Emblematic Substitutions used gestures that were not present in the game at all. This distinction matters because it separates participants who merely forgot the gesture and attempted to solve the task in whatever way felt right from those who recalled performing a certain gesture and were trying to fit it into a context that made sense. One example of the latter is Task O1: all 4 of the non-external substitutions involved the volunteers performing Task O2's gesture, so it is very likely that these 4 volunteers were trying to recontextualize a gesture they knew rather than successfully recalling the correct gesture they had been taught. Meanwhile, the other two participants, who did use an external gesture, performed what would have been the Cultural Group's command. More on this is brought up in Section \ref{sec:results_surveys_confidence}.\\
This row, however, is somewhat open to interpretation, namely in regard to Task T3.1. As noted in the table, all 5 users made the exact same mistake of performing a Wave motion to solve this task, which is the method by which two other tasks are solved in the Non-Cultural Group. What is noteworthy about Task T3.1 is that it involves calling the attention of a blue humanoid helper that is fully animated in reaction to the participants, and one of the actions this helper performs once its attention has been caught is waving a hand in front of its face. There is strong reason to believe the users were actually recalling this response of the helper, rather than misremembering the context in which a waving gesture fits within the game.\\
Ultimately, nearly every substitution ended up resembling the users' Cultural Surveys or even the Cultural Group's gesture set. There are strong reasons to believe that well-fitting Emblematic Gestures had an impact both on the short-term learning rate of the game's gesture set and on its long-term memorization, when compared with lesser-fitting options.\\
"alphanum_fraction": 0.5923577563,
"avg_line_length": 278.5794392523,
"ext": "tex",
"hexsha": "df4cd1dffaad5b13ab8e2b4ea9e733fe3e3e1ed1",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "3d7375e522404e3d21d010ceddb5dbf1514f8803",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "up201306506/ShamanicInterfaceProject",
"max_forks_repo_path": "Docs/Latex/chapters/Results/Game.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "3d7375e522404e3d21d010ceddb5dbf1514f8803",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "up201306506/ShamanicInterfaceProject",
"max_issues_repo_path": "Docs/Latex/chapters/Results/Game.tex",
"max_line_length": 1929,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "3d7375e522404e3d21d010ceddb5dbf1514f8803",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "up201306506/ShamanicInterfaceProject",
"max_stars_repo_path": "Docs/Latex/chapters/Results/Game.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 8435,
"size": 29808
} |
\section{Gaussian Processes}
% GAUSSIAN PROCESSES
\newcommand{\GP}[0]{\mathcal{GP}}
\newcommand{\gpmean}[0]{\mu_{\GP}}
\newcommand{\gpvar}[0]{\sigma_{\GP}^2}
\newcommand{\gpcovar}[0]{\Sigma_{\GP}^2}
% \newcommand{\gpK}[0]{\mathbf{K}_\theta}
% \newcommand{\gpM}[0]{\mathbf{m}_\theta}
% \newcommand{\Kgp}[0]{\mathcal{GP}}
% \newcommand{\lengthscale}{\ell}
% \newcommand{\ynoise}{{\episilon_y^{2}}}
% \newcommand{\xnoise}{{\episilon_x^{2}}}
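For reference, and assuming a zero-mean prior with kernel $k$, training inputs $\mathbf{X}$, targets $\mathbf{y}$ and observation noise variance $\sigma_n^2$, the predictive mean and variance referred to in the table below are the standard ones:
\[
\gpmean(\mathbf{x}_*) = \mathbf{k}_*^\top \left(\mathbf{K} + \sigma_n^2 \mathbf{I}\right)^{-1} \mathbf{y},
\qquad
\gpvar(\mathbf{x}_*) = k(\mathbf{x}_*, \mathbf{x}_*) - \mathbf{k}_*^\top \left(\mathbf{K} + \sigma_n^2 \mathbf{I}\right)^{-1} \mathbf{k}_*,
\]
where $\mathbf{K} = k(\mathbf{X}, \mathbf{X})$ is the kernel matrix over the training inputs and $\mathbf{k}_* = k(\mathbf{X}, \mathbf{x}_*)$ is the vector of kernel evaluations between the training inputs and the test point $\mathbf{x}_*$.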
\begin{table}[h]
\centering
\begin{tabular}{ll}
\toprule
\textbf{Symbol} & \textbf{Meaning} \\
\midrule
$\GP$ & Gaussian process distribution \\
$\gpmean$ & GP predictive mean function \\
$\gpvar$ & GP predictive variance function \\
$\gpcovar$ & GP predictive covariance function \\
\bottomrule
\end{tabular}
\caption{List of the most relevant symbols used for Gaussian processes.}
\label{tab:sleek_relevant_packages}
\end{table} | {
"alphanum_fraction": 0.6408376963,
"avg_line_length": 31.8333333333,
"ext": "tex",
"hexsha": "dbcec4568a27561d81547aa96ffc84bbb6845706",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "c8c37e2364b4499e36590ecd3d89b3e0cb47b1bf",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "IPL-UV/latex_math",
"max_forks_repo_path": "sections/gps.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "c8c37e2364b4499e36590ecd3d89b3e0cb47b1bf",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "IPL-UV/latex_math",
"max_issues_repo_path": "sections/gps.tex",
"max_line_length": 76,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "c8c37e2364b4499e36590ecd3d89b3e0cb47b1bf",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "IPL-UV/latex_math",
"max_stars_repo_path": "sections/gps.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 321,
"size": 955
} |
\chapter{Methodology}
\label{chapter:method}
An information system architecture that supports care planning of pressure ulcers requires certain basic functionalities such as
\begin{itemize}
\item capturing bio-mechanical data of the body
\item analyzing those data
\item collecting risk assessment data
\item providing a platform to report and document ulcers
\item networking related people
\item planning schedules.
\end{itemize}
Therefore, our information system architecture is supported by pressure-sensing mats and mobile apps. Standard risk assessment scales and ulcer documentation formats are added to the system with appropriate modifications. Scalability, flexibility and cost-effectiveness are further important characteristics of such a system. Our initial scope was to build a pressure-sensing mattress system capable of recommending optimal repositioning strategies based on bio-mechanical data. Because there are no proper evaluation criteria to assess pressure ulcer prevention, and the existing bio-mechanical and pathological research on pressure ulcers is inconclusive, we constructed an information system that provides a platform to investigate the pressure ulceration phenomenon while serving as a care-planning tool, digitizing processes that are currently done on paper or not done in any systematic way. Existing theories can be used in our system to improve care planning within their limitations.
\section{Expected Deliverables}
\begin{enumerate}
\item Low-cost sensor panel to measure magnitude and duration of pressure at point of contact
\item Information system architecture for continuous pressure monitoring, ulcer documentation and risk assessment.
\item Posture and ulceration point detection from pressure maps to enhance the repositioning plan.
\item Mobile-app-based notification system alerting caretakers about repositioning times
\end{enumerate}
\section{Components and Functionality}
There are three main components of our solution.
\begin{itemize}
\item Information System (Server and Backend)
\item Mobile App
\item Pressure sensing mattress
\end{itemize}
\input{figs/component-diagram.tex}
The information system provides the basic components of authentication and data storage for pressure data, personal risk assessment data and ulcer documentation. It also contains a supplementary sub-component for machine learning models. The information system exposes a RESTful API through which the mobile app clients and the pressure-sensing mats send and retrieve relevant information. The mobile app provides the user interface for patients/guardians, caretakers and doctors to interact with the system.
The pressure mat consists of a sensor panel built from a substrate of the piezo-resistive material Velostat\textsuperscript{\textregistered}. The sensor readings are processed one cell at a time by the ATMega32\textsuperscript{\textregistered} microcontroller and sent to the information system through a NodeMCU/ESP8266\textsuperscript{\textregistered} via WiFi and the internet. The information system is capable of integrating other commercially available pressure-sensing mattresses without any change to its structure.
In the central server of the information system these pressure data are filtered and stored. Sleeping postures and ulceration points are identified from these data using neural network models, and the pressure at those points is saved in a separate table.
There is a notification system that sends notifications to the caretakers of patients, instructing them on the repositioning plan.
\input{figs/functional_block}
\section{Information system back-end}
The information system backend is written in Python using the full-stack web framework Django\textsuperscript{\textregistered} and hosted on the Heroku\textsuperscript{\textregistered} cloud platform. As the database management system we chose PostgreSQL, an SQL-based relational database management system. All static media files are stored in a Cloudinary\textsuperscript{\textregistered} S3 bucket. The APIs are created with the django-rest-framework library, and Firebase\textsuperscript{\textregistered} is used to communicate with the mobile apps through push notifications.
The web application is organized into Django apps (sub-modules), one for each main functionality.
\begin{enumerate}
\item Authentication and User Profiles
\item Social connection handling
\item Pressure data
\item Personal Risk Analysis
\item Ulcer Documentation
\end{enumerate}
Neural network models are built and trained using the TensorFlow\textsuperscript{\textregistered} and Keras\textsuperscript{\textregistered} libraries and hosted on Heroku\textsuperscript{\textregistered} using the popular Python backend microframework Flask\textsuperscript{\textregistered}.
\subsection{Authentication and Authorization}
There are user accounts to authenticate users, divided into three groups: doctors, caretakers and patients. These roles and accounts are used to authorize access to particular components. Only users themselves have write or update permission on their personal information; caretakers can update risk assessment data, while doctors can update ulcer reporting documentation as well as risk assessment data. Even the latter data are only accessible to caretakers or doctors who are assigned to the relevant patients. Token authentication is used to authenticate access.
To create an account a user is requested to provide a username and password, which are thereafter used to log in. Users can update their profile with basic details and profile photos.
\subsection{Social Networking}
All doctors, caretakers and patients can see each other in search lists. Connections between users are established via a request-and-confirm mechanism, with send, show, accept, reject and delete functionalities for a request. Doctors and caretakers can access a patient's data only if they have been connected to that particular patient. Users can remove others from their connection list.
\subsection{Pressure data}
Pressure data sent from the pressure mats are stored in the database via the central server and further analysed with neural network models to find ulceration points. Each pressure reading is stored in the format \textbf{lx, ly, x, y, p, n}.
Here,
\begin{description}
\item[lx]: Number of cells over x axis of the mat
\item[ly]: Number of cells over y axis of the mat
\item[x]: x coordinate of the current cell
\item[y]: y coordinate of the current cell
\item[p]: Pressure at the (x,y) cell
\item[n]: frame number (reading the complete mat once is one frame)
\end{description}
This format supports sending cells one by one, so even a partial reading can be captured. It also does not restrict the resolution to a particular value decided by us: by changing lx and ly in the request, any available pressure-sensing mattress can be integrated without any structural change to the system. A sketch of such a request is shown below.
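As an illustration only (the endpoint URL, token and exact field layout below are hypothetical placeholders, not the real API of the system), a single cell reading could be posted as follows:
\begin{verbatim}
# Sketch: posting one pressure cell reading to the backend.
# The URL and token below are placeholders, not the real endpoint.
import requests

reading = {
    "lx": 32, "ly": 16,   # mat resolution (cells along x and y)
    "x": 12, "y": 5,      # coordinates of the current cell
    "p": 483,             # pressure value at (x, y)
    "n": 7,               # frame number
}
requests.post("https://example-server/api/pressure/",
              json=reading,
              headers={"Authorization": "Token <token>"})
\end{verbatim}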
\subsection{Machine Learning}
There are two machine learning models to analyze pressure data: one identifies the sleeping posture and the other identifies ulceration points. Since ulceration occurs at particular sites, it is important to identify the pressure at those locations. To locate these points on the pressure mat and to recognize repositioning, the patient's posture must be found from the pressure data.
We used a dataset from the University of Dallas to train the neural networks, with data preprocessing and augmentation to improve the models. It contains 13 people in 18 postures (5 major postures: supine, left yearning, right yearning, left fetal and right fetal, plus slight variations with rolling angle and with pillows used as wedges). For each, there is a collection of pressure distributions measured by a commercial pressure-measuring mattress for 2 minutes, which is roughly 120 frames. The resolution is 64 $\times$ 32.
\subsubsection{Posture Detection Model}
The posture detection model is a sequential model with several convolution and pooling layers before the final dense layers. Given an input pressure image, the model outputs the name of the corresponding sleeping posture (a minimal sketch is given below).
\input{figs/posturemodel}
\begin{description}
\item[Validation] The dataset was divided into a training and a holdout set such that data from 9 persons were used for training and data from 4 persons for the holdout set.
\item[Preprocessing] The pressure images are resized to 32 $\times$ 16, Gaussian noise with variance 0.08 is added, and finally a Gaussian filter with variance 0.5 is applied. Adding extra noise is meant to regularize the neural network so that it works in more realistic environments with low-cost pressure mattresses.
\item[Data Augmentation] Pressure images are rotated by random angles between $-150$ and $150$ degrees, and Gaussian noise with variance 0.1 is added.
\end{description}
The 5 labels for supine, left yearning, right yearning, left fetal and right fetal are one-hot encoded.
The neural network achieved 92.45\% accuracy on the holdout set.
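A minimal Keras sketch of such a network is given below; the layer sizes are illustrative assumptions, not the exact architecture used.
\begin{verbatim}
# Sketch only: an illustrative CNN for 5-class posture classification
# on 32 x 16 pressure images; layer sizes are assumptions.
from tensorflow import keras
from tensorflow.keras import layers

model = keras.Sequential([
    keras.Input(shape=(32, 16, 1)),           # preprocessed pressure image
    layers.Conv2D(16, 3, activation="relu", padding="same"),
    layers.MaxPooling2D(),
    layers.Conv2D(32, 3, activation="relu", padding="same"),
    layers.MaxPooling2D(),
    layers.Flatten(),
    layers.Dense(64, activation="relu"),
    layers.Dense(5, activation="softmax"),    # 5 one-hot posture labels
])
model.compile(optimizer="adam", loss="categorical_crossentropy",
              metrics=["accuracy"])
\end{verbatim}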
\subsubsection{Ulceration Point Detection Model}
The same dataset was used to train the neural network model for ulceration point detection. We manually created bounding boxes for the ulceration points using the annotation tool Labelbox\textsuperscript{\textregistered} and preprocessed the images as in the previous model. \input{figs/ulceration-point} The four parameters (the two coordinates of the upper-left corner of the bounding box, plus its height and width) were used to train the model with a mean squared error loss function. The model has two inputs, the pressure image and the name of the ulceration point under consideration (one-hot encoded), and it outputs the four bounding-box parameters. A sketch of this two-input model is shown below.
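A minimal Keras sketch of this two-input regression model follows; the layer sizes and the number of ulceration sites (assumed here to be 8) are illustrative assumptions.
\begin{verbatim}
# Sketch only: two-input model predicting 4 bounding-box parameters.
# Layer sizes and the number of sites (8) are assumptions.
from tensorflow import keras
from tensorflow.keras import layers

image_in = keras.Input(shape=(32, 16, 1), name="pressure_image")
x = layers.Conv2D(16, 3, activation="relu", padding="same")(image_in)
x = layers.MaxPooling2D()(x)
x = layers.Conv2D(32, 3, activation="relu", padding="same")(x)
x = layers.Flatten()(x)

site_in = keras.Input(shape=(8,), name="site_one_hot")  # ulceration site
x = layers.Concatenate()([x, site_in])
x = layers.Dense(64, activation="relu")(x)
bbox_out = layers.Dense(4, name="bbox")(x)  # x, y, height, width

model = keras.Model(inputs=[image_in, site_in], outputs=bbox_out)
model.compile(optimizer="adam", loss="mse")
\end{verbatim}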
\subsection{Scheduling}
Usually a 2-hour recommendation period is used for any posture, with no particular ordering of postures. However, the researchers at the University of Dallas tried to find a repositioning schedule based on pressure distribution. Unfortunately, their risk assessment metric is based on data from closely related research on slightly different problems, and their final result depends on the ad-hoc assumptions they used.
In summary, their research states that the supine posture is riskier because both sides of the body are subjected to pressure, whereas in a left or right posture one side of the body is subjected to more pressure but the other side is in a complete relieving phase. Although we hesitate about the validity of their somewhat arbitrary risk metric and ad-hoc assumptions, we decided to use their result and recommend a repositioning plan as follows. \input{figs/schedule}
\begin{enumerate}
\item Right Yearning - 3 h
\item Left Yearning - 3 h
\item Supine - 1.5 h
\item Left Fetal - 3 h
\item Right Fetal - 3 h
\end{enumerate}
The left and right postures should be applied alternately, but we do not distinguish between yearning and fetal. As these intervals are below the range of the NICE guidelines, their use can be justified.
\subsection{Personal Risk Assessment}
We considered two existing personal risk assessment scales, the Braden scale and the Waterlow scale. The information system captures data relevant to both scales and calculates the corresponding metrics.
The personal risk assessment forms contain the following data and are expected to be filled in by a health care professional (a doctor or a nurse).
\begin{description}
\item[Assessed By]: The doctor or the caretaker (nurse) assessed personal risk
\item[Gender]: Male/Female
\item[Age]: Age of the patient
\item[Weight]: Weight of the patient (kg)
\item[Height]: Height of the patient (cm)
\end{description}
The following items should each be scored from 1 to 4 according to the Braden scale guideline.
\begin{description}
\item[Sensory perception]: Ability to respond meaningfully to pressure-related discomfort
\item[Moisture]: Degree to which skin is exposed to moisture
\item[Activity]: Degree of physical activity
\item[Mobility]: Ability to change and control body position
\item[Nutrition]: Usual food intake pattern
\item[Friction and Shear]
\end{description}
Explicit definitions of the 1, 2, 3 and 4 levels for each category are given in the Braden scale guideline (which is shown in an information box in the mobile app).
According to the total score, patients are classified into four risk categories (a small sketch of this thresholding is given after the table).
\begin{tabular}{l r}
Severe risk & $\leq$ 9\\
High risk & 10 - 12\\
Moderate risk & 13 - 14\\
Mild risk & 15 - 18 \\
\end{tabular}\\
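As a minimal sketch (treating totals above 18 as not at risk, which is an assumption here rather than part of the system's specification):
\begin{verbatim}
# Sketch: map a Braden total score to the risk category used above.
# Treating scores above 18 as "not at risk" is an assumption here.
def braden_risk_category(total_score: int) -> str:
    if total_score <= 9:
        return "Severe risk"
    if total_score <= 12:
        return "High risk"
    if total_score <= 14:
        return "Moderate risk"
    if total_score <= 18:
        return "Mild risk"
    return "Not at risk"
\end{verbatim}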
These are some other important risk factors (Yes/No binary options).
\begin{itemize}
\item Diabetes mellitus
\item Peripheral vascular disease
\item Cerebral vascular accident
\item Hypotension
\item Hypoalbuminemia
\item Incontinence
\item Venous thrombosis
\end{itemize}
\subsection{Ulcer documentation}
Documenting existing ulcers is an important concern, since treatments are based on proper documentation. This includes basic details related to the wound, the surrounding skin and the condition of the patient. We adopted basic components from the NPUAP (National Pressure Ulcer Advisory Panel) guidelines and the SOS (State of Oklahoma) toolkit to prepare our documentation pattern. We discussed the current state of pressure ulcer documentation with a medical practitioner in Sri Lanka and removed over-complicated components from these two guidelines. We then added several components and altered the terminology to make it compatible with the medical terminology used in Sri Lanka. Some of the components we introduce here are not currently documented in Sri Lanka.
\begin{description}
\item[Reported by]: The doctor that reports the ulcer
\item[ChangeAddDelete]: The updated date (automatically filled)
\item[Site]: Ulceration points
\item[Stage]: Stage I,II,III,IV, DTI (Deep Tissue Injury), Unstaged (NPUAP classification)
\item[Duration]: Duration (days)
\item[Length]: Length of the ulcer (mm)
\item[Width]: Width of the ulcer (mm)
\item[Depth]: Depth of the ulcer (mm)
\item[Margin]: Regular, Irregular
\item[Edge]: Sloping, Punched out, Rollout, Everted
\item[Edge color]: Color of the edge of the ulcer
\item[Underminings]: (Yes/No)
\item[Sinus tracts]: (Yes/No)
\item[Floor]: Healthy, Granulation Tissue, Slough, Necrotic, Eschar, Epithelial (Multiple selection)
\item[Discharge]: Serous, Purulent, Serosanguineous, Other
\item[Discharge amount]: Small, Medium, Heavy
\item[Surrounding skin]: Warm, Thickened, Hyperpigmented, Hypopigmented, Gangrenous, Itching, Swelling (Multiple selection)
\item[Skin sensation]: Good, Impaired
\item[Regional lymph nodes enlarged]: Yes/No
\item[Smell]: Yes/No
\item[Pain]: Yes/No
\item[Progress]: Improved, No change, Stable, Decline
\item[Image]: Image of the ulcer
\end{description}
\section{Mobile app}
The mobile app provides a user interface for the basic functionalities of the system. These include:
\begin{itemize}
\item Login
\item Profile update
\item Search other users
\item Handle social connections
\item Register mattress
\item Personal Risk Assessment
\item Ulcer documentation
\item Notification
\end{itemize}
Notifications are sent 5 minutes prior to the planned reposition and again at the planned moment, giving the next posture and the period to be spent in it. If the patient is not turned at the specified time, further notifications are sent three times at 5-minute intervals. A sketch of this timing logic is given below.
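A minimal sketch of the notification timing (the function name and structure are illustrative, not the actual implementation):
\begin{verbatim}
# Sketch: compute notification times for one planned reposition.
# Names and structure are illustrative, not the real implementation.
from datetime import datetime, timedelta

def notification_times(reposition_at: datetime, turned: bool):
    times = [reposition_at - timedelta(minutes=5), reposition_at]
    if not turned:  # patient not turned at the planned time
        times += [reposition_at + timedelta(minutes=5 * k)
                  for k in range(1, 4)]
    return times
\end{verbatim}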
\input{figs/mobileapp}
\section{Pressure Mat}
There are two different methods to create a pressure mat: combining a large number of individual sensors, or developing a single substrate of pressure-sensing material into a sensor panel. The first approach is complex to manufacture, so we selected the second. Velostat\textsuperscript{\textregistered} is a low-cost piezo-resistive material that has been used for similar applications \cite{velostat1,velostat2,velsensor}. Selecting a piezo-resistive material over a piezo-capacitive one reduces the complexity of the sensor interfacing, since resistance can be measured with a simple voltage divider.
\subsection{Calibration of the Material}
The relationship between pressure and resistance of Velostat was evaluated as follows. A piece of 1' $\times$ 1' Velostat was sandwiched between two copper plates and Neoprene sheets to create an individual sensor, and the resistance was measured under ascending and descending force.
\input{figs/utm} A Universal Testing Machine (UTM) was used to apply pressure to the Velostat, and the same test was conducted for two layers of Velostat. Although the sensitivity doubles, the impact of hysteresis is higher with two layers. The test was confirmed with deadweights, as the pressure from the UTM was unstable in the case of 2 layers of Velostat.\input{figs/deadweight}
\subsection{Communicating Pressure Values}
The ATMega32 microcontroller powers each column one by one with 5V through analog multiplexers, measures the voltage of each row (in a voltage divider with a fixed resistor) via analog multiplexers and the built-in analog-to-digital converter, maps it to the corresponding pressure value, and communicates the measurement to the ESP8266 module via UART. A baud rate of 9600 was used, which is sufficient because pressure measurements are not required at a very high frame rate. The ESP8266 module sends the pressure data to the information system server through an HTTP API endpoint.
\subsection{Preparing Mat}
The Velostat sheet was sandwiched between two Neoprene sheets, each of which carries a set of parallel rows or columns of copper tape. Each column is attached to an output channel of an analog multiplexer, and each row is attached to an input channel of two multiplexers that work in combination as a single multiplexer.
\input{figs/pressure_mat}
Columns are powered one by one using the analog multiplexer, and the voltage is measured over a voltage divider while each row is selected by the other multiplexers.
Neoprene acts as an insulator for the copper grid. All rows are weakly pulled down, following previous research which shows that this reduces cross-talk effects.
\subsection{Sensor reading processing and communication}
The communication between the ATMega32 and the ESP8266 is via UART; the ESP8266 then sends the readings to the server over WiFi.
| {
"alphanum_fraction": 0.8067130889,
"avg_line_length": 69.8935361217,
"ext": "tex",
"hexsha": "3306261056735ced2c65a2971d715ddb9d86f92b",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "62bb85f823b68feca499f3d9c0383c9cc62fea69",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ThamaluM/PrevelcerDoc",
"max_forks_repo_path": "chapters/Methodology.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "62bb85f823b68feca499f3d9c0383c9cc62fea69",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ThamaluM/PrevelcerDoc",
"max_issues_repo_path": "chapters/Methodology.tex",
"max_line_length": 1000,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "62bb85f823b68feca499f3d9c0383c9cc62fea69",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ThamaluM/PrevelcerDoc",
"max_stars_repo_path": "chapters/Methodology.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 3998,
"size": 18382
} |
\documentclass{article}
\newsavebox{\oldepsilon}
\savebox{\oldepsilon}{\ensuremath{\epsilon}}
\usepackage[minionint,mathlf,textlf]{MinionPro} % To gussy up a bit
\renewcommand*{\epsilon}{\usebox{\oldepsilon}}
\usepackage[margin=1in]{geometry}
\usepackage{graphicx} % For .eps inclusion
%\usepackage{indentfirst} % Controls indentation
\usepackage[compact]{titlesec} % For regulating spacing before section titles
\usepackage{adjustbox} % For vertically-aligned side-by-side minipages
\usepackage{array, amsmath, mhchem}
\usepackage{hyperref}
\usepackage{courier, subcaption}
\usepackage{multirow, color}
\usepackage[autolinebreaks,framed,numbered]{mcode}
\usepackage{float}
\restylefloat{table}
\pagenumbering{gobble}
\setlength\parindent{0 cm}
\renewcommand{\arraystretch}{1.2}
\begin{document}
\large
\section*{2-D example: mutual repression}
A suitable place to begin is with the mutual repression example introduced in lecture 9. In this model, X and Y are transcriptional repressors that each bind to each other's promoter. Therefore, the production rate of each protein is a decreasing function $h$ of the other's concentration. Each protein also undergoes normal dilution/degradation, so that overall the model is:
\begin{eqnarray*}
\frac{dx}{dt} = f(x,y) & = & \alpha h(y) - \beta x\\
\frac{dy}{dt} = g(x,y) &= & \alpha h(x) - \beta y
\end{eqnarray*}
In particular, we chose $h$ to be the probability that the repressor was \textit{not} bound to the promoter, and assumed that the repressor bound the promoter according to a Hill equation:
\[ h(x) = 1 - \frac{x^n}{K + x^n} = \frac{K}{K+ x^n} \]
Notice that if we choose to measure $x$ and $y$ in appropriate units, we can simplify this expression to:
\[ h(x) = \frac{1}{1+ x^n} \]
We will assume the units of [X] and [Y] are chosen appropriately so that no K is needed. The rate laws for this simplified system are:
\begin{eqnarray*}
\frac{dx}{dt} = f(x,y) & = & \frac{\alpha }{1+ y^n} - \beta x\\
\frac{dy}{dt} = g(x,y) &= & \frac{\alpha }{1+ x^n} - \beta y
\end{eqnarray*}
We remained agnostic about what the value of $n$ would need to be. We had just been reminded that changing parameter values could have qualitative effects on the number and types of fixed points. Would we be able to get bistability if the transcription factor's binding curve were hyperbolic, i.e.,$n=1$? Or would we require cooperativity in transcription factor binding, i.e., $n>1$? Is the Hill coefficient $n$ the only important parameter for determining whether bistability will occur?\\
The last time we faced these questions, plotting was the only tool available to us. We found ourselves needing to choose a particular set of parameter values before the direction field and trajectories could be determined at all. Now that we have a better understanding of stability, we can improve on that attempt.
\section*{How many fixed points are there?}
To have bistability, we must have at least two fixed points (we will worry about their stability afterward). We now take a more careful look at how many fixed points are present to begin with.
\subsection*{Simple hyperbolic binding}
Consider first the case where $n=1$: this corresponds to a simple hyperbolic binding curve. A fixed point $(x^*, y^*)$ would need to satisfy:
\begin{eqnarray*}
g(x^*, y^*) = 0 & \implies & y^* = \frac{\alpha/\beta}{1 + x^*}\\
f(x^*, y^*) = 0 & \implies & x^* = \frac{\alpha/\beta}{1 + y^*} = \frac{\alpha /\beta}{1 + \left( \frac{\alpha/\beta}{1 + x^*} \right)} = \frac{1 + x^*}{\frac{\beta}{\alpha}\left(1 + x^*\right) + 1 }\\
\end{eqnarray*}
How many points $x^*$ are there that satisfy the latter equality? To determine this, we need to know how often the following two curves intersect:
\begin{eqnarray*}
y_1 = x \hspace{1 cm} \textrm{ and } \hspace{1 cm} y_2 = \frac{1 + x}{\frac{\beta}{\alpha}\left(1 + x\right) + 1 }
\end{eqnarray*}
The first is simply a straight line beginning at zero. The second curve begins at a positive value. Its first two derivatives are
\begin{eqnarray*}
\frac{dy_2}{dx} & = &\frac{\frac{\beta}{\alpha}\left(1 + x\right) + 1 - \frac{\beta}{\alpha} \left(1+x \right)}{\left( \frac{\beta}{\alpha}\left(1 + x\right) + 1 \right)^2} = \frac{1}{\left( \frac{\beta}{\alpha}\left(1 + x\right) + 1 \right)^2} > 0 \hspace{1 cm} \forall \, x\\
\frac{d^2y_2}{dx^2} & = & \frac{-2 \frac{\beta}{\alpha}}{\left( \frac{\beta}{\alpha}\left(1 + x\right) + 1 \right)^3} < 0 \hspace{ 1 cm} \forall x
\end{eqnarray*}
These properties of $y_2$, combined with the fact that $y_2 > y_1$ when $x=0$, imply that $y_1$ and $y_2$ will intersect exactly once. Therefore, there is only one value for $x^*$ when $n=1$. In other words, it is impossible for the system to be bistable when the Hill coefficient $n=1$ because there simply aren't two fixed points to rub together.
\subsection*{Cooperativity ($n>1$)}
Will we be guaranteed to have more than one fixed point if $n>1$, i.e., the transcription factors bind cooperatively? To address this question, we return to the simplified form of our system:
\begin{eqnarray*}
\frac{dx}{dt} & = & \alpha h(y) - \beta x\\
\frac{dy}{dt} & = & \alpha h(x) - \beta y
\end{eqnarray*}
At fixed points $(x^*, y^*)$, both of these time derivatives will be zero, so:
\begin{eqnarray}
\begin{aligned}
x^* & = & \frac{\alpha}{\beta} h(y^*)\\
y^* & = & \frac{\alpha}{\beta} h(x^*) \label{eqn:abstractedfixedpointsmr}
\end{aligned}
\end{eqnarray}
These curves are called the \textit{nullclines} of the system. (The $x$ nullcline is the set of points along which $\dot{x}=0$, and so forth.) Our fixed points are at the intersection of the nullclines, so we need to determine how often the two lines intersect to find the number of fixed points. To illustrate our approach, consider the three plots in Figure \ref{fig:tangent} of the nullclines as the parameter $\frac{\alpha}{\beta}$ is changing (for fixed $n=2$):\\
\begin{figure}[htp] \centering{
\includegraphics[width=1 \textwidth]{tangent.pdf}}
\caption{Plots of the $x$ and $y$ nullclines before, at, and after a bifurcation. Hill coefficient $n=2$.} \label{fig:tangent}
\end{figure}
There are two important take-home messages here. The first is that the number of fixed points depends not just on $n$, but also on $\frac{\beta}{\alpha}$. (Recall that such changes in the number of fixed points as parameters are varied are called bifurcations.) Second, as will hopefully be clear from this illustration, bifurcations occur when the two nullclines become tangent to one another, i.e., when their slopes are equal at a point of intersection (a fixed point). Nullcline equations \ref{eqn:abstractedfixedpointsmr} both hold at fixed points, and we can find an expression for when their slopes are equal as follows:
\begin{eqnarray}
\frac{d}{dx} \left[ y \right] & = & \frac{d}{dx} \left[ \frac{\alpha}{\beta} h(x) \right] = \frac{\alpha}{\beta} \frac{dh(x)}{dx} \label{eqn:fromynullcline}\\
\frac{d}{dx} \left[ x \right] & = & \frac{d}{dx} \left[ \frac{\alpha}{\beta} h(y) \right] \nonumber \\
\frac{\beta}{\alpha} & = & \frac{dh(y)}{dy} \frac{dy}{dx} \hspace{1 cm} \textrm{Since the slopes must be equal, plug in $dy/dx$ from equation \ref{eqn:fromynullcline}:} \nonumber\\
\left( \frac{\beta}{\alpha} \right)^2& = & \frac{dh(y)}{dy} \frac{dh(x)}{dx} = h'(y) \, h'(x) \label{eqn:expressionfortangent}
\end{eqnarray}
We can take the derivative of $h(x)$:
\[ \frac{dh(x)}{dx} = \frac{d}{dx} \left[ \frac{1 }{1 + x^n} \right] = \frac{- n x^{n-1}}{\left( 1 + x^n \right)^2} \]
And plug this into expression \ref{eqn:expressionfortangent} (recalling that we are at a fixed point so equations \ref{eqn:abstractedfixedpointsmr} hold):
\begin{eqnarray*}
\left( \frac{\beta}{\alpha} \right)^2& = & \left[ \frac{- n x^{n-1}}{\left( 1 + x^n \right)^2} \right] \left[ \frac{- n y^{n-1}}{\left( 1 + y^n \right)^2} \right]\\
\left( \frac{\alpha}{\beta n} \right)^2& = & x^{n-1} y^{n-1} \left( \frac{\alpha/\beta}{ 1 + x^n} \right)^2 \left( \frac{\alpha/\beta}{1 + y^n} \right)^2 = x^{n+1} y^{n+1} = \left( \frac{\alpha}{\beta} \right)^{n+1} \left( \frac{x}{1+x^n} \right)^{n+1}\\
\left( \frac{1}{x} + x^{n-1} \right)^{n+1} & = & n^2 \left( \frac{\alpha}{\beta} \right)^{n-1}
\end{eqnarray*}
For all values of $n>1$ and $x^*>0$, it is possible to find $\alpha/\beta$ to meet this equality. Any value of $\alpha/\beta$ larger than that threshold will result in more than one fixed point. Although we will not show it, the fact that the two nullclines are sigmoidal (i.e. there is one inflection point) will imply that there can never be more than three fixed points.
\section*{Stability of the fixed points}
We have shown that for all $n>1$, it is possible to choose $\alpha/\beta$ so that we have three fixed points. Now we linearize the system around these fixed points in order to determine their stability. First, we will briefly review that method.
\subsection*{Recap of last time}
At the end of lecture 9, we had just learned that for a linear system:
\begin{eqnarray}
\frac{d}{dt} \begin{pmatrix} x_1 \\ \vdots \\ x_n \end{pmatrix} & = & \mathcal{A} \begin{pmatrix} x_1 \\ \vdots \\ x_n \end{pmatrix} \label{eqn:generallinearsystem}
\end{eqnarray}
when $\mathcal{A}$ has $n$ distinct eigenvectors $\mathbf{v}_i$ with corresponding eigenvalues $\lambda_i$, the general solution is:
\begin{eqnarray}
\begin{pmatrix} x_1 \\ \vdots \\ x_n \end{pmatrix} & = & \sum_{i=1}^n c_i \mathbf{v}_i e^{\lambda_i t} \label{eqn:generallinearsolution}
\end{eqnarray}
We had also learned that if the real parts of all eigenvalues are negative, then $(0,0)$ is a stable fixed point. If the real part of any eigenvalue is positive, the origin is not a stable fixed point. \\
We were about to practice applying this approach to study the stability of fixed points of non-linear systems of the form:
\begin{eqnarray*}
\frac{dx_1}{dt}& = & f_1(x_1, \ldots, x_n) = f_1(\mathbf{x})\\
& \vdots & \\
\frac{dx_n}{dt}& = & f_n(x_1, \ldots, x_n) = f_n(\mathbf{x})
\end{eqnarray*}
Recall that a fixed point $\mathbf{x^*}=(x_1^*, \ldots, x_n^*)$ has the property that
\[ f_i(\mathbf{x^*}) = 0 \hspace{1 cm} \forall i \]
We had shown that near a fixed point $\mathbf{x^*}$, a non-linear system can be \textit{linearized} (a process that involves a change of coordinates) by calculating a matrix called the Jacobian, $\mathbf{J}$:
\begin{eqnarray}
\frac{d}{dt} \begin{pmatrix} y_1 \\ \vdots \\ y_n \end{pmatrix} & = & \mathbf{J}_{\mathbf{x^*}} \begin{pmatrix} y_1 \\ \vdots \\ y_n \end{pmatrix} = \begin{pmatrix} \frac{\partial f_1}{\partial x_1} & \cdots & \frac{\partial f_1}{\partial x_n}\\ \vdots & \ddots & \vdots \\ \frac{\partial f_n}{\partial x_1} & \cdots & \frac{\partial f_n}{\partial x_n} \end{pmatrix}_{\mathbf{x^*}} \begin{pmatrix} y_1 \\ \vdots \\ y_n \end{pmatrix} \label{eqn:generallinearizedsystem}
\end{eqnarray}
The stability of this linearized system at the origin is the same as the stability of the non-linear system at the point $\mathbf{x^*}$.
\subsection*{Application to the 2-D mutual repression system}
Recall that the mutual repression system is two-dimensional and can be written as:
\begin{eqnarray*}
\frac{dx}{dt} = f(x,y) & = & \frac{\alpha }{1+ y^n} - \beta x\\
\frac{dy}{dt} = g(x,y) &= & \frac{\alpha }{1+ x^n} - \beta y
\end{eqnarray*}
To linearize around a fixed point $(x^*,y^*)$, we must calculate:
\begin{eqnarray*}
\frac{d}{dt} \begin{pmatrix} a \\ b \end{pmatrix} & = & \begin{pmatrix} \frac{\partial f}{\partial x} & \frac{\partial f}{\partial y}\\ \frac{\partial g}{\partial x} & \frac{\partial g}{\partial y} \end{pmatrix}_{(x^*, y^*)} \begin{pmatrix} a \\ b \end{pmatrix}\\
& = & \begin{pmatrix} - \beta & \frac{-\alpha ny^{n-1}}{(1+y^n)^2} \\ \frac{- \alpha nx^{n-1}}{(1+x^n)^2} & -\beta \end{pmatrix}_{(x^*, y^*)} \begin{pmatrix} a \\ b \end{pmatrix}\\
\end{eqnarray*}
How will we try this out? Once we are certain that we have chosen $\alpha/\beta$ so that we have three fixed points ($n>1$), we numerically solve to find the location of the fixed points; for example:
\[f(x^*,y^*) = g(x^*, y^*) = 0 \implies x^* = \frac{\alpha/\beta}{1 + \left( \frac{\alpha/\beta}{1 + x^{*n}} \right)^n} \]
\begin{lstlisting}
syms x;
n = 3;
c = 2;
vpasolve(x == c/(1 + (c/(1+x^n))^n),x)
\end{lstlisting}
In this example, where $n=3$ and $\alpha/\beta = 2$, the three fixed points are $(1.98, 0.23)$, $(1,1)$, and $(0.23, 1.98)$. Let's try evaluating our Jacobian at the first point:
\begin{eqnarray*}
\frac{d}{dt} \begin{pmatrix} a \\ b \end{pmatrix} & = & \begin{pmatrix} -1 & \frac{- 6 y^{2}}{(1+y^{3})^2} \\ \frac{- 6 x^{2}}{(1+x^{3})^2} & -1 \end{pmatrix}_{(1.98, 0.23)} \begin{pmatrix} a \\ b \end{pmatrix} = \begin{pmatrix} -1 & -0.31 \\ -0.31 & -1 \end{pmatrix} \begin{pmatrix} a \\ b \end{pmatrix}\\
\end{eqnarray*}
We now calculate the eigenvalues and eigenvectors of this matrix in MATLAB:
\begin{lstlisting}
j = [-1, -0.31; -0.31, -1];
[v, l] = eig(j);
disp(sprintf('The first eigenvector is (%0.4f,%0.4f) with eigenvalue %s', v(1,1), v(2,1), num2str(l(1,1))));
disp(sprintf('The second eigenvector is (%0.4f,%0.4f) with eigenvalue %s', v(1,2), v(2,2), num2str(l(2,2))));
\end{lstlisting}
The results are:
\[ \mathbf{v}_1 = \begin{pmatrix} \frac{1}{\sqrt{2}}\\ \frac{1}{\sqrt{2}} \end{pmatrix} \textrm{ and } \lambda_1 = -1.31 \hspace{1 cm} \mathbf{v}_2 = \begin{pmatrix} \frac{-1}{\sqrt{2}}\\ \frac{1}{\sqrt{2}} \end{pmatrix} \textrm{ and } \lambda_2 = -0.69 \]
The fact that the eigenvalues are both real and negative tells us that this is a stable fixed point! Note that the Jacobian is the same at the point $(0.23, 1.98)$, so both of these points are stable.\\
What about the third fixed point, $(1,1)$?
\begin{eqnarray*}
\frac{d}{dt} \begin{pmatrix} a \\ b \end{pmatrix} & = & \begin{pmatrix} -1 & \frac{- 6 y^{2}}{(1+y^{3})^2} \\ \frac{- 6 x^{2}}{(1+x^{3})^2} & -1 \end{pmatrix}_{(1,1)} \begin{pmatrix} a \\ b \end{pmatrix} = \begin{pmatrix} -1 & -1.5 \\ -1.5 & -1 \end{pmatrix} \begin{pmatrix} a \\ b \end{pmatrix}\\
\end{eqnarray*}
\begin{lstlisting}
j = [-1, -1.5; -1.5, -1];
[v, l] = eig(j);
disp(sprintf('The first eigenvector is (%0.4f,%0.4f) with eigenvalue %s', v(1,1), v(2,1), num2str(l(1,1))));
disp(sprintf('The second eigenvector is (%0.4f,%0.4f) with eigenvalue %s', v(1,2), v(2,2), num2str(l(2,2))));
\end{lstlisting}
\[ \mathbf{v}_1 = \begin{pmatrix} \frac{1}{\sqrt{2}}\\ \frac{1}{\sqrt{2}} \end{pmatrix} \textrm{ and } \lambda_1 = -2.5 \hspace{1 cm} \mathbf{v}_2 = \begin{pmatrix} \frac{-1}{\sqrt{2}}\\ \frac{1}{\sqrt{2}} \end{pmatrix} \textrm{ and } \lambda_2 = 0.5 \]
Since one eigenvalue is positive and one is negative, this is a \textit{saddle} point. Trajectories will approach this fixed point along the first eigenvector but all trajectories not lying precisely on this line will ultimately veer off towards one of the two stable fixed points.\\
How do we know that two of the fixed points will \textit{always} be stable? The answer lies in the type of bifurcation that has occurred. If we make a plot of $x^*$ vs. $\alpha/\beta$, indicating stable nodes by solid lines and unstable nodes by dotted lines, we get something that looks like a pitchfork. In our case, we have a \textit{supercritical pitchfork} bifurcation: a single stable fixed point converts to an unstable fixed point as two new stable fixed points arise. Pitchfork bifurcations are often seen in symmetrical systems.\\
In the real world, noise will occasionally perturb the system from its normal trajectory. If such noise is able to push the system from one basin of attraction to the other, then the system is not functionally bistable. The likelihood of this occurring depends on how far separated each fixed point is from the \textit{separatrix} that forms the boundary between the two basins of attraction. We have seen that this distance increases with $\alpha/\beta$ -- that is, approximately with the steady-state expression level of the more highly-expressed protein, as well as with $n$.
\section*{Summary of mutual repression and motivation for discussion paper}
We have shown that cooperativity ($n>1$) is necessary for bistability in this system. This is unfortunate because there are many transcription factors that undergo simple binding, which we might like to use to build bistable circuits. For example, we could use pairs of repressors to make ``bits" of memory in a biological system. However, as you saw in the first discussion paper, the biological repertoire of non-conflicting parts is inherently limited so we would like to be able to use all of the transcription factors available to us, even those that bind simply. \\
In particular, we'd like to be able to use the types of transcription factors that we can \textit{design} to bind to a DNA sequence of our choosing. One such type of transcription factor is called a TAL effector or TALE. We will see that their system explicitly adds positive feedback; in their case, transcription factors bind to their own promoters to increase their own rate of expression.\\
The general idea used in this discussion paper is inspired by real biological systems like \textit{B. subtilis} competence mediation by \textit{comK} and \textit{rok}. A similar scheme is used in the phage lambda lysis/lysogeny decision: cI and Cro form a modified version of this mutual repression system, with added competition for the binding sites $O_R1$ through $O_R3$.
\end{document} | {
"alphanum_fraction": 0.6936462507,
"avg_line_length": 70.7287449393,
"ext": "tex",
"hexsha": "a7f7ed27b5487c3fede72ff7004163bfd0e13160",
"lang": "TeX",
"max_forks_count": 3,
"max_forks_repo_forks_event_max_datetime": "2020-03-25T14:42:10.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-01-20T17:43:51.000Z",
"max_forks_repo_head_hexsha": "95ad58ec50ef79d084e71f4380fbfbf5e1603836",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "mewahl/intro-systems-biology",
"max_forks_repo_path": "lectures/Lecture 10 - Bistability/lecture 10 notes.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "95ad58ec50ef79d084e71f4380fbfbf5e1603836",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "mewahl/intro-systems-biology",
"max_issues_repo_path": "lectures/Lecture 10 - Bistability/lecture 10 notes.tex",
"max_line_length": 627,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "95ad58ec50ef79d084e71f4380fbfbf5e1603836",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "mewahl/intro-systems-biology",
"max_stars_repo_path": "lectures/Lecture 10 - Bistability/lecture 10 notes.tex",
"max_stars_repo_stars_event_max_datetime": "2019-01-31T17:23:09.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-01-20T17:43:31.000Z",
"num_tokens": 5708,
"size": 17470
} |
\subsection{dataclasses -- Data Classes}
To be done ....
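A minimal example of the \texttt{@dataclass} decorator (a sketch of the basic behaviour only):
\begin{verbatim}
from dataclasses import dataclass, field

@dataclass
class Point:
    x: float
    y: float = 0.0                             # default value
    tags: list = field(default_factory=list)   # mutable default

p = Point(1.5)
print(p)                 # Point(x=1.5, y=0.0, tags=[])
print(p == Point(1.5))   # True: __eq__ is generated automatically
\end{verbatim}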
%
| {
"alphanum_fraction": 0.6666666667,
"avg_line_length": 12,
"ext": "tex",
"hexsha": "484ff0bbe0c807da9aeb830915bea991613f0fe8",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2016-11-24T19:55:47.000Z",
"max_forks_repo_forks_event_min_datetime": "2016-11-24T19:55:47.000Z",
"max_forks_repo_head_hexsha": "dd7d6f30d945733f7ed792fcccd33875b59d240f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "remigiusz-suwalski/programming-notes",
"max_forks_repo_path": "src/python3/sections/dataclasses.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "dd7d6f30d945733f7ed792fcccd33875b59d240f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "remigiusz-suwalski/programming-notes",
"max_issues_repo_path": "src/python3/sections/dataclasses.tex",
"max_line_length": 40,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "dd7d6f30d945733f7ed792fcccd33875b59d240f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "remigiusz-suwalski/programming-notes",
"max_stars_repo_path": "src/python3/sections/dataclasses.tex",
"max_stars_repo_stars_event_max_datetime": "2022-02-28T05:03:18.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-02-28T05:03:18.000Z",
"num_tokens": 14,
"size": 60
} |
\subsection{Voronoi path planning}
Voronoi path planning finds paths that stay as far away from obstacles as possible:
\begin{itemize}
	\item The plane is divided into cells, with one cell around each obstacle.
	\item Within a cell, all points are closer to that obstacle than to any other.
	\item The robot moves along the boundary lines between cells.
\end{itemize}
Drawbacks:
\begin{itemize}
	\item It is overly conservative.
	\item It is hard to compute in 3D.
	\item Small environmental changes can significantly change the graph.
\end{itemize}
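A minimal sketch of building such a roadmap, assuming point obstacles and using SciPy's Voronoi diagram (illustrative only, not a full planner):
\begin{verbatim}
# Sketch: build a roadmap from the Voronoi diagram of point obstacles.
# Illustrative only; real planners must also handle obstacle extent,
# unbounded ridges and collision checking.
import numpy as np
import networkx as nx
from scipy.spatial import Voronoi

obstacles = np.random.rand(20, 2)       # point obstacles in the unit square
vor = Voronoi(obstacles)

graph = nx.Graph()
for a, b in vor.ridge_vertices:
    if a == -1 or b == -1:              # skip ridges that go to infinity
        continue
    p, q = vor.vertices[a], vor.vertices[b]
    graph.add_edge(a, b, weight=np.linalg.norm(p - q))

# Shortest path along the roadmap between two Voronoi vertices
# (here simply the endpoints of the first finite ridge, as a demo).
src, dst = next((a, b) for a, b in vor.ridge_vertices
                if a != -1 and b != -1)
path = nx.shortest_path(graph, source=src, target=dst, weight="weight")
\end{verbatim}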
| {
"alphanum_fraction": 0.8005698006,
"avg_line_length": 19.5,
"ext": "tex",
"hexsha": "4adf9729fedd83ecf8b1d4bb97bc8b5c319871c4",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "adamdboult/nodeHomePage",
"max_forks_repo_path": "src/pug/theory/ai/robotics/02-02-voronoi.tex",
"max_issues_count": 6,
"max_issues_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93",
"max_issues_repo_issues_event_max_datetime": "2022-01-01T22:16:09.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-03-03T12:36:56.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "adamdboult/nodeHomePage",
"max_issues_repo_path": "src/pug/theory/ai/robotics/02-02-voronoi.tex",
"max_line_length": 63,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "adamdboult/nodeHomePage",
"max_stars_repo_path": "src/pug/theory/ai/robotics/02-02-voronoi.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 75,
"size": 351
} |
%% SECTION HEADER ////////////////////////////////////////////////////////////////////////////////
\section{Thesis outline}
\label{sec16}
| {
"alphanum_fraction": 0.2925170068,
"avg_line_length": 21,
"ext": "tex",
"hexsha": "30d9a16d2a5a74a05a17f606fd09a137f176f91d",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "9fb0ad6d5e6d94531c34778a66127e5913a3830c",
"max_forks_repo_licenses": [
"RSA-MD"
],
"max_forks_repo_name": "IFFM-PAS-MISD/aidd",
"max_forks_repo_path": "reports/project_reports/Ijjeh_thesis_template/Chapters/Chapter1/sect16.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "9fb0ad6d5e6d94531c34778a66127e5913a3830c",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"RSA-MD"
],
"max_issues_repo_name": "IFFM-PAS-MISD/aidd",
"max_issues_repo_path": "reports/project_reports/Ijjeh_thesis_template/Chapters/Chapter1/sect16.tex",
"max_line_length": 99,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "9fb0ad6d5e6d94531c34778a66127e5913a3830c",
"max_stars_repo_licenses": [
"RSA-MD"
],
"max_stars_repo_name": "IFFM-PAS-MISD/aidd",
"max_stars_repo_path": "reports/project_reports/Ijjeh_thesis_template/Chapters/Chapter1/sect16.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-03T05:36:07.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-03-03T05:36:07.000Z",
"num_tokens": 19,
"size": 147
} |
\chapter{Future challenges and conclusion}
Here, we present some possible directions to extend the presented method to other applications.
\section{Reparameterization trick with Gaussian Processes}
The Bayesian Monte Carlo approach for approximating the integral terms
\begin{displaymath}
\int \Ev[\log(\gu_\mathcal{D}(\theta))] q_i(\theta) \, d\theta
\end{displaymath}
suffers from the fact that the GP kernel and the distribution $q_i$ are limited, since
\begin{displaymath}
\int k(\theta,\theta_i) q_i(\theta) d\theta
\end{displaymath}
must be tractable. One way to circumvent this is to abandon the BMC approach to integration and instead use the reparameterization trick presented in Section \ref{reparameterizationsection}, bringing the BVBMC approach closer in spirit to the one in Section \ref{gradboostsection}.
One disadvantage is that evaluations of the Gaussian process surrogate, although cheap, are not extremely cheap, especially for large datasets, so the reparameterization approach may be considerably slower.
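For illustration, a minimal PyTorch sketch of such a reparameterized estimator for a single Gaussian component $q_i$ follows; \texttt{log\_g} stands for the GP posterior mean of $\log \gu_\mathcal{D}$ and is only a placeholder here.
\begin{verbatim}
# Sketch: reparameterized Monte Carlo estimate of
#   E_{q_i}[log g(theta)],  q_i = N(mu, diag(sigma^2)),
# with gradients flowing back to (mu, log_sigma).
# log_g below is a placeholder for the GP posterior mean of log g.
import torch

def expected_log_g(log_g, mu, log_sigma, n_samples=64):
    std = log_sigma.exp()
    q = torch.distributions.Normal(mu, std)
    theta = q.rsample((n_samples,))      # reparameterized samples
    return log_g(theta).mean()           # differentiable w.r.t. mu, sigma

# Example with a toy log_g; in BVBMC this would be the GP mean function.
mu = torch.zeros(2, requires_grad=True)
log_sigma = torch.zeros(2, requires_grad=True)
value = expected_log_g(lambda t: -(t ** 2).sum(-1), mu, log_sigma)
value.backward()                         # gradients for the variational step
\end{verbatim}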
\section{Extending BVBMC to pseudo-marginal likelihoods}
Consider, as in Section \ref{pseudomarginalsection}, that $\gu(\theta) = Z p(\theta|\mathcal{D}) = p(\mathcal{D}|\theta) p(\theta)$ is truly unavailable, and that even the pseudo-marginals $\hat{\gu}(\theta) = Z \hat{p}(\theta|\mathcal{D})$ are expensive to calculate.
Gaussian processes accommodate, for evaluation points $\{\theta_i\}_{i=1}^N$, the noisy estimates $\{\hat{\gu}(\theta_i)\}_{i=1}^N$ of $\{\gu(\theta_i)\}_{i=1}^N$. If one were doing GP regression on $\gu(\theta)$, one could assume that $p(\hat{\gu}|\gu)$ is roughly Gaussian, due to the central limit theorem, and use \eqref{meancovGPR} as the surrogate model.
However, in BVBMC (and VBMC) the GP surrogate model is placed on $\log \gu(\theta)$. This implies that, letting $\epsilon = \hat{\gu}(\theta) - \gu(\theta)$ be the noise random variable, one has the following model for $\log \bar{\gu}(\theta)$:
\begin{equation}
\log \bar{\gu}(\theta) = \log \left(e^{\log \gu(\theta)} + \epsilon \right),
\end{equation}
which is a complicated noise model, to be treated as in \eqref{generalnoise}. Furthermore, one cannot even assume this noise term to be controlled, since, by doing a rough Taylor expansion:
\begin{equation}
\log \bar{\gu}(\theta) = \log \gu(\theta) + e^{-\log \gu(\theta)} \epsilon
\end{equation}
which results in very large noise for low values of $\log \gu(\theta)$. Future work could address this problem.
\section{Scaling BVBMC to a larger number of evaluations}
Given the scaling problems of GPs discussed in Section \ref{scalinggpsession}, naive use of BVBMC runs into problems for unnormalized posteriors $\gu(\theta)$ that can be evaluated tens of thousands of times, but for which hundreds of thousands or millions of evaluations are impractical.
A possibility is to use sparse Gaussian processes, which are briefly reviewed in Appendix \ref{sparsegpchapter}. However, their integration with BVBMC ran into problems, so further research is needed.
Of course, one could drop the use of GPs and use other surrogate function methods as done in \cite{Bliznyuk_2012,Marzouk_2007}. However, it should be noted that local approximation methods may not work with variational inference, because of its global approximation nature.
\section{Conclusion}
The method presented in this work, although still immature, has shown promise for use in Bayesian inference problems where the likelihood function is expensive to evaluate, as is common in inverse problems.
The associated package, available at \url{https://github.com/DFNaiff/BVBMC} and built on top of PyTorch, is intended to be easy to use, so that practitioners can quickly employ it in their own problems if they wish.
| {
"alphanum_fraction": 0.764721772,
"avg_line_length": 82.2666666667,
"ext": "tex",
"hexsha": "de1db63f2ad2c196873bc4c7634264e4b7bc7574",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "8db72a0e588042a582053625ec58cde6a661f2a9",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "DFNaiff/Dissertation",
"max_forks_repo_path": "tex_copy/chapters/capituloG.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "8db72a0e588042a582053625ec58cde6a661f2a9",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "DFNaiff/Dissertation",
"max_issues_repo_path": "tex_copy/chapters/capituloG.tex",
"max_line_length": 357,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "8db72a0e588042a582053625ec58cde6a661f2a9",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "DFNaiff/Dissertation",
"max_stars_repo_path": "tex_copy/chapters/capituloG.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 941,
"size": 3702
} |
\chapter{Introduction}
Our goal is to enable the research community at large to use FIRM in their own research (simulations and physical experiments).
For this purpose, we have developed an open source implementation of FIRM using OMPL (Open Motion Planning Library).
\section{Feedback Information RoadMaps}
\textcolor{red}{Add References to Ali's pubs}
\section{Open Motion Planning Library}
As stated on the OMPL \url{http://ompl.kavrakilab.org}, \textit{OMPL, the Open Motion Planning Library, consists of many state-of-the-art sampling-based motion planning algorithms. OMPL itself does not contain any code related to, e.g., collision checking or visualization. This is a deliberate design choice, so that OMPL is not tied to a particular collision checker or visualization front end.}
\section{Aim}
To integrate FIRM with the Open Motion Planning Library and use it with ROS for physical and simulation experiments.
\section{Planned Tasks}
\subsection{Stage 1}
\begin{enumerate}
\item Implement the underlying graph structure using Boost
\item Implement Edge weight class for OMPL
\item Derive from OMPL’s planner class to implement FIRM planner
\item Implement the Belief Space by deriving from OMPL Compound State Space
\item Implement Belief (State) class deriving from OMPL State
\item Using OMPL's sampler class, develop a uniform random belief space sampler
\item Implement observation model (monocular camera with image markers)
\item Implement motion model
\item Implement Controller class (integrate filters)
\item Integrate DP with Planner
\item Complete component integration and test in simulation (PMPL simulator)
\end{enumerate}
\subsection{Stage 2}
\begin{itemize}
\item Integrate with ROS and test in simulation
\item Benchmark with PMPL
\item Test on physical system
\end{itemize}
| {
"alphanum_fraction": 0.7982504101,
"avg_line_length": 42.5348837209,
"ext": "tex",
"hexsha": "6f17457d106eed2a65ba1d7374f968884e666d1f",
"lang": "TeX",
"max_forks_count": 15,
"max_forks_repo_forks_event_max_datetime": "2021-10-04T15:25:08.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-12-08T12:02:33.000Z",
"max_forks_repo_head_hexsha": "854406d4ddbad5a47c8a4411f8aac0d1424aa93d",
"max_forks_repo_licenses": [
"Unlicense"
],
"max_forks_repo_name": "sauravag/FIRM-OMPL",
"max_forks_repo_path": "report/chap1.tex",
"max_issues_count": 11,
"max_issues_repo_head_hexsha": "854406d4ddbad5a47c8a4411f8aac0d1424aa93d",
"max_issues_repo_issues_event_max_datetime": "2017-03-30T00:24:50.000Z",
"max_issues_repo_issues_event_min_datetime": "2016-12-01T20:51:07.000Z",
"max_issues_repo_licenses": [
"Unlicense"
],
"max_issues_repo_name": "sauravag/edpl-ompl",
"max_issues_repo_path": "report/chap1.tex",
"max_line_length": 397,
"max_stars_count": 25,
"max_stars_repo_head_hexsha": "854406d4ddbad5a47c8a4411f8aac0d1424aa93d",
"max_stars_repo_licenses": [
"Unlicense"
],
"max_stars_repo_name": "sauravag/FIRM-OMPL",
"max_stars_repo_path": "report/chap1.tex",
"max_stars_repo_stars_event_max_datetime": "2021-10-09T08:42:28.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-01-03T09:44:52.000Z",
"num_tokens": 403,
"size": 1829
} |
\subsection{Test Case Normalization and Generalization}
Understanding ArcPy failures without delta-debugging \cite{DD} to reduce the
test cases to a readable size is, essentially, impossible \cite{MinUnit}. The
typical test case, before reduction, is 600--2,000 steps long. Even
after delta-debugging, however, when test cases are of a more
comprehensible size and contain no purely extraneous steps,
understanding ArcPy failures is difficult.
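For readers unfamiliar with how such reduction works in practice, the
following Python sketch shows a greedy, delta-debugging-style loop over a
list of test steps; it is a simplified illustration, not the actual
implementation used for ArcPy testing, and \texttt{fails} stands for a
hypothetical oracle that replays a candidate test and reports whether it
still fails.
\begin{verbatim}
def reduce_test(steps, fails):
    """Greedy delta-debugging-style reduction: repeatedly try removing
    chunks of steps, keeping any shorter test that still fails."""
    n = 2
    while len(steps) >= 2:
        chunk = max(1, len(steps) // n)
        reduced = False
        for start in range(0, len(steps), chunk):
            candidate = steps[:start] + steps[start + chunk:]
            if candidate and fails(candidate):
                steps, n, reduced = candidate, max(n - 1, 2), True
                break
        if not reduced:
            if chunk == 1:
                break          # no single step can be removed
            n = min(n * 2, len(steps))
    return steps
\end{verbatim}
The loop removes progressively smaller chunks of steps, keeping any shorter
test that still fails, until no individual step can be removed.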
To address this problem, as well as other issues (some of which, such
as triaging large numbers of failing tests, are not at present
problems for ArcPy testing), we developed an algorithm to
\emph{normalize} test cases \cite{ICSTnorm}. This algorithm applies a series of term
rewriting rules to reduce the number of variables in a test case,
reduce the complexity of API calls made, and make other simplifications. In
the case of ArcPy, this often also further reduces test case length
beyond what standard delta-debugging can achieve. For example, of the
first five crashes detected (some of which turned out to be variations
of one underlying problem), normalization reduced the length of the
delta-debugged test case from 19 to 11 steps, from 18 to 14 steps,
from 27 to 20 steps, from 20 to 16 steps, and from 10 to 9 steps. In
the last case, the one step removed gave important information about
the problem.
In addition to normalization, we found it essential to apply
generalization \cite{SmartCheck,ICSTnorm} to test cases. This
algorithm, also produced to aid ArcPy testing, is in a sense the
opposite of normalization. Normalization takes many tests that differ
in unimportant ways and converts them to one, simple, sometimes
canonical (one test per fault) form. Generalization takes a single
test, and produces annotations that describe how the test could be
modified while retaining the property of interest --- e.g.,
generalization answers such questions as:
\begin{itemize}
\item Could this
constant value be different, and the test still fail?
\item Could these two API calls be swapped in their position in the
test case, and the test still fail?
\item Could this freshly created object replace this complex,
much-modified object in this API call, and the test still fail?
\end{itemize}
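As a concrete (and again hypothetical) illustration of the first kind of
question, the sketch below probes each integer constant of a failing test
with a few alternative values and records which substitutions preserve the
failure; the actual generalization algorithm handles step swaps,
replacement by fresh objects, and much more. The \texttt{fails} oracle is
the same placeholder as in the previous sketch.
\begin{verbatim}
def generalize_constants(steps, fails, candidates=(0, 1, -1, 2)):
    """For each integer argument of a failing test, report which
    alternative values keep the test failing.  `steps` is a list of
    (call, args) tuples, with args a tuple of arguments."""
    report = {}
    for i, (call, args) in enumerate(steps):
        for j, arg in enumerate(args):
            if not isinstance(arg, int):
                continue
            still_failing = []
            for value in candidates:
                if value == arg:
                    continue
                new_args = args[:j] + (value,) + args[j + 1:]
                trial = steps[:i] + [(call, new_args)] + steps[i + 1:]
                if fails(trial):
                    still_failing.append(value)
            report[(i, j, arg)] = still_failing
    return report
\end{verbatim}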
Together, normalization and generalization have greatly aided our
understanding of complex ArcPy test cases: normalization provides a
standard structure for failures, and makes constant values as small as
possible. Generalization tells us when these values can be changed,
without altering the disposition of the test. The faults described in
this paper are all presented as normalized and generalized test cases. | {
"alphanum_fraction": 0.8026217228,
"avg_line_length": 54.4897959184,
"ext": "tex",
"hexsha": "d18c2d5e4cbec0e3b0e66349b3dbe39064b71c58",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "c77dda6abf616c9dfcd052762c5b07bf4368ddde",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "15821361594/python-automated-test",
"max_forks_repo_path": "deprecated/papers/STTTjournal/normgen.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "c77dda6abf616c9dfcd052762c5b07bf4368ddde",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "15821361594/python-automated-test",
"max_issues_repo_path": "deprecated/papers/STTTjournal/normgen.tex",
"max_line_length": 85,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "c77dda6abf616c9dfcd052762c5b07bf4368ddde",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "15821361594/python-automated-test",
"max_stars_repo_path": "deprecated/papers/STTTjournal/normgen.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 602,
"size": 2670
} |
\section{Parsing and printing}
\label{sec:parsing-printing}
\subsection{Grammar}
At the time of writing this thesis, LISA does not have a formalized concrete syntax grammar for its language, but rather a simple printer that is used to display proofs in a more readable way. In this project I defined a grammar for the first-order logic language with sequents of the front-end (\autoref{fig:grammar}). Unlike the existing printer, the grammar is designed to be unambiguous, so that it can later be used for serialization/persistence, while staying as close as possible to the original printer of LISA. The main challenge in designing a grammar for our language stems from the fact that the distinction between terms and formulas is made early on; in other words, it is not possible to construct ill-typed trees. This subtlety opens the door to potential ambiguities where it is unclear whether a node should be interpreted as a term or a formula. The front-end avoids this issue thanks to a carefully designed grammar and a type checker that resolves an intermediate representation into well-typed trees.
\begin{figure}[H]
\centering
\begin{framed}
\grammarindent6.2cm
\begin{grammar}
<char-alpha> ::= `a'-`z' | `A'-`Z' | `_'
<char-alphanumeric> ::= <char-alpha> | `0'-`9'
<identifier> ::= <char-alpha> <char-alphanumeric>*
<schematic-identifier> ::= `?' <identifier>
<schematic-connector-identifier> ::= `??' <identifier>
<label-identifier> ::= <identifier>
\alt <schematic-identifier>
\alt <schematic-connector-identifier>
<infix-label> ::= `\(\Leftrightarrow\)' | `\(\Rightarrow\)' | `\(\lor\)' | `\(\land\)' | `\(=\)'
<prefix-label> ::= `\(\neg\)'
<binder-label> ::= `\(\forall\)' | `\(\exists\)' | `\(\exists!\)'
<term> ::= <term> <infix-label> <term>
\alt <prefix-label> <term>
\alt <binder-label> <identifier> `.' <term>
\alt <label-identifier>
\alt <label-identifier> `(' <term> (`,' <term>)* `)'
\alt `(' <term> `)'
<context> ::= `\\' <identifier> (`,' <identifier>)* `.'
<top-term> ::= <context>? <term>
<terms> ::= <term> (`;' <term>)*
<sequent> ::= <context>? <terms>? `\(\vdash\)' <terms>?
<partial-sequent> ::= <context>? ((`...' (`;' <term>)*) | <terms>) `\(\vdash\)' \\
(((<term> `;')* `...') | <terms>)
\end{grammar}
\end{framed}
\caption[BNF grammar]{BNF grammar for the front-end. The precedence of operators is the usual one. The current system uses an extended syntax, not shown here, which simply adds convenient aliases for common symbols of set theory (empty, singleton and pair sets, constants, etc.).}
\label{fig:grammar}
\end{figure}
\subsection{Parsing}
A parser for this grammar was written using \code{scala-parser-combinators} \cite{Moors2008}, a recursive-descent parsing library for Scala. The parser alone is not sufficient to generate the final trees; an intermediate phase called \textbf{resolution} performs type checking and resolves the intermediate representation into its final form. The procedure is dictated by the rules listed in \autoref{fig:typing-rules}. Given the type of the top-level term, we can show by induction that it is possible to recover all the remaining types in the tree, and thus reconstruct the final representation of the tree.
\begin{figure}[H]
\centering
\begin{framed}
\begin{gather}
\frac{\Gamma, x_1, ..., x_n \vdash t_{l1}: \mathcal{F} \quad ... \quad \Gamma, x_1, ..., x_n \vdash t_{rm}: \mathcal{F}}{\Gamma \vdash \backslash x_1, ..., x_n. t_{l1}, ..., t_{ln} ``\vdash'' t_{r1}, ..., t_{rm}: \mathcal{S}} \tag{\textsc{(partial) Sequent}} \\[1em]
\frac{\Gamma, x_1, ..., x_n \vdash t: \mathcal{F}}{\Gamma \vdash \backslash x_1, ..., x_n. t: \mathcal{F}} \tag{\textsc{Top-Level Formula}} \\[1em]
\frac{\Gamma, x \vdash t: \mathcal{F}}{\Gamma \vdash B x. t: \mathcal{F}} \mkern3mu B \in \{\forall, \exists, \exists!\} \tag{\textsc{Binder}} \\[1em]
\frac{\Gamma \vdash t: \mathcal{F}}{\Gamma \vdash !t: \mathcal{F}} \quad\quad
\frac{\Gamma \vdash t_1: \mathcal{F} \quad \Gamma \vdash t_2: \mathcal{F}}{\Gamma \vdash t_1 \ast t_2 : \mathcal{F}} \mkern3mu \ast \in \{\Leftrightarrow, \Rightarrow, \lor, \land\}
\tag{$\textsc{Connector}_{1,2}$} \\[1em]
\frac{\Gamma \vdash t_1: \mathcal{F} \quad ... \quad \Gamma \vdash t_n: \mathcal{F}}{\Gamma \vdash {??x}(t_1, ..., t_n) : \mathcal{F}} \quad\quad
\frac{}{\Gamma \vdash {??x}: \mathcal{F}}
\tag{$\textsc{Connector}_{3,4}$} \\[1em]
\frac{\Gamma \vdash t_1: \mathcal{T} \quad ... \quad \Gamma \vdash t_n: \mathcal{T}}{\Gamma \vdash (?)x(t_1, ..., t_n): \mathcal{T}} \quad\quad
\frac{}{\Gamma \vdash {?x}: \mathcal{T}} \quad\quad
\frac{}{\Gamma \vdash x: \mathcal{T}} \mkern3mu x \notin \Gamma
\tag{$\textsc{Function}_{1,2,3}$} \\[1em]
\frac{\Gamma \vdash t_1: \mathcal{T} \quad \Gamma \vdash t_2: \mathcal{T}}{\Gamma \vdash t_1 \ast t_2: \mathcal{F}} \mkern3mu \ast \in \{=\} \quad\quad
\frac{\Gamma \vdash t_1: \mathcal{T} \quad ... \quad \Gamma \vdash t_n: \mathcal{T}}{\Gamma \vdash (?)x(t_1, ..., t_n): \mathcal{F}}
\tag{$\textsc{Predicate}_{1,2}$} \\[1em]
\frac{}{\Gamma \vdash (?)x: \mathcal{F}} \tag{$\textsc{Predicate}_3$} \\[1em]
\frac{}{\Gamma, x \vdash x: \mathcal{T}} \tag{\textsc{Variable}}
\end{gather}
\end{framed}
\caption[Type inference rules]{Type inference rules. $\mathcal{T}$, $\mathcal{F}$ and $\mathcal{S}$ represent term, formula and sequent types respectively. The meta symbol $x$ represents identifiers, while $t$ represents parsed terms. An optional question mark symbol can precede identifiers. Given a well-formed top-level formula, it is always possible to unambiguously type all the children terms.}
\label{fig:typing-rules}
\end{figure}
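To make the resolution procedure more concrete, the following sketch (written in Python rather than Scala, and deliberately simplified: schematic connector labels, arities and error reporting are omitted) illustrates how the expected type is propagated top-down according to the rules in \autoref{fig:typing-rules}, so that an application node is disambiguated into a function or a predicate. It is only an illustration of the rules, not the actual implementation.
\begin{lstlisting}[language=Python,caption={[Resolution sketch]{A simplified, hypothetical sketch of top-down type resolution.}},label={lst:resolution-sketch},captionpos=b]
TERM, FORMULA = "T", "F"

def resolve(node, expected, bound=frozenset()):
    # Nodes are tuples such as ("binder", "forall", "x", body),
    # ("connector", "/\\", [args]), ("equality", lhs, rhs),
    # ("app", "p", [args]) or ("name", "x").
    kind = node[0]
    if kind == "binder":            # binders produce formulas
        _, label, x, body = node
        assert expected == FORMULA
        return ("binder", label, x, resolve(body, FORMULA, bound | {x}))
    if kind == "connector":         # connectives force formula children
        _, label, args = node
        assert expected == FORMULA
        return ("connector", label,
                [resolve(a, FORMULA, bound) for a in args])
    if kind == "equality":          # '=' is a formula over two terms
        _, lhs, rhs = node
        assert expected == FORMULA
        return ("equality",
                resolve(lhs, TERM, bound), resolve(rhs, TERM, bound))
    if kind == "app":               # function vs. predicate: decided by context
        _, label, args = node
        typed = [resolve(a, TERM, bound) for a in args]
        return (("predicate" if expected == FORMULA else "function"),
                label, typed)
    if kind == "name":              # bound variables are terms; free names
        _, label = node             # become constants or nullary predicates
        if label in bound:
            assert expected == TERM
            return ("variable", label)
        return (("predicate" if expected == FORMULA else "constant"), label)
    raise ValueError(kind)
\end{lstlisting}
Called on the parser output with the expected type of the root (e.g.\ a formula), the sketch mirrors the induction argument above: once the type of the top-level term is fixed, the type of every subtree is determined.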
We also implemented a printer and parser for kernel proofs (\autoref{fig:grammar-kernel}). Such proofs look like the one in \autoref{fig:simple-lisa-proof}.
\begin{figure}[H]
\centering
\begin{framed}
\grammarindent6.2cm
\begin{grammar}
<indentation> ::= ` '*
<line-feed> ::= `\\n'
<integer> ::= `-'? (`1'-`9') (`0'-`9')*
<step-name> ::= ...
<kernel-step> ::= <indentation> <integer> \\
<step-name> \\
<integer> (`,' <integer>)* \\
<sequent> \\
(`[' <top-term> (`;' <top-term>)* `]')?
<kernel-proof> ::= <kernel-step> (<line-feed> <kernel-step>)*
\end{grammar}
\end{framed}
\caption[BNF grammar for kernel proofs]{BNF grammar for kernel proofs. The symbol $\langle\textit{step-name}\rangle$ is defined by an expression containing only terminals, but omitted for conciseness.}
\label{fig:grammar-kernel}
\end{figure}
\subsection{Compile-time string interpolation}
\label{sec:parsing-printing-string-interpolation}
While the parser was originally designed for runtime usage, it can also be used by the compiler at compilation-time. This is possible because Scala 3 allows multi-stage metaprogramming. The user can manipulate values (\code{T}), expressions for these values (\code{Expr[T]}) or even expressions of expressions for these values (\code{Expr[Expr[T]]}), all within the same program.
The idea was to exploit this mechanism to guarantee safe parsing at compile-time: if the user attempts to parse an invalid string literal, the compiler raises a type error. The implementation of that feature is relatively straightforward and is done within a macro. First we extract the string literal value from the expression, then we call our parser on it, and finally we convert the resulting tree to an expression (at the meta-level, that is, converting a \code{T} to an \code{Expr[T]}). The last step requires defining conversion methods for all ADT members.
It turns out we can do better than that. Scala offers a feature called \textit{string interpolation}, which additionally allows variables to be ``inserted'' within the string literal (\autoref{fig:string-interpolation-general}). Moreover, it only works on string literals, thus guaranteeing that the value can be recovered at compile-time.
\begin{lstlisting}[language=Scala,caption={[String interpolation general example]{Simple demonstration of the string interpolation mechanism in Scala. The \code{s} interpolator simply calls \code{toString} on each variable passed and concatenates all the parts together.}},label={fig:string-interpolation-general},captionpos=b]
val s1: String = "world"
val s2: String = s"Hello $s1!" // Hello world!
\end{lstlisting}
This feature has a nice application in our case: not only can we require interpolated variables to be terms or formulas, but we can also check their type with respect to the context in which they appear. For instance, in the expression \code{formula"\$a /\textbackslash\ b"}, the variable \code{a} cannot be a term (in fact, it must be a formula). In addition to terms and formulas, we may also allow the interpolation of labels, which is very useful for binding names dynamically, e.g. \code{formula"\$p(s)"}. Notice that the previous expression is structurally different from \code{formula"\${p(term"s")}"}, although it results in the same formula.
\begin{figure}[H]
\captionsetup[subfigure]{margin=0cm}
\centering
\begin{subfigure}{0.25\linewidth}
\centering
% Runtime
\begin{tikzpicture}[auto, on grid, node distance=1.5cm and 2cm, block/.style = {draw, fill=white, rectangle, minimum height=1cm, minimum width=2cm}, none/.style = {draw=none}]
\node [none] (input) {$\small\code{String}\normalsize$};
\node [block, below = of input] (lexer) {Lexer};
\node [block, below = of lexer] (parser) {Parser};
\node [block, below = of parser] (typer1) {Typer 1};
\node [none, below = of typer1, draw=none] (output) {$\small\code{T}\normalsize$};
\draw [->] (input) -- (lexer);
\draw [->] (lexer) -- (parser);
\draw [->] (parser) -- (typer1);
\draw [->] (typer1) -- (output);
\end{tikzpicture}
\caption{Runtime parsing}
\label{fig:parsing-runtime}
\end{subfigure}
\qquad\qquad
\begin{subfigure}{0.4\linewidth}
\centering
% Compile-time
\begin{tikzpicture}[auto, on grid, node distance=1.5cm and 2.25cm, block/.style = {draw, fill=white, rectangle, minimum height=1cm, minimum width=2cm}, none/.style = {draw=none}]
\node [none] (input) {$\small\code{Expr[StringContext]}\normalsize$};
\node [block, below left = of input] (lexer) {Lexer};
\node [block, below right = of lexer] (parser) {Parser};
\node [block, below = of parser] (typer1) {Typer 1};
\node [block, below = of typer1] (typer2) {Typer 2};
\node [block, below = of typer2] (converter) {Converter};
\node [none, below = of converter, draw=none] (output) {$\small\code{Expr[T]}\normalsize$};
\draw [->] (input) -| (lexer);
\draw [->] (lexer) |- (parser);
\draw [->] (input) -- (parser);
\draw [->] (parser) -- (typer1);
\draw [->] (typer1) -- (typer2);
\draw [->] (input) -- ++(2.25cm, 0) -- ++(0, -6cm) -- (typer2);
\draw [->] (typer2) -- (converter);
\draw [->] (converter) -- (output);
\end{tikzpicture}
\caption{Compile-time parsing, with variable interpolation}
\label{fig:parsing-compile-time}
\end{subfigure}
\caption[Parsing phases]{Phases for regular runtime parsing (a) and compile-time string interpolation (b). \code{T} is one of \code{Sequent}, \code{Formula} or \code{Term}, depending on the interpolator that was called.}
\label{fig:multi-stage-parsing}
\end{figure}
To implement this feature, we must execute the different parsing phases separately (\autoref{fig:multi-stage-parsing}). One can observe that in a well-formed interpolator, tokens (as output by the lexer) cannot possibly overlap different string parts. That property lets us do lexing on each string part independently. From this step we can then identify all the identifiers that are potentially taken, allowing us to safely create fresh ones. Then, all the variables are replaced by fresh schemas, still represented as tokens at this point. Finally, we can proceed to the parsing and type checking phases. The first type checking phase assigns types to all of our fresh schemas, while the second ensures that these assigned types agree with the Scala types of the interpolated variables. A disagreement, or an error at an earlier phase, is mapped to a positioned Scala type error. Finally, all that is left to do is substitute those schemas with their actual values, and convert the result into an expression tree. Notice that we cannot do that at compilation-time, as we do not have access to the variables' values, only their types. But this makes sense: types model the range of allowed values; the rest of the information is to be discovered at runtime. Therefore the tree should be constructed at compilation-time, while the final substitution must happen at runtime.
We also studied the possibility of implementing \code{unapply} macros for pattern matching, which could be useful in a tactic language. We concluded that the implementation would be somewhat similar to \code{apply}. We could also make use of the matcher, for instance when matching partial sequents. However, support for inlining of \code{unapply} on string interpolators is currently only partial\footnote{\href{https://github.com/lampepfl/dotty/issues/8577}{github.com/lampepfl/dotty/issues/8577}}.
\begin{lstlisting}[language=Scala,caption={[String interpolation unapply]{An example of the possibilities offered by unapply and string interpolators. This is not currently implemented.}},label={lst:string-interpolator-unapply}]
formula"(?b \/ ?c) => p(?x)" match {
case formula"$f => $p($x)" => ...
}
\end{lstlisting}
| {
"alphanum_fraction": 0.7024646584,
"avg_line_length": 71.4867724868,
"ext": "tex",
"hexsha": "6f7bde13acaa7dd03d549e3777e8cf62b2434e42",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "34a77336497cd15ce5f005639d758f002c234d00",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "FlorianCassayre/master-project",
"max_forks_repo_path": "thesis/report/chapters/7-parsing-printing.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "34a77336497cd15ce5f005639d758f002c234d00",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "FlorianCassayre/master-project",
"max_issues_repo_path": "thesis/report/chapters/7-parsing-printing.tex",
"max_line_length": 1360,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "34a77336497cd15ce5f005639d758f002c234d00",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "FlorianCassayre/master-project",
"max_stars_repo_path": "thesis/report/chapters/7-parsing-printing.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-22T14:37:35.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-02-21T16:29:22.000Z",
"num_tokens": 3909,
"size": 13511
} |
\subsection{\soarb{version}}
\label{version}
\index{version}
\subsubsection*{Synopsis}
\begin{verbatim}
version
\end{verbatim}
\subsubsection*{Options}
No options
\subsubsection*{Description}
This command gives version information about the current Soar kernel. It returns the version number and build date, which can then be stored by the agent or the application.
| {
"alphanum_fraction": 0.7932011331,
"avg_line_length": 32.0909090909,
"ext": "tex",
"hexsha": "689153c26c5dd1f1c84264a56768e7e7ff7eea76",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "74a6f32ba1be3a7b3ed4eac0b44b0f4b2e981f71",
"max_forks_repo_licenses": [
"Unlicense"
],
"max_forks_repo_name": "sleyzerzon/soar",
"max_forks_repo_path": "Documentation/ManualSource/cli/version.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "74a6f32ba1be3a7b3ed4eac0b44b0f4b2e981f71",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Unlicense"
],
"max_issues_repo_name": "sleyzerzon/soar",
"max_issues_repo_path": "Documentation/ManualSource/cli/version.tex",
"max_line_length": 174,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "74a6f32ba1be3a7b3ed4eac0b44b0f4b2e981f71",
"max_stars_repo_licenses": [
"Unlicense"
],
"max_stars_repo_name": "sleyzerzon/soar",
"max_stars_repo_path": "Documentation/ManualSource/cli/version.tex",
"max_stars_repo_stars_event_max_datetime": "2016-04-01T04:02:28.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-04-01T04:02:28.000Z",
"num_tokens": 82,
"size": 353
} |
\documentclass[a4paper,12pt]{article}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{a4}
\usepackage{lipsum}
\usepackage{graphicx}
\usepackage{float}
\usepackage{listings}
\usepackage{color}
\usepackage{hyperref}
\usepackage{cite}
\usepackage{textgreek}
\usepackage{amsfonts}
\usepackage[margin=1in]{geometry}
\title{
{\Huge \bf Power Systems Lab}\\
\vspace{0.25in}
{\bf Experiment 9}\\
Laboratory Report
\vspace{1in}
}
\author{
\bf Syed Alisamar Husain, 17BEE012\\
B.Tech Electrical Engg, 8th Semester
}
\begin{document}
\begin{titlepage}
\maketitle
\vspace*{\fill}
\begin{center}
{\bfseries Department of Electrical Engineering} \\
Jamia Millia Islamia, New Delhi
\end{center}
\thispagestyle{empty}
\end{titlepage}
\newpage
\begin{center}
\huge Experiment 9
\vspace{0.5in}
\end{center}
\section{Objective}
Design a surge arrester for use in transmission lines
using Simulink.
\section{Theoretical Background}
{\bf A surge arrester is a device to protect electrical equipment from over-voltage
transients} caused by external (lightning) or internal (switching) events.
Also called a surge protection device (SPD) or transient voltage surge
suppressor (TVSS), this class of device is used to protect equipment in
power transmission and distribution systems.
The energy criterion for various insulation materials can be compared by
the impulse ratio. A surge arrester should have a low impulse ratio, so that
a surge incident on the surge arrester may be bypassed to the ground
instead of passing through the apparatus.
To protect a unit of equipment from transients occurring on an attached
conductor, a surge arrester is connected to the conductor just before it
enters the equipment. The surge arrester is also connected to ground and
functions by routing energy from an over-voltage transient to ground if
one occurs, while isolating the conductor from ground at normal operating
voltages. {\bf This is usually achieved through use of a varistor,
which has substantially different resistances at different voltages.}
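To give a feel for this nonlinearity, the short calculation below uses a textbook power-law approximation of a metal-oxide varistor, $I = k\,(V/V_{ref})^{\alpha}$, with an assumed exponent and with the 185~kV, 500~A protection point used later in this report; these are illustrative numbers only, not the parameters of the Simulink surge arrester block.
\begin{lstlisting}[language=Python]
# Power-law approximation of a metal-oxide varistor (illustrative values):
# I = k * (V / V_ref) ** alpha, with alpha assumed to be 25.
k, v_ref, alpha = 500.0, 185e3, 25   # 500 A at the 185 kV protection level

for v in (50e3, 150e3, 185e3, 250e3):
    i = k * (v / v_ref) ** alpha
    print(f"V = {v/1e3:6.0f} kV  I = {i:12.4g} A  R = V/I = {v/i:12.4g} ohm")
\end{lstlisting}
Below the protection level the device behaves almost as an open circuit, while above it the effective resistance collapses, which is exactly the behaviour exploited to divert surge energy to ground.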
\subsection{Construction of Metal Oxide Surge Arrester}
Zinc oxide is an N-type semiconducting material. It is pulverised and finely grained.
More than ten doping materials are added in the form of fine powders of insulating oxides
such as bismuth oxide, antimony trioxide, cobalt oxide, manganese oxide and chromium oxide.
The powder undergoes several processing steps, and the mixture is spray-dried to obtain a dry powder.
The dry powder is compressed into disc-shaped blocks.
The blocks are sintered to obtain a dense polycrystalline ceramic.
The metal oxide resistor disc is coated with a
conducting compound to protect the disc from undesirable environmental effects.
\begin{figure}[H]
\centering
\includegraphics[width=3in]{img/ZNO-surge-diverter.jpg}
\end{figure}
\pagebreak
\section{Implementation}
\subsection{Basic Model}
A basic implementation can be done with a controlled current source triggered by a step
function, via a parallel RL branch. The surge arresters are connected between the line and ground.
\begin{figure}[H]
\centering
\includegraphics[width=6in]{img/basic.png}
\end{figure}
\subsection{Transmission Line Model}
A 735 kV equivalent transmission system feeds a load through a 200 km transmission line.
The line is series compensated at the middle point and shunt compensated at its receiving end.
{\bf A fault is applied at the load terminals.}
The line is shunt compensated by a 110 MVAR per phase inductor at the load end.
The line is protected by metal oxide varistors (MOV). The series varistor MOV1 consists
of 30 columns protecting the capacitor at 2.5 times its rated voltage
(rated voltage is obtained for a 2000 kA line rated current).
The corresponding protection voltage (defined at 500 A per column) is 185 kV.
\begin{figure}[H]
\centering
\includegraphics[width=6in]{img/model.png}
\end{figure}
For simplicity, only one phase of the transmission system is modeled.
All parameters correspond to positive-sequence.
\pagebreak
\section{Observations}
In the transmission line model, it is observed that at the moment of the fault the load voltage
would be expected to drop to zero, but it does not, since the surge arrester continues to conduct.
The load current peaks at 4900 A and stays constant during the fault.
As the fault is cleared, both the load voltage and load current resume their initial values.
\begin{figure}[H]
\centering
\includegraphics[width=6in]{img/line_current_rms.png}
\caption{Load Voltage and Line Current (RMS)}
\end{figure}
\begin{figure}[H]
\centering
\includegraphics[width=5in]{img/line_surge.png}
\caption{Voltage and Current through Surge Arrester}
\end{figure}
\section{Result}
We designed a model of a surge arrester used on a transmission line in Simulink,
and observed the voltage and current characteristics of the device during a fault condition.
\end{document} | {
"alphanum_fraction": 0.7451996928,
"avg_line_length": 37.4676258993,
"ext": "tex",
"hexsha": "ef49386005dcedb1839c4ace822c377719149915",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "3b6fc0fde5bed2eaee396cab92e60ed280c4cd8b",
"max_forks_repo_licenses": [
"CC0-1.0"
],
"max_forks_repo_name": "zrthxn/powersyslab",
"max_forks_repo_path": "Ex 9/SurgeArrester.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "3b6fc0fde5bed2eaee396cab92e60ed280c4cd8b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC0-1.0"
],
"max_issues_repo_name": "zrthxn/powersyslab",
"max_issues_repo_path": "Ex 9/SurgeArrester.tex",
"max_line_length": 101,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "3b6fc0fde5bed2eaee396cab92e60ed280c4cd8b",
"max_stars_repo_licenses": [
"CC0-1.0"
],
"max_stars_repo_name": "zrthxn/powersyslab",
"max_stars_repo_path": "Ex 9/SurgeArrester.tex",
"max_stars_repo_stars_event_max_datetime": "2021-03-16T08:48:05.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-03-16T08:48:05.000Z",
"num_tokens": 1323,
"size": 5208
} |
\documentclass[output=collectionpaper]{langsci/langscibook}
\ChapterDOI{10.5281/zenodo.3462768}
\author{Bruno Olsson\affiliation{Australian National University}}
\title{The gender system of Coastal Marind}
\abstract{The gender system of Coastal Marind (a Papuan language of the Anim family of South New Guinea; \citealt{Usher2015}) is treated in relative detail in Drabbe's (\citeyear{Drabbe1955}) masterful grammar. The division of nouns into four genders (basically masculine, feminine and two inanimate genders) is familiar from various languages around the globe, but the morphology of exponence (gender agreement marked to a large extent by stem-internal changes on targets) is somewhat more exotic and is occasionally cited in the literature. In this paper I provide an overview of the system, combined with discussion of two issues: the origins of stem-internal gender agreement, and the wide-ranging syncretism between animate plurals and the 4th gender (the 2nd inanimate gender). I show that this `syncretism' makes the status of the 4th gender ambiguous, since the members of this gender also could be analysed as an unusually large class of pluralia tantum. While I argue that the synchronic 4-gender analysis must be maintained for Coastal Marind, I speculate that an erstwhile grouping of pluralia tantum provided the diachronic source of the 4th gender.
\medskip
\keywords{Gender, number, morphology, diachrony, Papuan languages}
}
\maketitle
\begin{document}
\section{Introduction}
The idea that gender systems can become more complex (add a gender or two) through the `reinterpretation' of some non-gender feature as signalling a gender value has a long history in linguistics (e.g.\ \citealt{Brugmann1891} on the origins of the \ili{Indo-European} feminine gender). In this paper I show that the fourth gender of \ili{Coastal Marind} could be more parsimoniously described as pluralia tantum in a 3-gender system; however, I will argue that semantic considerations ultimately force us to retain the traditional four-gender description.
Based on its ambiguous status in \ili{Coastal Marind}, I will speculate that the fourth gender in the languages of the \ili{Anim} family of South New Guinea could have originated as a grouping of pluralia tantum nouns, and that subsequent changes in the agreement system and attraction of additional nouns to the emerging fourth gender could have led to a present situation where the pluralia tantum analysis is no longer possible, resulting in a 4-gender system.
I also add further support to Usher \& Suter's \parencite*{Usher2015} proposal that one of the main manifestations of gender agreement in the language \textendash{} stem internal vowel alternations in agreement targets \textendash{} arose from a process of umlaut triggered by postposed articles, by showing that the synchronic distribution of stem-final vowels in nouns is consistent with gender umlaut affecting a much larger part of the lexicon than just present-day gender-agreeing lexemes. The discussion is based on data from the best known \ili{Anim} language, \ili{Coastal Marind} (for a modern reference grammar, see \citealt{Olsson2017}).
The article is structured as follows. \sectref{sec:Bruno:MarGndr} is a brief demonstration of the four genders of \ili{Coastal Marind}. The language is placed in its areal and genealogical context in \sectref{sec:Bruno:context}, while \sectref{sec:Bruno:typology} provides information about some relevant structural features of \ili{Coastal Marind}.
%
\sectref{sec:Bruno:4genders} describes the interesting correlation between stem-final vowels and gender membership in nouns, showing that it is of limited productivity synchronically, but likely derives from an earlier system of postnominal gender articles.
%
\sectref{sec:Bruno:agr} describes gender agreement across the clause, with emphasis on the systematic correspondence between exponents of Gender IV and the plural of Gender I/II.
%
\sectref{sec:Bruno:Mar-indexing} shows that this correspondence continues in the participant indexing on the verb. This suggests an alternative analysis according to which Gender IV is an unusually large group of pluralia tantum rather than a gender of its own.
%
In \sectref{sec:Bruno:Mar-assignment} I will show that the assignment of nouns to Gender III and IV is largely arbitrary, but that the occurrence in Gender IV of many nouns that are typical pluralia tantum nouns across languages is suggestive of being a remnant of such a grouping. I also show that a similar pattern occurs in \ili{Mian}, a language that probably is a distant relative of \ili{Coastal Marind} since the \ili{Anim} and \ili{Ok} families (to which \ili{Mian} belongs) are likely members of the enormous Trans-New Guinean\il{Trans-New Guinea} super-family. I conclude that the 4-gender analysis should be maintained for the present state of \ili{Coastal Marind}, but that the pluralia tantum nouns possibly provided the source for the fourth gender.
\subsection{The Coastal Marind 4-gender system}\label{sec:Bruno:MarGndr}
The existence of a 4-gender system in \ili{Coastal Marind} is evident if one compares the form of the demonstrative \mar{Vpe} (where \mar{V} stands for a vowel) or the adjective \mar{samla\GH Vn} `mid-size, neither big nor small' combined with different nouns in examples (\ref{ex:Bruno:Mar-intro1})--(\ref{ex:Bruno:Mar-intro3}). As indicated by the hyphens, attributively used adjectives are compounded with their head nouns. The nouns themselves are invariant.
\ea\label{ex:Bruno:Mar-intro1}
%\\
\begin{xlist}
\ex
\gll samla\GH\textbf{e}n-patul \textbf{e}-pe\\
mid.size:I-boy(I) I-that\\
\ex
\gll samla\GH\textbf{u}n-kyasom \textbf{u}-pe\\
mid.size:II-girl(II) II-that\\
\glt `that mid-size boy/girl'
\end{xlist}
\z
\ea\label{ex:Bruno:Mar-intro2}
%\\
\begin{xlist}
\ex
\gll samla\GH\textbf{i}n-patul \textbf{i}-pe\\
mid.size:I/II.\textsc{pl}-boys(I) I/II.\textsc{pl}-that\\
\ex
\gll samla\GH\textbf{i}n-kyasom \textbf{i}-pe\\
mid.size:I/II.\textsc{pl}-girls(II) I/II.\textsc{pl}-that\\
\glt `those mid-size boys/girls'
\end{xlist}
\z
\ea\label{ex:Bruno:Mar-intro3}
%\\
\begin{xlist}
\ex
\gll samla\GH\textbf{a}n-da \textbf{e}-pe\\
mid.size:III-sago(III) III-that\\
\glt `that mid-size sago palm/those mid-size sago palms'\\
\ex
\gll samla\GH\textbf{i}n-bomi \textbf{i}-pe\\
mid.size:IV-termite.mound(IV) IV-that\\
\glt `that mid-size termite mound/those mid-size termite mounds'\\
\end{xlist}
\z
\noindent All nouns denoting male humans behave like \mar{patul} `boy' (in \ref{ex:Bruno:Mar-intro1}a) in combining with a demonstrative with the initial vowel \mar{e-} in the singular; nouns denoting female humans (and all animals) pattern like \mar{kyasom} `girl' (\ref{ex:Bruno:Mar-intro1}b) in combining with an \mar{u}-initial demonstrative. As the examples in (\ref{ex:Bruno:Mar-intro2}) show, these nouns exhibit a contrast in number. The demonstrative has to be \mar{ipe} in the plural, and the adjective, which is compounded with its head noun, has the exponent vowel \mar{i} in the final syllable of the stem.
The nouns in (\ref{ex:Bruno:Mar-intro3}) are inanimate, and trigger different vowels on the demonstrative: \mar{da} `sago palm' triggers \mar{e-}, \mar{bomi} `termite mound' triggers \mar{i-}. Note that the resulting forms are homophonous with demonstratives in the preceding examples: \mar{\textbf{e}pe} in (\ref{ex:Bruno:Mar-intro3}a) with the demonstrative used for \mar{patul} in (\ref{ex:Bruno:Mar-intro1}a), and \mar{\textbf{i}pe} in (\ref{ex:Bruno:Mar-intro3}b) with the plural forms in (\ref{ex:Bruno:Mar-intro2}). For (\ref{ex:Bruno:Mar-intro3}a), the distinct form \mar{samla\GH \textbf{a}n} of the adjective proves that this is indeed a separate gender, although the agreement of the demonstrative happens to be homophonous with that seen in (\ref{ex:Bruno:Mar-intro1}a). But the case in (\ref{ex:Bruno:Mar-intro3}b) is more difficult, since the agreement on both the demonstrative and the adjective turns out to be homophonous with the plural forms. I will return to this pervasive syncretism further below.
The four agreement classes \textendash{} from now on referred to as Gender I, II, III and IV \textendash{} are summarized in Table~\ref{table:Bruno:agrcls}, as evidenced by the exponence pattern of \mar{samla\GH Vn}.
\begin{table}[!htb]
\centering
\begin{tabular}{lll}
\lsptoprule
& \textsc{sg} & \textsc{pl} \\
\midrule
I &e\tknode{A} &\multirow{2}{*}{\tknode{F}i}\\
II &u\tknode{E}&\\
%I &e &\multirow{2}{*}{i}\\
%II &u&\\
III &\multicolumn{2}{c}{a}\\
IV &\multicolumn{2}{c}{i}\\
\lspbottomrule
\end{tabular}
% now connect up the nodes
\tikz[remember picture, overlay] \draw[thick] (E.center) -- (F.center);
\tikz[remember picture, overlay] \draw[thick] (A.center) -- (F.center);
\caption{Exponents of agreement on \mar{samla\GH Vn} `mid-size'}
\label{table:Bruno:samlaxVn}
\label{table:Bruno:agrcls}
\end{table}
These data represent one of the most well-known gender systems in New Guinea. The \ili{Coastal Marind} system of four grammatical genders has featured in prominent publications such as \textcite[116]{Corbett1991} and \textcite[60]{Aikhenvald2000} after having been brought to the fore in Foley's influential compendium on Papuan languages \parencite[82--83]{Foley1986}. This attention is due to the description of the gender system provided in Petrus Drabbe's extensive grammar of the language \parencite{Drabbe1955}. Few researchers seem to have had the courage to dive deeper into Father Drabbe's sometimes quite demanding \emph{Spraakkunst}, so one purpose of this article will be to give a more representative picture of the gender system and its manifestations, and, in particular, the syncretism between animate plurals and Gender IV. The data come from my own fieldwork on the Western variety of \ili{Coastal Marind}, a dialect that is mutually intelligible with the Eastern variety described by Drabbe.
\subsection{Coastal Marind in context}
\label{sec:Bruno:context}
The varieties collectively known as \ili{Coastal Marind} are spoken in ca.~40 villages along the coast of the Arafura Sea and in the adjoining swampy lowlands. I estimate the total number of speakers to be around 14,000 based on government and SIL figures. The \ili{Coastal Marind} land forms part of the linguistically diverse Trans-Fly area \parencite{Evans2012,Evans2018} straddling the border of present-day Indonesia (where \ili{Coastal Marind} is spoken) and the independent country of Papua New Guinea.
The dialect situation is complex, and it is probable that ongoing research will show that some of the varieties described in the literature as dialects are in fact distinct languages. Dialectal variation in gender would likely be an interesting area to explore, as there are differences (mainly in assignment) even between villages speaking virtually identical varieties of \ili{Coastal Marind}. On the whole, however, the basics of gender and agreement are the same in all known varieties, so the data presented here (from the village of Wambi) are representative of all coastal varieties, and probably of the (less well-known) inland varieties as well.
On a higher level, gender has recently emerged as a crucial factor in the genealogical classification of \ili{Coastal Marind}. \textcite{Usher2015} show that gender ablaut in nouns such as \mar{an\textbf{e}m} `man', \mar{an\textbf{u}m} `woman' and \mar{an\textbf{i}m} `people' recur throughout a number of languages of the Trans-Fly region. This observation, in addition to a large set of lexical cognates showing regular sound correspondences, leads Usher \& Suter to propose a hitherto unrecognized language family \textendash{} the \ili{Anim} family, named after the recurring word for `people' \textendash{} of which \ili{Coastal Marind} so far is the only language for which substantial descriptive work is available. Obviously, more work on the other \ili{Anim} languages \textendash{} several of which are rapidly losing speakers \textendash{} could provide crucial insights into the development of the \ili{Anim} gender system.
\subsection{Typological background}
\label{sec:Bruno:typology}
Some of the structural features of \ili{Coastal Marind} are relevant to the description of its gender system. \ili{Coastal Marind} displays the relatively rare combination of verb-final constituent order and massively prefixing verb inflection. Based on co-occurrence, a prefixal template with ca.~18 slots can be set up, marking notions such as tense, various aspectual distinctions, applicatives, reciprocal, various adverbial meanings (`again', `first', `far away', `in contact with surface') and indexation of (roughly) actor, recipient and affected possessor; undergoer indexation is in turn marked on the verb stem by complicated alternations including pre-, suf-, in-, and circumfixal morphology.
Some of the prefixes occupying the first (i.e.\ leftmost) positions agree in gender with an argument, although they primarily mark grammatical distinctions other than gender (e.g.\ tense-aspect). The prefixes devoted to argument indexing, on the other hand, reflect person and number but are insensitive to gender (with some exceptions to be discussed later). The verb stem itself is an important site for the manifestation of gender, so the intricate stem changes will be crucial to the arguments made here.
A relatively straightforward example of how verbs are segmented is given in (\ref{ex:Bruno:fight}). This verb has two prefixes, of which the first (leftmost) prefix agrees in gender with the subject (plural of Gender I/II). The stem is separated from the prefixal complex by a phonological boundary (indicated in glossing by means of a trailing hyphen followed by a blank). The formative \mar{n-} on the stem marks it as the 1st person undergoer form, which clearly is a mismatch since there is no 1st person participant involved in the event. This idiosyncrasy is part of the reciprocal construction, and such value mismatches are not uncommon in \ili{Coastal Marind} (cf.\ \sectref{sec:Bruno:Mar-indexing}).
\ea\label{ex:Bruno:fight}
\gll ip-enam- n-asak-e\\
\textsc{absc}:I/II.\textsc{pl}-\textsc{recp}- 1.\textsc{u}-fight-\textsc{ipfv}\\
\glt `They are fighting.'
\z
Nominal morphology is sparse: there is no case marking and most nouns do not show overt gender marking. The exception is a handful of nouns (mostly kinship terms) that show alternations in the stem-final vowel according to gender (see below). This marking pattern also occurs on a subset of adjectives which agree with a noun in attributive and predicative use. The majority of adjectives are invariant and fail to show agreement. Instead, the main loci of gender agreement outside verbs are demonstratives and pronominal-like words (emphatic pronouns, question words). In the next section I turn to the reflexes of gender in nouns and what they can tell us about the diachronic development of gender marking in this part of the lexicon.
\section{The manifestation of gender in nouns}
\label{sec:Bruno:4genders}
\subsection{Overt gender}
\label{sec:Bruno:2.1}
A comparison of gender agreement across different word classes confirms that the picture emerging from examples (\ref{ex:Bruno:Mar-intro1})--(\ref{ex:Bruno:Mar-intro3}) above is correct. All words that show morphological alternations according to gender follow these four agreement clas\-ses, although exponents vary across the targets showing agreement, and although many targets do not distinguish all four classes. Before dealing with agreement proper, we will consider nouns displaying \spterm{overt gender}. Whereas such alternations are not productive in contemporary \ili{Coastal Marind}, a closer look reveals that traces of a more wide-ranging system of stem-final vowel alternations can be observed. The origins of this system of overt marking can be reconstructed following \cite{Usher2015}, as will be seen later.
Some nouns with overt gender marking are listed in Table~\ref{table:Bruno:overt}. Gender membership is reflected by the vowel in the final syllable of the stem (referred to as the `stem-final vowel'), and the meaning of the noun is largely predictable from the gender. Thus, the skeletal stem \mar{anVm} (a) can be thought of as having the general meaning `person', which is narrowed down to `man' when assigned to Gender I (\mar{an\textbf{e}m}), `woman' in Gender II (\mar{an\textbf{u}m}), etc.; the stem \mar{nahyVm} `my spouse' (f) (\mar{na-} is a 1st person possessive prefix) giving `husband' (\mar{nahy\textbf{a}m}, Gender I) and `wife' (\mar{nahy\textbf{u}m} Gender II) once gender is assigned and vowels plugged into the stem.\footnote{Note that `overt gender' only applies to nouns for which there is at least one other noun differing only in a stem-internal vowel, with a corresponding change in meaning. For example, the Gender IV noun \mar{bomi} `termite mound' does not have overt gender despite the presence of stem-final \mar{i} (which is the general exponent of Gender IV agreement), since there are no corresponding nouns *\mar{bome}, *\mar{bomu} etc.\ to be found in the other genders.}
\begin{table}[t]
\centering
\caption{Overt gender on nouns}
\label{table:Bruno:overt}
\begin{tabular}[t]{llllll}
\lsptoprule
& I \textsc{sg} & II \textsc{sg} & I/II \textsc{pl} & III & IV \\
\midrule
a. & \mar{an\textbf{e}m} & \mar{an\textbf{u}m} & \mar{an\textbf{i}m} & \mar{an\textbf{e}m} & \mar{an\textbf{i}m} \\
& \footnotesize{`man'} & \footnotesize{`woman'} & \footnotesize{`people'} & \footnotesize{}\\
b. & \mar{nam\textbf{e}k} & \mar{nam\textbf{u}k} & \mar{nam\textbf{i}k} & \mar{} & \mar{} \\
& \footnotesize{`cousin (m)'} & \footnotesize{`cousin (f)'} & \footnotesize{`cousins'} & \footnotesize{} & \footnotesize{} \\
c. & \mar{} & \mar{namak\textbf{u}d} & \mar{namak\textbf{i}d} & \mar{namak\textbf{a}d} & \mar{namak\textbf{i}d} \\
& \footnotesize{} & \footnotesize{`animal'} & \footnotesize{`animals'} & \footnotesize{`thing(s)'} &\footnotesize{`thing(s)'}\\
d. & \mar{amnangg\textbf{i}b} & \mar{} & \mar{amnangga} & \mar{} & \mar{} \\
& \footnotesize{`married man'} & \footnotesize{} & \footnotesize{`married men'} & \footnotesize{} & \footnotesize{} \\
e. & \mar{wanangg\textbf{i}b} & \mar{wanangg\textbf{u}b} & \mar{wanangga} & \mar{} & \mar{} \\
& \footnotesize{`boy'} & \footnotesize{`girl'} & \footnotesize{`children'} & \footnotesize{}\\
f. & \mar{nahy\textbf{a}m} & \mar{nahy\textbf{u}m} & \mar{} & \mar{} & \mar{} \\
& \footnotesize{`my husband'} & \footnotesize{`my wife'} \\%& \footnotesize{`'} & \footnotesize{`'} & \footnotesize{`'} \\
g. & \mar{e\GH \textbf{a}l} & \mar{e\GH \textbf{u}l} & \mar{} & \mar{} & \mar{} \\
& \footnotesize{`somebody (m)'} & \footnotesize{`somebody (f)'} & \footnotesize{} & \footnotesize{}\\
h. & \mar{nan\textbf{i}h} & \mar{nan\textbf{u}h} & \mar{nan\textbf{i}h} & \mar{} & \mar{} \\
& \footnotesize{`face (m)'} & \footnotesize{`face (f)'} & \footnotesize{`faces'} & \footnotesize{} & \footnotesize{} \\
%a. & \mar{} & \mar{} & \mar{} & \mar{} & \mar{} \\
% & \footnotesize{`'} & \footnotesize{`'} & \footnotesize{`'} & \footnotesize{`'} & \footnotesize{`'} \\
\lspbottomrule
\end{tabular}
\end{table}
Assuming that the sets of gender forms derived from the skeletal stems are best treated as members of unitary lexemes, we can say that these lexemes are a proper subset of the nouns having \spterm{referential gender} \citep{Dahl2000a}, i.e.\ nouns that lack intrinsic gender and receive their gender value from the referent at hand. Most such nouns do not show overt gender, e.g.\ \mar{\GH una\GH on} `infant' (which takes agreement in Gender I or II depending on the sex of the referent).
The disassembly of \ili{Coastal Marind} nouns into skeletal stems with inserted gender markers could appear to be a slightly misleading way of approaching the gender system of the language, since the phenomenon is fairly marginal. Only a dozen lexical items or so display the vowel alternation,\footnote{There are a handful of other nouns with overt gender in addition to the ones shown in the table. All of these denote humans of different age-ranks or societal roles that are more or less obsolete today, so the corresponding terms are falling out of use.} and many of the expected forms are irregular (e.g.\ plural of \mar{wananggVb} is \mar{wanangga} `children', there is no plural *\mar{wananggib}) or simply non-existent (e.g.\ there is no plural of \mar{e\GH Vl} `somebody'). The vowel alternation seems to be complete only for the stems \mar{anVm} and \mar{namakVd}: in addition to the person-denoting triplet man/woman/people, the former provides the forms \mar{anem} and \mar{anim} for inanimate denotanda in Gender III and IV respectively, for example in some compounds denoting fruits (\textit{ambun\hyp{}anem}, a Syzygium species in Gender III), while \mar{namakVd} apparently can be used for non-rational entities (animals, things) of all genders except the masculine I.%
\footnote{In fact it seems that the stem \mar{namakVd} `animal/thing' can be used in Gender I: speakers reported that \mar{namak\textbf{e}d} can be used to refer to a male, although apparently with pejorative overtones, though I have never observed this in spontaneous speech.}
Looking at more nouns from Gender I and II, it seems clear that the pattern of alternating vowels showing gender membership is the exception rather than the rule. Nouns in Gender I denoting male humans also include \mar{patul} `boy', \mar{ad} `father', \mar{manda\GH} `wife's elder brother, younger sister's husband' and so on; these nouns do not participate in any alternation with corresponding plural or female-denoting nouns. %, and all of them fail to show the `Gender I vowel' /e/ stem-finally.
Person-denoting nouns in Gender II that likewise show no trace of overt gender are \mar{kyasom} `girl', \mar{nikna} `son's wife', \mar{ne} `mother's brother's wife' etc.
% although marginal, still good illustration of stem-final V alternation.
Although overt gender is found only in a very small portion of the nominal lexicon, it should be noted that some of these nouns are high-frequency items, such as the words corresponding to the stem \mar{anVm}, whose combined score makes them more frequent than any other noun in my corpus. Outside the noun inventory, stem-final vowel alternation plays an important role in common agreement targets such as the emphatic pronoun \mar{anVp} (`-self'), adjectives such as \mar{papVs} `small' and the postposition \mar{lVk} `from'. This means that overt gender on nouns, and stem-final vowel alternation in general, is a common feature of \ili{Coastal Marind} discourse, and obviously not as marginal as it would seem from a dictionary count alone.
A central claim of the comparative work in \textcite{Usher2015} is that the vowel alternations according to gender occur in languages throughout the \ili{Anim} family, and that its origins can be reconstructed. Consider the forms \mar{aneme(a)} `man', \mar{anumu} `woman', \mar{animi} `people' from the related language Ipiko, another member of the \ili{Anim} family. Usher \& Suter argue that the stem-final vowel in \mar{anVm} and other alternating stems is a residue of an earlier system of postnominal articles marking the gender of the noun, and they reconstruct expressions such as \mar{*anem=e} `the man', \mar{*anum=u} `the woman', \mar{*anim=i} `the people' \parencite*[114]{Usher2015}. In an earlier stage the noun was invariant and it was the presence of the gender article that triggered umlaut in the stem-final syllable (the shape of the invariant stem is beyond what can be reconstructed from the available data).
Usher \& Suter's hypothesis is plausible, especially as it refers to a well-known process leading to stem-internal vowel alternations (cf.\ \ili{Germanic} umlaut giving \ili{English} \emph{mouse} and \emph{mice} triggered by an earlier plural ending \emph{*-iz}). It can be added that some alternations are likely the result of more recent derivations involving gender-marking morphology. For example, the word \mar{wa\GH uklu} `girl' and its plural \mar{wa\GH uklik} `girls' are probably related to the postposition `from' which has the forms \mar{luk} and \mar{lik} in the feminine and plural respectively, and which seems to be the source of many deverbal nominals in \ili{Coastal Marind} (see \citealt[335]{Geurtjens1933} for the etymology; cf.\ \mar{dahahiplik} `drunkards' from \mar{dahahip} `become drunk (plural subject)'). However, the ultimate source of the vowel alternation in \mar{lVk} `from' is likely not distinct from the umlaut process giving rise to the forms of \mar{anVm}, so the suggestion that some cases of synchronic vowel alternations are of more recent origin than the original umlaut is not intended as a counterexample to Usher \& Suter, but as an indication that the alternating pattern propagated indirectly through the lexicon as a result of derivation.
\subsection{Simulating the effects of umlaut in the lexicon}
Given the observations of alternating nouns showing overt gender, and Usher \& Suter's suggestion that the alternation came about because of umlaut triggered by a postposed article, the following interesting question arises: are there traces of umlaut also in non-alternating noun stems?
If umlaut was a regular process, we would expect it to have appeared with many nouns, as long as they were used with postposed articles. In the ideal case, all nouns in Gender I would have ended up with the stem-final vowel \mar{e}, those in Gender II stem-final \mar{u}, Gender III \mar{a}, and those in Gender IV \mar{i}. This is clearly not the case, as shown by the counts of stem-final vowels in Table~\ref{table:Bruno:numbers}. The table displays the frequency with which each of the five vowels of \ili{Coastal Marind} occurs in the last syllable of nouns whose gender membership has been determined. I have excluded all nouns showing overt gender from the counts, since we already know that their stem-final vowels correlate with gender membership. This is the reason why Gender I has so few members: the remaining male-denoting nouns have overt gender (e.g.\ \mar{anVm}). Gender II likewise contains only a handful of female-denoting nouns, but has a higher count since it includes all names of animals.
\begin{table}
\centering
\begin{tabular}{lrrrrr}
\lsptoprule
& I (\mar{e}) & II (\mar{u}) & III (\mar{a}) & IV (\mar{i}) & Tot.\\
\midrule
/i/ &5 &29 &25 &\cellcolor{lsLightGray}44 & 103\\
/u/ &0 &\cellcolor{lsLightGray}27 &39 &19 & 85\\
/e/ &1 &15 &31 &13 & 60\\
/o/ &2 &22 &34 &14 & 72\\
/a/ &4 &55 &\cellcolor{lsLightGray}108 &29 & 196\\
\midrule
Tot. &12 &148 &237 &119 & \textbf{516}\\
\lspbottomrule
\end{tabular}
\caption{Distribution of stem-final vowels in nouns according to gender}
\label{table:Bruno:numbers}
\end{table}
Consider now the possibility that stem-final vowels of nouns and gender membership correlate to some degree, despite there being no one-to-one match. We are particularly interested in the vowels \mar{e}, \mar{u}, \mar{a} and \mar{i}, which \textcite{Usher2015} identify as the vowels of the proto-\ili{Anim} demonstrative.%
\footnote{In fact, \textcite[119]{Usher2015} tentatively reconstruct both *\emph{a} and *\emph{o} for the proto-\ili{Anim} Gender III, but the exponent \mar{o} is rare in \ili{Coastal Marind}.} %
The vowels are given inside parentheses after their associated genders at the top of the table.
We cannot test the correlation for Gender I, since there are too few nouns assigned to this category. The relevant cells for the remaining three genders have been shaded in Table~\ref{table:Bruno:numbers}. We now need to ascertain whether these scores could have been produced by a chance distribution of stem-final vowels, or whether they are non-random, thereby providing evidence that the umlaut pattern is found beyond the synchronically attested overt gender nouns.
To test this, I performed a simulation in which the nouns were reassigned randomly to the four genders (keeping the proportions intact), and then counted the frequency with which the vowels turned up in each gender. This procedure was then repeated a total of 200,000 times; the accumulated counts for the occurrence of the relevant vowels in Gender II, III and IV are presented in Figure~\ref{fig:Bruno:distr}, with the actual frequency of the vowel represented by the cross on the x-axis. The results show that two of the vowels are over-represented to a significant degree: \mar{a} as the stem-final vowel in Gender III (\emph{z}=2.40, adjusted \emph{p}<0.05) and \mar{i} as the stem-final vowel of Gender IV (\emph{z}=4.65, adjusted \emph{p}<0.001). These results support the hypothesis that gender umlaut affected a part of the lexicon that is larger than the set of nouns with overt gender, including many nouns of Gender III and IV.
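For concreteness, the resampling procedure can be sketched as follows. The sketch is in Python and is not the original implementation; the arrays \texttt{vowels} and \texttt{genders} stand for the 516 stem-final vowels and gender labels underlying Table~\ref{table:Bruno:numbers}, and the multiplication of the \emph{p}-value by four (one test per gender) is just one possible way of adjusting for multiple comparisons.

\begin{verbatim}
import numpy as np

rng = np.random.default_rng()

def vowel_test(vowels, genders, gender, vowel, n_sim=200_000):
    # Permutation test: is `vowel` over-represented as stem-final
    # vowel among the nouns of `gender`, given the gender sizes?
    vowels = np.asarray(vowels)
    genders = np.asarray(genders)
    observed = np.sum((genders == gender) & (vowels == vowel))
    size = np.sum(genders == gender)      # keep the gender size intact
    counts = np.empty(n_sim)
    for i in range(n_sim):
        # reassign nouns to the gender at random, then count the vowel
        sample = rng.choice(vowels, size=size, replace=False)
        counts[i] = np.sum(sample == vowel)
    z = (observed - counts.mean()) / counts.std()
    p = np.mean(counts >= observed)       # one-sided p-value
    return observed, z, min(4 * p, 1.0)   # adjusted for four tests
\end{verbatim}

\noindent Running the function once for each of the shaded cells in Table~\ref{table:Bruno:numbers} yields simulated distributions of the kind plotted in Figure~\ref{fig:Bruno:distr}.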
No other positive skewings were close to statistical significance. This is somewhat surprising for Gender II, which would be expected to show a preference for \mar{u} as the stem-final vowel (cf.\ the leftmost pane in Figure~\ref{fig:Bruno:distr}). I have no explanation for this, but it is worth noting that \ili{Coastal Marind} seems to differ from other \ili{Anim} languages in the uniform assignment of animals to Gender II: animals turn out to be divided between Gender I and II (the `masculine' and `feminine' genders) in Kuni \parencite[9]{Edwards-Fumey2007}, Ipiko \citep[117, examples 16--17]{Usher2015}, and Bitur (Phillip Rogers, pers.\ comm.\@) which belong to three distinct sub-branches of \ili{Anim}. A possible scenario would be that the reassignment of all animals to Gender II is an innovation present in \ili{Coastal Marind}, which then would have obliterated any preponderance of \mar{u} in Gender II as the new members entered.
\begin{figure}[t]
\scriptsize
\begingroup%
\makeatletter%
\providecommand\color[2][]{%
\errmessage{(Inkscape) Color is used for the text in Inkscape, but the package 'color.sty' is not loaded}%
\renewcommand\color[2][]{}%
}%
\providecommand\transparent[1]{%
\errmessage{(Inkscape) Transparency is used (non-zero) for the text in Inkscape, but the package 'transparent.sty' is not loaded}%
\renewcommand\transparent[1]{}%
}%
\providecommand\rotatebox[2]{#2}%
\ifx\svgwidth\undefined%
\setlength{\unitlength}{345bp}%
\ifx\svgscale\undefined%
\relax%
\else%
\setlength{\unitlength}{\unitlength * \real{\svgscale}}%
\fi%
\else%
\setlength{\unitlength}{\svgwidth}%
\fi%
\global\let\svgwidth\undefined%
\global\let\svgscale\undefined%
\makeatother%
\begin{picture}(1,0.33530411)%
\put(0,0){\includegraphics[width=\unitlength,page=1]{figures/08/Rplot2.pdf}}%
\put(0,0){\includegraphics[width=\unitlength,page=2]{figures/08/Rplot2.pdf}}%
\put(0,0){\includegraphics[width=\unitlength,page=3]{figures/08/Rplot2.pdf}}%
\put(0,0){\includegraphics[width=\unitlength,page=4]{figures/08/Rplot2.pdf}}%
\put(0.22278329,0.31247093){\color[rgb]{0,0,0}\makebox(0,0)[lb]{\smash{\textbf{II : \mar{u}}}}}%
\put(0.51684982,0.31247093){\color[rgb]{0,0,0}\makebox(0,0)[lb]{\smash{\textbf{III : \mar{a}}}}}%
\put(0.82047708,0.31247093){\color[rgb]{0,0,0}\makebox(0,0)[lb]{\smash{\textbf{IV : \mar{i}}}}}%
\put(0.04929372,0.04768585){\color[rgb]{0,0,0}\makebox(0,0)[lb]{\smash{0}}}%
\put(0.02085631,0.10705092){\color[rgb]{0,0,0}\makebox(0,0)[lb]{\smash{5000}}}%
\put(0.01137718,0.16641598){\color[rgb]{0,0,0}\makebox(0,0)[lb]{\smash{10000}}}%
\put(0.01137718,0.22578093){\color[rgb]{0,0,0}\makebox(0,0)[lb]{\smash{15000}}}%
\put(0.01137718,0.28514593){\color[rgb]{0,0,0}\makebox(0,0)[lb]{\smash{20000}}}%
\put(0,0){\includegraphics[width=\unitlength,page=5]{figures/08/Rplot2.pdf}}%
\put(0.11278115,0.02141167){\color[rgb]{0,0,0}\makebox(0,0)[lb]{\smash{10}}}%
\put(0.18246935,0.02141167){\color[rgb]{0,0,0}\makebox(0,0)[lb]{\smash{20}}}%
\put(0.25215754,0.02141167){\color[rgb]{0,0,0}\makebox(0,0)[lb]{\smash{30}}}%
\put(0.32184573,0.02141167){\color[rgb]{0,0,0}\makebox(0,0)[lb]{\smash{40}}}%
\put(0,0){\includegraphics[width=\unitlength,page=6]{figures/08/Rplot2.pdf}}%
\put(0.45385705,0.02141167){\color[rgb]{0,0,0}\makebox(0,0)[lb]{\smash{70}}}%
\put(0.53263503,0.02141167){\color[rgb]{0,0,0}\makebox(0,0)[lb]{\smash{90}}}%
\put(0.60664738,0.02141167){\color[rgb]{0,0,0}\makebox(0,0)[lb]{\smash{110}}}%
\put(0,0){\includegraphics[width=\unitlength,page=7]{figures/08/Rplot2.pdf}}%
\put(0.72345954,0.02141167){\color[rgb]{0,0,0}\makebox(0,0)[lb]{\smash{10}}}%
\put(0.79691474,0.02141167){\color[rgb]{0,0,0}\makebox(0,0)[lb]{\smash{20}}}%
\put(0.87036978,0.02141167){\color[rgb]{0,0,0}\makebox(0,0)[lb]{\smash{30}}}%
\put(0.94382482,0.02141167){\color[rgb]{0,0,0}\makebox(0,0)[lb]{\smash{40}}}%
\put(0.26357878,-0.01032957){\color[rgb]{0,0,0}\makebox(0,0)[lb]{\smash{\textbf{Number of nouns with the indicated stem-final vowel}}}}%
\put(0.00000578,0.14545869){\color[rgb]{0,0,0}\rotatebox{90}{\makebox(0,0)[lb]{\smash{\textbf{Count}}}}}%
\end{picture}%
\endgroup%
\caption{Actual and simulated distributions of stem-final vowels}
% z.value p.value p.adjusted
%I 1.881 0.030 0.120
%II 0.581 0.281 1.000
%III 2.405 0.008 0.032
%IV 4.650 0.000 0.000
\label{fig:Bruno:distr}
\end{figure}
\section{Gender agreement}\label{sec:Bruno:agr}
I will now consider how gender is manifested across agreeing pronominals,\linebreak demonstratives and adjectives.\footnote{There is one more type of agreement target, viz.\ the four postpositions \mar{lVk} `from', \mar{nV} `without', \mar{tV} `with' and \mar{hV} `like'. They are interesting for a variety of reasons, but I omit them from discussion here.} The purposes will be to give an overview of the agreement system, which contains some typologically interesting features, and more specifically to show that the apparent syncretism noted above between Gender IV and the plural of Gender I/II is observed throughout the system. It even turns up in some unexpected places, prompting the question of whether the system is not better analyzed as comprising three genders instead of four, a possibility that will be further explored in \sectref{sec:Bruno:Mar-indexing}, \sectref{sec:Bruno:Mar-assignment} and \sectref{sec:Bruno:Mar-synchrony}.
\subsection{Pronominals and demonstratives}
The only word classes in which agreement is found on a majority of the members are demonstratives and pronominals. Agreement on the distal demonstrative \mar{Vpe} was seen in (\ref{ex:Bruno:Mar-intro1})--(\ref{ex:Bruno:Mar-intro3}) above; some more examples of agreeing targets within these categories are in Table~\ref{table:Bruno:targets}. While the small set of personal pronouns in \ili{Coastal Marind} (\mar{nok} `I, we' \mar{o\GH} `2\textsc{sg}', \mar{yo\GH} `2\textsc{pl}') show no gender distinction, gender agreement is pervasive across other pronominal-like elements such as question words (e.g.\ \mar{tV} `who, what' \mar{Vn} `where, which') and the polyfunctional word \mar{agV}, which has among its uses that of a placeholder `whats-his/her-name' (referring to a person) or `whatchamacallit' (referring to a thing).\footnote{Forcing speakers to choose a gender for words meaning `who, what?' that refer to some unknown entity might seem counter-intuitive since the gender of the referent must be unknown in many cases (since there is no clear semantic basis for Gender III and IV); cf.\ European languages restricting gender agreement to attributive `which' (e.g.\ \ili{Russian} \emph{kotoryj} `which (masc.)' etc.\@) while pronominal `who' lacks agreement (e.g.\ \ili{Russian} \mar{kto} `who'). Gender agreement on placeholders appears more common, especially in placeholders of phrasal and/or pronominal origin such as \ili{English} \emph{whatchamacallit} etc.} Note that, in contrast to the various unpredictable exponents of Gender I and III, the exponents of Gender II (\mar{u}) and Gender IV (\mar{i}) are constant across all targets, with the latter showing homophony with the I/II plural in all four items.
\begin{table}[t]
\centering
\caption{Pronominal and demonstrative targets}
\label{table:Bruno:targets}
\begin{tabular}[t]{llllll}
\lsptoprule
Gloss & I \textsc{sg} & II \textsc{sg} & I/II \textsc{pl} & III & IV \\
\midrule
`whats-his/her-name, whatchamacallit' & \mar{ag\textbf{e}} & \mar{ag\textbf{u}} & \mar{ag\textbf{i}} & \mar{ag\textbf{o}} & \mar{ag\textbf{i}} \\
`who/what' & \mar{t\textbf{a}} & \mar{t\textbf{u}} & \mar{t\textbf{i}} & \mar{t\textbf{a}} & \mar{t\textbf{i}} \\
`him-/her-/itself/themselves' & \mar{an\textbf{e}p} & \mar{an\textbf{u}p} & \mar{an\textbf{i}p} & \mar{an\textbf{e}p} & \mar{an\textbf{i}p} \\
`this/these' & \mar{\textbf{e}he} & \mar{\textbf{u}he} & \mar{\textbf{i}he} & \mar{\textbf{e}he} & \mar{\textbf{i}he} \\
\lspbottomrule
\end{tabular}
\end{table}
\subsection{Adjectives}
\ili{Coastal Marind} adjectives are similar to nouns in that both classes lack the luxuriant inflectional possibilities of verbs. The main morphosyntactic feature distinguishing adjectives from nouns seems to be the lack of inherent gender. A small subclass of adjectives (13 members are known in the Western dialect) agrees in gender; some of these are shown in Table~\ref{table:Bruno:adjs}. Other adjectives are invariant (e.g.\ \mar{yaba} `big', \mar{ndom} `bad', \mar{waninggap} `good'). The patterns of exponence largely follow those familiar from nouns with overt gender, with agreement marked by means of changes in the stem-final vowel, except for \mar{VhV} `ripe' which shows a unique pattern of vowel height harmony. Note that some of the adjectives are semantically incompatible with animates, whence the dashes in the table.
\begin{table}[t]
\centering
\begin{tabular}{llllll}
\lsptoprule
Gloss & I \textsc{sg} & II \textsc{sg} & I/II \textsc{pl} & III & IV \\
\midrule
% \mar{} &`' &\mar{} & \mar{} & \mar{} & \mar{} \\
`light (weight)' &\mar{ak\textbf{e}k} & \mar{ak\textbf{u}k} & \mar{ak\textbf{i}k} & \mar{ak\textbf{a}k} & \mar{ak\textbf{i}k} \\
`short' &\mar{dahwag\textbf{e}s} & \mar{dahwag\textbf{u}s} & \mar{dahwag\textbf{i}s} & \mar{dahwag\textbf{i}s} & \mar{dahwag\textbf{i}s} \\
`thin' &\mar{halah\textbf{e}l} & \mar{halah\textbf{u}l} & \mar{halah\textbf{i}l} & \mar{halah\textbf{a}l} & \mar{halah\textbf{i}l} \\
`sharp' & -- & -- & -- & \mar{ya\GH ay\textbf{a}\GH} & \mar{ya\GH ay\textbf{i}\GH} \\
`dull' & -- & -- & -- & \mar{\GH anda\GH \textbf{a}l}& \mar{\GH anda\GH \textbf{i}l} \\
`old, ancient' &\mar{tanam\textbf{e}}&\mar{tanam\textbf{u}} &\mar{tanam\textbf{i}} &\mar{tanam\textbf{a}}&\mar{tanam\textbf{i}}\\
`strong' & \mar{tag\textbf{e}} & \mar{tag\textbf{u}} & \mar{tag\textbf{i}}& \mar{tag\textbf{a}} & \mar{tag\textbf{i}} \\ %\hdashline
`ripe' & -- & -- & -- & \mar{\textbf{e}h\textbf{o}} & \mar{\textbf{i}h\textbf{u}} \\
\lspbottomrule
\end{tabular}
\caption{Gender agreement on adjectives}
\label{table:Bruno:adjs}
\end{table}
The forms of agreeing adjectives are much more regular than those of nouns with overt gender: Gender I and II consistently have /e/ and /u/ as their exponents, and their plural is indicated by /i/; for inanimates, Gender III is largely indicated by /a/, while the pattern of homophony between the I/II plural forms and the Gender IV forms is observed again.
A remarkable exception to these regularities is the adjective `small', whose forms are given in Table~\ref{table:Bruno:small}. This adjective is noteworthy for two reasons. First, it is the only word in the language that distinguishes singular and plural for Gender III and IV. This is done by means of the suppletive stems \mar{isahih} and \mar{wasasu\GH}, neither of which bears any phonological resemblance to the singular stem \mar{papVs}. Following \textcite[168]{Corbett1991} we can say that `small' is \spterm{over-differentiated} since it distinguishes a feature (number of inanimates) which is absent elsewhere in the system. However, one could also argue that `small' does not show true agreement for gender, because the stems involved are suppletive. This is the approach taken by \textcite[362]{Durie1986}, who \textendash{} speaking of verbal number suppletion \textendash{} argues that ``suppletive stems select for rather than agree with the number of their argument''. Either way we look at it, `small' has to be marked as an exceptional item, and does not detract from the generalization that number as a nominal category is restricted to the animates, i.e.\ the members of Gender I and II.
\begin{table}
\centering
\begin{tabular}{lllll}
\lsptoprule
& I & II & III & IV \\
\midrule
\textsc{sg} & \mar{pap\textbf{e}s} & \mar{pap\textbf{u}s} & \mar{pap\textbf{e}s} & \mar{pap\textbf{i}s} \\
\textsc{pl} & \mar{isahih} & \mar{isahih} & \mar{wasasu\GH} & \mar{isahih} \\
\lspbottomrule
\end{tabular}
\caption{Gender agreement on `small'}
\label{table:Bruno:small}
\end{table}
Second, the stems used for `small' in the plural are \mar{isahih} and \mar{wasasu\GH}, of which the former (which is also used as a noun meaning `children, young of animals') is used not only for animates, but also for plural of Gender IV. This would be quite surprising if the syncretism between I/II plural and Gender IV noted so far (e.g.\ the demonstrative \mar{ipe} covering I/II plural and IV) were merely a case of accidental homophony. Below we will see other cases where syncretisms between I/II plural and IV suggest a more profound relationship between the forms.
\section{Agreement and participant indexing on verbs}\label{sec:Bruno:Mar-indexing}
The morphology of the \ili{Coastal Marind} verb is complicated, and nominal gender plays a role within three of the inflectional sites of the verb: in a set of gender-agreeing prefixes, in the person indexing reflecting an \spterm{undergoer} argument, and, somewhat marginally, in the indexing of the \spterm{actor} argument of the verb. The gender-agreeing prefixes are the most straightforward, and behave largely like the non-bound agreeing items that we have seen so far. I will give some examples of gender agreement on the verb below. I contrast gender \spterm{agreement} with bound person marking on the verb, which I refer to as \spterm{indexing}. I will show below that these two phenomena behave quite differently in \ili{Coastal Marind}, so it is convenient to make the terminological distinction between agreement and indexing in the description of the Marind verb.
Several inflectional prefixes are sensitive to the gender of some argument of the verb, although their main function lies in some other domain (e.g.\ tense-mode-aspect) so it is not appropriate to call them `gender prefixes'; rather, they are prefixes of which a sub-string happens to show agreement in gender. Let us take the prefix \mar{Vp-} `\textsc{absc}onditive' as an illustration. Simplifying matters drastically, we can say that this prefix is used when the speaker is drawing attention to some present state-of-affairs that is unavailable to the addressee, either because her attention is on something else, as in (\ref{ex:Bruno:kosi-awe}), or because she made a previous statement contradicting the state-of-affairs that actually holds, as in (\ref{ex:Bruno:epakolaxe}). The question of what argument of the verb controls the gender agreement in the prefixes is complicated, and I will not explore it here. Suffice to note that it is the (intransitive) subject in (\ref{ex:Bruno:kosi-awe}) that is the controller, whereas the Gender I agreement in (\ref{ex:Bruno:epakolaxe}) corresponds to the male recipient-like participant (other constellations would behave differently).
\ea
\label{ex:Bruno:kosi-awe}
(Addressee standing facing away:)\\
\gll kosi-awe up-\O- kwa\GH ita! \\
small-fish(II) \textsc{absc}:II-3\textsc{sg}.\textsc{a}- be.swimming.inside\\
\glt `A little fish is swimming in there!'
\z
\ea
\label{ex:Bruno:epakolaxe}
(Reply to ``You should talk to him!'', female speaker:)\\
\gll ep-ak-o- la\GH-e!\\
\textsc{absc}:I-1.\textsc{a}-3\textsc{sg}.\textsc{dat}- talk-\textsc{ipfv}\\
\glt `I am talking to him!'
\z
Morphologically these prefixes are straightforward, since they have the same forms as the distal demonstrative \mar{Vpe} (betraying a historical relationship), minus the final \mar{-e}. The same holds, for example, for the continuative prefix \mar{anVpand-} which most likely derives from the emphatic pronoun series \mar{anVp} (cf.\ Table~\ref{table:Bruno:targets}). Gender agreement in the prefixal complex then seems to be of relatively recent origin, resulting from the integration of free demonstrative and pronominal elements into the verb. Once more, the syncretism between the Gender I/II plural and Gender IV that was encountered in the nominal targets recurs in the prefixal agreement, so the Absconditive prefix \mar{ip-} would be used with an animate plural controller, or with a noun from Gender IV. However, gender of verbal arguments triggers more dramatic alternations elsewhere in the verb, as we will now see.
I refer to bound person markers on the verb as participant indexing since they express person/number of participants of the verb directly \textendash{} there is no need to say that the affixes in (\ref{ex:Bruno:killyou}) `agree' with some ellipsed or covert argument in the clause.
\ea
\label{ex:Bruno:killyou}
%\\
\gll no- \GH-amuk-e\\
\textsc{1.a}- \textsc{2sg.u}-kill-\textsc{ipfv}\\
\glt `I'm going to kill you.'
\z
\noindent There are also frequent mismatches (`disagreement') within person indexing of a type that is not found in the gender agreement. For example, many intransitive verbs use a suppletive stem with plural subjects, with the additional quirk that actor indexing then is obligatorily 3\textsc{sg} instead of 3\textsc{pl}. Compare the regular verb \mar{dahetok} `return', which employs the expected 3\textsc{pl} indexing, with the suppletive stem \mar{na\GH am} `come (plural subject)' (cf.\ \mar{man} `come (singular subject)').
\ea
\label{ex:Bruno:dahetok}
%\\
\gll na- dahetok\\
3\textsc{pl}.\textsc{a}- return\\
\glt `They returned.'
\z
\ea
\label{ex:Bruno:naxam}
%\\
\gll a- na\GH am\\
3\textsc{sg}.\textsc{a}- many.come\\
\glt `They came.'
\z
For this reason I prefer to maintain a terminological distinction between agreement and indexing in the description of \ili{Coastal Marind}. I use agreement for the prefixes whose shape reflects gender and which apparently derive from relatively recently incorporated pronominal elements, while indexing is used for the markers that primarily code person/number of various argument roles, and often require construction- or verb-specific rules for their description (as in the case of the suppletive verbs above). Having established this, we are now ready to explore how gender is manifested in person indexing on the verb.
Let us start with the indexing of undergoer participants. Since we will be concerned with the difference between animate and inanimate undergoers, the discussion will be restricted to 3rd person forms (1st and 2nd person are always animate). Undergoer indexing is realized by means of intricate changes in the verb stem, and is mainly pre-, in-, or suffixing depending on the conjugation class. I will not attempt to segment the verb stems in the interlinear examples below into morphemes; the morphological details are not of interest here.
Consider the verb `put on a string', which has the following forms when the undergoer is animate:
\ea\label{ex:Bruno:}
%\\
\begin{xlist}
\ex
\gll awe ah- laleh!\\
fish(II) \textsc{imp}- string:3\textsc{sg}.\textsc{u}\\
\glt `String one fish!'
\ex
\gll awe ah- lalah!\\
fish(II) \textsc{imp}- string:3\textsc{pl}.\textsc{u}\\
\glt `String many fish!'
\end{xlist}
\z
\noindent With inanimates from Gender III, a different stem \mar{lalig} is used (\ref{ex:Bruno:lalig}). Recall that no number distinction is made for inanimates, so \mar{lalig} can be used for one or several pieces of meat, fruits, or other inanimate entities as long as they are in Gender III.
\ea\label{ex:Bruno:lalig}
%\\
\gll muy ah- lalig!\\
meat(III) \textsc{imp}- string.inanimate\\
\glt `String the piece(s) of meat!'
\z
\noindent With undergoers from Gender IV, however, the stem used with animate plurals, i.e.\ the 3\textsc{pl} stem \mar{lalah}, is used (\ref{ex:Bruno:lalahIV}). As in the previous example, there is no number distinction, so the cardinality of \mar{baba} (a kind of grass, seeds of which are used for necklaces) has to be inferred from context.
\ea\label{ex:Bruno:lalahIV}
%\\
\gll baba ah- lalah!\\
{Job's Tears(IV)} \textsc{imp}- string:3\textsc{pl}.\textsc{u}\\
\glt `String the \emph{baba} seed(s)!'
\z
\noindent It is remarkable that Gender IV nouns trigger the use of verb stems otherwise used for 3rd person animate plurals, since gender agreement is not manifested elsewhere in person indexing. No distinction is made between Gender I and II, and inanimate stems such as \mar{lalig} generally look like separate lexemes rather than inflectional forms of the verb. Some more examples of alternations are given in (\ref{ex:Bruno:stems}).
\ea\label{ex:Bruno:stems}
Stem alternations according to undergoer\\
\begin{tabular}[t]{llrlrl}
a.& `wrap' \\
& \quad Animate & 3\textsc{sg}: &\mar{ambeh} & 3\textsc{pl}: &\mar{ambah} \\
& \quad Inanimate & III: &\mar{ambam} & IV: &\mar{ambah} \\
b.& `rub (bodypart)' \\
& \quad Animate & 3\textsc{sg}: &\mar{hwahwetok} & 3\textsc{pl}: &\mar{hwahwituk} \\
& \quad Inanimate & III: &\mar{hwahwid} & IV: &\mar{hwahwituk} \\
c.& `eat' \\
& \quad Animate & 3\textsc{sg}: &\mar{aheb} & 3\textsc{pl}: &\mar{hi} \\
& \quad Inanimate & III: &\mar{\GH i} & IV: &\mar{hi} \\
d.& `become' \\
& \quad Animate & 3\textsc{sg}: &\mar{win} & 3\textsc{pl}: &\mar{in} \\
& \quad Inanimate & III: &\mar{ay} & IV: &\mar{in} \\
\end{tabular}
\z
Such verbs differ in the degree of similarity between the different stems, but all employ the same stem for Gender IV undergoers as for 3\textsc{pl} animates. There seem to be no exceptions to this pattern, so if a verb is semantically compatible with both animates and inanimates, then the 3\textsc{pl}/IV stem sharing occurs, regardless of how the remainder of the paradigm is structured. Note also that there is no morphological resemblance to the agreement patterns that we observed for nominals: with the exception of stems like \mar{hwahwituk} `rub many animates' (e.g.\ when scaling fish) or `rub a Gender IV-item' (e.g.\ a knee, \mar{mig}), which shows the high vowels /i u/ associated with gender agreement (e.g.\ \mar{ihu} `ripe:IV'), the vowel alternations seen within the nominal domain are absent. I take this to confirm that gender agreement and participant indexing are two quite distinct phenomena in \ili{Coastal Marind}, and that they have different histories, which renders the conflation of animate 3\textsc{pl} and Gender IV across the two systems the more remarkable.
% A-indexing.
Finally, let us consider other types of participant indexing on the verb. There are three varieties of indexing, all realized by prefixes, in addition to the indexing of undergoers by means of stem alternations. These are indexing of actor, seen in examples (\ref{ex:Bruno:killyou})--(\ref{ex:Bruno:naxam}) above, plus indexing of a recipient-like participant, and what can be described as affected possessor of an argument of the verb. I will not provide examples of the latter two, because inanimate arguments filling recipient- and possessor-like roles are extremely rare in the corpus, and it is not clear whether these indexing mechanisms interact with the gender membership of inanimate arguments. The data from actor indexing are more interesting, so let us have a look at it to see whether Gender IV nouns trigger 3\textsc{pl} indexing in this domain.
Sentences with inanimate nouns functioning as semantic agents are also exceedingly rare in my corpus, since argument NPs headed by such nouns mostly fill patient-like roles. I have made several attempts to elicit sentences in which various things belonging to Gender IV are in violent contact with an animate undergoer (such as fruit falling from a tree, hitting a bystander), i.e.\ verbs that usually provide a good frame for testing all person/number combinations of agent and patient. Speakers were consistent in reporting that only 3\textsc{sg} actor indexing is compatible with IV agents, as in (\ref{ex:Bruno:salex}).
\ea\label{ex:Bruno:salex}
%\\
\gll {sale\GH} a- n-asib\\
inflorescence(IV) 3\textsc{sg}.\textsc{a}- 1.\textsc{u}-hit\\
\glt `The coconut inflorescence (fell and) hit me.'
\z
\noindent If this were the whole story, agent indexing would finally provide an environment where Gender IV nouns were distinguished from animate plurals. However, the generalization only seems to hold for the transitive agent-patient configuration: a small number of examples of agentive intransitives in my corpus, such as \mar{esol} `make noise' (\ref{ex:Bruno:mesin}), unambiguously show 3\textsc{pl} actor indexing with IV nouns (this has also been confirmed in elicitation).
\ea\label{ex:Bruno:mesin}
%\\
\gll yaba-mesin i-pe t-i-k-at-n- esol-e\\
big-machine(IV) IV-that \textsc{giv}-IV-\textsc{prs}-\textsc{prstl}-3\textsc{pl}.\textsc{a}- make.noise-\textsc{ipfv} \\
\glt `The generator is making noise.'
\z
\noindent Not even actor indexing is immune to the IV-as-animate-plural pattern, then. I take the difference in indexing between (\ref{ex:Bruno:salex}) and (\ref{ex:Bruno:mesin}) to reflect semantic restrictions on what participants may be indexed on the verb, so that the inanimate coconut inflorescence in (\ref{ex:Bruno:salex}) is not enough of an agent to be properly indexed (with actor indexing then defaulting to 3\textsc{sg}, which is also the default for avalent verbs). The verb \mar{esol} `make noise' is less picky and admits its sole argument to be fully indexed, thus giving the 3\textsc{pl} prefix. (Recall that agreement is insensitive to number of inanimates, which means that ex.~(\ref{ex:Bruno:mesin}) is equally fine referring to one or more than one generator.)
Whatever the explanations for the subtleties of person indexing turn out to be, the data presented above are roughly consistent with the main point of this and the previous section: in all contexts where \ili{Coastal Marind}, by various grammatical means, distinguishes between gender, number and animacy, nouns of Gender IV systematically pattern with plurals of Gender I and II. This is quite strange given the fact that inanimates do not show grammatical agreement according to their referential cardinality in the language (cf.\ example (\ref{ex:Bruno:Mar-intro3}) above), which makes it difficult to claim that Gender IV should be considered `fixed plural' nouns (pluralia tantum) instead of a gender. Below I will show that some tendencies in the assignment to Gender IV also are consistent with the pluralia tantum analysis, because they involve nouns that are pluralia tantum cross-linguistically. However, I will argue that this can at most be regarded as suggesting a diachronic relationship with pluralia tantum nouns, and that synchronically we must reject the description of the Gender IV nouns as pluralia tantum (\sectref{sec:Bruno:Mar-synchrony}).
\section{Assignment and pluralia tantum as a possible origin for Gender IV}
\label{sec:Bruno:Mar-assignment}
The basic principles behind the assignment of nouns to the four genders were given above: male humans are Gender I, female humans and all animals are Gender II, while inanimates are mostly in Gender III with a (large) residue in Gender IV. I do not believe that there are any clear semantic rules for deciding which of the inanimates go into Gender IV, but there are some tendencies. The only semantic fields that are completely restricted to Gender III seem to be abstracts (e.g.\ \mar{mayan} `language, issue, problem', \mar{sal} `taboo'), names of places and geographical features (\mar{milah} `village', \mar{mamuy} `savannah'), and various intangibles (\mar{matul} `shade', \mar{usus} `afternoon'). Other large semantic fields such as bodyparts and flora are split between Gender III and IV, with very few obvious subdomains assigned to one or the other (flowers is a subdomain that seems to belong to Gender IV). Artifacts are also divided between III and IV, with the only discernible patterns being that almost all bodily decorations are in Gender IV (\mar{segos} `rattan girdle', \mar{himbu} `feathered hairdress'), as well as most recently introduced technology (airplanes, ballpoint pens, diesel generators).
Looking closer, we can see that some of the domains that \cite[630]{Koptjevskaja-Tamm2001} identify as typically including pluralia tantum nouns show overlap with the members of Gender IV. These domains are: \spterm{various heterogeneous substances} (``with many subdivisions'', e.g.\ \ili{Lithuanian} \emph{putos} `foam'), corresponding to \ili{Coastal Marind} IV nouns such as \mar{ndalom} `foam', \mar{ndakindaki} `bioluminescence', \mar{kangging} `layer of crushed seashells on the beach' and \mar{katal} `money'%
\footnote{%
The noun \mar{katal} has a primary use as a Gender III noun, then with the meaning `stone'. South New Guinea is almost completely devoid of stones, and it is extremely unlikely that one encounters two or more naturally occurring stones on the same occasion. The Gender IV noun `money', on the other hand, usually occurs in collections of more than one rupiah banknote. This is an interesting case of cross-classification seemingly involving a difference in plurality.
}%
; \spterm{artificial objects which are clearly internally complex} (e.g.\ \ili{English} \emph{trousers}), corresponding to \ili{Coastal Marind} decorations and modern technology in Gender IV; \spterm{diseases} ``[that] manifest themselves as multiple visible symptoms/spots'' (e.g.\ \ili{English} \emph{measles}), corresponding to names of skin diseases in \ili{Coastal Marind}, which all turn out to be in Gender IV, such as \mar{kambi} `tinea imbricata', \mar{dapadap} `tinea versicolor' and \mar{apupin} `pimple'.
While suggestive, these findings do not form any consistent pattern. The overlap is not found with other pluralia tantum domains such as names of festivities in \ili{Coastal Marind} (e.g.\ \ili{German} \emph{Weihnachten} `Christmas'), and there are numerous exceptions, e.g.\ some artifacts that clearly qualify as internally complex (e.g.\ \mar{kipa} `net') are in Gender III rather than IV. It is also clear that \textendash{} even allowing for some semantic latitude \textendash{} the majority of nouns in Gender IV do not fit into any of Koptjevskaja-Tamm and W\"alchli's categories. I have found no reason why some names of trees are in Gender III, others in Gender IV, and it seems unlikely that plurality should have anything to do with the classification. Similarly, while it is conceivable that many bodyparts in Gender IV are somehow `plural' (e.g.\ \mar{put} `feather', \mar{tatih} `hair', \mar{tiwna} `gums', \mar{halahil} `lungs') there are plenty that are not (\mar{ambay} `uvula')
and some bodyparts seem quite plural but belong to Gender III (\mar{lul} `fur'). As pointed out by an anonymous reviewer, however, most languages with pluralia tantum have a fairly idiosyncratic assignment to the class, so the lack of consistency can hardly be an argument \emph{against} the possibility of Gender IV being related to pluralia tantum.
If we consider there to be at least some tendency for `pluralia tantum concepts' to be in Gender IV, this situation could be seen as consistent with a diachronic scenario where Gender IV started out as a class of pluralia tantum, but then acquired new members through some unknown (analogical?) process, resulting in a large, semantically heterogeneous residue gender, with a small core that still reflects the `plural semantics' of the original pluralia tantum grouping. This scenario is only plausible if (pre-)proto-\ili{Anim} (as opposed to present-day \ili{Coastal Marind}) had a number distinction among inanimate nouns, since this would be required for inanimate pluralia tantum nouns to come into existence. Also, we would expect to find some other \ili{Anim} language that has been more conservative in this regard, and maintains a clearer semantically plural basis for the cognate fourth gender. Unfortunately, there is no systematic data on gender available from other \ili{Anim} languages to see whether such semantics can be associated with Gender IV, nor is there any indication that proto-\ili{Anim} had a number distinction among inanimates. For now this hypothesis remains purely speculative, and it can only be evaluated once there is more data on gender systems in other sub-branches of \ili{Anim}. Still, I believe it is worth spelling out this hypothesis, since it has the merit of providing an explanation for the recurrent pattern of homophony between Gender IV and animate plurals, as well as the surprising phenomenon of the suppletive plural stems triggered by all Gender IV nouns.
Interestingly, a striking parallel to the \ili{Coastal Marind} case is found in the \ili{Ok} family, located in the New Guinean highlands. The \ili{Ok} languages are probably very distant relatives of \ili{Coastal Marind} and the other \ili{Anim} languages as both families are proposed members of the large \ili{Trans-New Guinea} phylum (\citealt{Fedden2011, Usher2015}). I believe that the \ili{Ok} data support the idea that the similarities between the fourth gender of \ili{Coastal Marind} (and other \ili{Anim} languages) and what is described as pluralia tantum nouns in other languages are not coincidental, and perhaps that a diachronic relationship between these categories is plausible.
\largerpage[-1]
The best described \ili{Ok} language, \ili{Mian}, has a 4-gender system distinguishing Masculine, Feminine, and two inanimate genders \textendash{} this is the same division as in the gender systems of the \ili{Anim} languages.%
\footnote{Sebastian Fedden (pers.\ comm.\@) adds the caveat that little is known about the gender systems of other \ili{Ok} languages, so we do not know how representative the \ili{Mian} system is for \ili{Ok} in general. More descriptive work will be necessary for a fuller picture of the similarities and differences between the \ili{Anim} and \ili{Ok} gender systems.
} %
The exponents of Masculine and Feminine resemble the ones found on demonstratives in \ili{Coastal Marind} (Fedden~\citeyear[170]{Fedden2011}, Usher \& Suter~\citeyear[118]{Usher2015}): the \ili{Mian} Masculine article \emph{=e}, the Feminine \emph{=o}, and \textsc{m}/\textsc{f} plural \emph{=i} correspond to \ili{Coastal Marind} Gender I \mar{epe}, Gender II \mar{upe} and Gender I/II plural \mar{ipe} respectively. The phonological similarities might be due to chance, however, and I am not aware of any other evidence that the gender systems of the two families are cognate. Neuter 1 (the third gender) differs from the \ili{Coastal Marind} inanimates in distinguishing singular and plural (\textsc{sg} \emph{=e}, \textsc{pl} \emph{=o}). The most interesting gender is the fourth (``Neuter 2'') which is invariant for number, and shows homophony with the plural of Neuter 1 (\textsc{sg}/\textsc{pl} article \emph{=o}).
It is interesting that both \ili{Coastal Marind} and \ili{Mian} have one gender that shares its exponents with plurals, but note that the pattern of syncretism is different (homophony with inanimate plural in \ili{Mian}, but with animate plural in \ili{Coastal Marind}), and could have arisen by chance since both languages have relatively few vowels to choose from (5 in \ili{Coastal Marind}, 6 in \ili{Mian}). Speaking against accidental homophony is the fact that even in cases where several paradigm slots are filled by unpredictable gender exponents, Neuter 2 invariably patterns with the plural of Neuter 1 \citep[178--179]{Fedden2011}.
A further argument against the possibility of chance homophony between the \ili{Mian} Neuter 2 and the plural of Neuter 1 is the fact that the nouns that are assigned to Neuter 2 match the pluralia tantum domains listed by Koptjevskaja-Tamm and W\"alchli quite well \textendash{} better than the \ili{Coastal Marind} Gender IV nouns do. Assigned to \ili{Mian} Neuter 2 we find: places (e.g.\ \emph{bib} `village, place'), heterogeneous substances (e.g.\ \emph{difib} `rubbish', \emph{mon\^i} `money'), body decoration (e.g.\ \emph{am\'un} `hole in nosetip'), various abstracts and temporal nouns (e.g.\ \emph{am} `day'), illnesses (e.g.\ \emph{kl\=o} `ringworm'), various artifacts (e.g.\ \emph{it\'o} `tongs', \emph{aiglas} `glasses') and bodyparts, most of which seem to consist of multiple parts (e.g.\ \emph{ab\'o} `testicles', \emph{amunt\^em} `intestines, belly', \emph{wan\'aan} `feather').\footnote{One instance of cross-classification is striking: \ili{Mian} \emph{b\'em} `worm' (masculine gender) can also mean `noodles', and then belongs to Neuter 2; cf.\ \ili{Coastal Marind} \mar{alalin} `tapeworm' (Gender II), meaning `noodles' in Gender IV.}
Fedden does not consider the alternative analysis according to which the\linebreak Neuter~2 nouns are pluralia tantum nouns belonging to Neuter 1, and I will not pursue that issue here.%
\footnote{The reader is referred to \textcite{Corbett2017}.
} %
However, I interpret the parallelism between \ili{Coastal Marind} Gender IV and \ili{Mian} Neuter 2 as further evidence that the connection between fixed plural and fourth gender in \ili{Coastal Marind} is no coincidence, as this pattern would not arise independently in the two languages by chance. At this stage it is impossible to tell why the gender systems of \ili{Ok} and \ili{Anim} share these similarities. The two families are most likely related as members of the \ili{Trans-New Guinea} stock, but this relationship is extremely distant and must go back long in time. There is at present no evidence that the gender systems were inherited from some common ancestor, although this would account for the similarities in the gender exponents mentioned above. One could also speculate that the gender systems evolved in parallel at a time when speakers of \ili{Ok} and \ili{Anim} languages were in closer contact, but more research remains to be done before we can say anything about the contact between these ancestral populations.
Regardless of whether the similarities between \ili{Ok} and \ili{Anim} are the result of common inheritance or contact, it seems to me that the simplest explanation is that both the \ili{Anim} fourth gender and the \ili{Mian} Neuter 2 developed from pluralia tantum nouns, which explains e.g.\ the use of suppletive agreement targets in \ili{Coastal Marind} and the fact that many of the \ili{Mian} Neuter 2 nouns (and some of the Gender IV nouns in \ili{Coastal Marind}) have meanings that are found among pluralia tantum cross-linguistically. This hypothesis can be tested only through more descriptive and comparative work on the two families.
Even if it is correct, it would still remain to be shown in detail how a 3-gender system with a large number of pluralia tantum nouns can develop into a 4-gender system lacking number distinction in inanimates, as in present-day \ili{Coastal Marind}.
\section{The synchronic analysis of Gender IV}
\label{sec:Bruno:Mar-synchrony}
Having suggested that the \ili{Coastal Marind} Gender IV originated as a pluralia tantum class, we now need to address the synchronic status of Gender IV. Should we maintain the 4-gender analysis, or opt for the more economical 3-gender analysis according to which the members of the former fourth gender are Gender I or II nouns that just happen to be lexically specified as plural? I believe that this is an important analytical question \textendash{} not a mere question of which labels to stick where \textendash{} since the two possible descriptions result in wildly different systems in terms of assignment.
The literature contains some discussion of the possibility of analyzing pluralia tantum as a separate gender, in various languages. \cite[233--239]{Corbett2012} provides instructive discussion of such suggestions for \ili{Cushitic}, \ili{Chadic} and \ili{Russian}, and argues that the pluralia-tantum-as-gender analysis is untenable for all the proposed cases (i.e., the opposite of the established descriptions of \ili{Coastal Marind} and \ili{Mian}). For example, \cite{Zaliznjak1964} proposed to describe \ili{Russian} pluralia tantum nouns such as \emph{sani} `sledge(s)' as making up their own gender, since they form a unique agreement class within the system. \cite[237--238]{Corbett2012} points out that the same analysis applied to \ili{Bosnian}/\ili{Croatian}/\ili{Serbian} would produce no less than three extra genders, since this three-gender system (as opposed to \ili{Russian}) has separate plural forms for each gender, each of which contains pluralia tantum that would be reanalyzed as separate genders. This is unacceptable, so Corbett rejects the analysis for \ili{Russian} as well.
On a more general level, Corbett argues that pluralia-tantum-as-gender analyses are misinformed, since ``the special behaviour which creates the extra agreement class is not \emph{gender} but \emph{number}'' (\citealt[238]{Corbett2012}; emphasis in original). According to Corbett, proponents of pluralia-tantum-as-gender analyses mistakenly think that since pluralia tantum nouns need to be lexically specified for a morphosyntactic value (in this case number), they are just like other nouns \textendash{} which are also lexically specified, for gender \textendash{} and therefore belong to a gender of their own. Instead, the correct way is to treat them as exceptionally specified for number, and leave the gender system as it is. I interpret Corbett's remarks as a principled stance against analyses claiming that pluralia tantum nouns make up a gender.\footnote{In fact, Corbett says explicitly that this is what he means: ``Having not accepted Zaliznjak's careful and considered analysis of certain \ili{Russian} pluralia tantum nouns as an additional gender value, I am even less ready to entertain other less convincing proposals along similar lines.'' (p.~238).}
In spite of Corbett's reservations, I prefer to maintain the Drabbian analysis of Gender IV as a gender, and not as pluralia tantum of Gender I or II, although I concede that the morphosyntactic evidence for this analysis is somewhat nebulous. We saw that the exponents of Gender IV agreement are identical to the ones marking the plural of Gender I and II, no matter how irregular the alternations of the relevant target are. Verb stem alternations indexing undergoers likewise treat Gender IV and plurals of I/II identically, despite being seemingly unrelated to the agreement patterns of demonstratives and other categories in the non-verbal domains. The only domain where Gender IV nouns do not always pattern with I/II plural is actor indexing (and, possibly, recipient and possessor indexing) on verbs; however, I suspect that this reflects some general constraint against inanimates filling such participant roles, so the diagnostic role of these constructions is unclear.
But consider the consequences of abandoning the gender analysis in favour of the pluralia tantum analysis. If the members of Gender IV are considered pluralia tantum, they would make up an unexpectedly large portion of the lexicon. Assuming that the currently available numbers (Table~\ref{table:Bruno:numbers}) are representative of gender membership, one out of five nouns would be pluralia tantum. This seems strange from the European perspective, but sheer frequency can hardly be a decisive argument. More seriously, the system of semantic assignment (males in I, females and animals in II, inanimates in III and IV) would break down, since we would have to claim that Gender I and II contain a fairly random mix of animates and inanimates (all of which happen to be pluralia tantum), with non-pluralia tantum inanimates confined to Gender III.
The resulting system would also be typologically odd in the way it fails to align with the Animacy Hierarchy (\citealt{Smith-Stark1974}, \citealt[55ff.]{Corbett2000}). The hierarchy states that if there is a difference in the availability of a number distinction between e.g.\ animates and inanimates, then it will be animates that make the distinction and inanimates that lack it. \citet[59]{Corbett2000} cites \ili{Coastal Marind} as an example of a language with a clear split between animates (which trigger singular/plural agreement) and inanimates (which make no distinction according to number). In the new system, we would have to say that number is relevant for a fifth of the inanimates, although these happen to be lexically specified for plural only.
I take these consequences to be unacceptable, so the 4-gender analysis must be preferred. This comes at the price of not adhering to a strictly morphosyntactic approach to the identification of genders in \ili{Coastal Marind}, because the formal facts alone do not provide clear evidence that the four-gender description is to be preferred over a three-gender description with a large number of pluralia tantum.
\section{Conclusion}
Besides the descriptive contribution of this paper (most of which can be extracted, with some effort, from Drabbe's grammar), I consider the main points to be (1) the evidence that Usher \& Suter's (\citeyear{Usher2015}) suggestion that overt, stem-internal gender marking originated from umlaut also explains patterns in the distribution of stem-final vowels of invariant nouns within Gender III and IV; and (2) the description of the ambiguous status of the nouns in Gender IV, which led me to speculate that an earlier 3-gender system was extended into a 4-gender system, and that the 4th gender originally was a grouping of pluralia tantum nouns. As mentioned above, the idea that gender systems can be extended through the reinterpretation of a non-gender feature as gender is not new, and if the suggestions based on \ili{Coastal Marind} data are correct, the \ili{Anim} languages (and the distantly related \ili{Ok} family) would provide a clear case where a gender system became more complex because of a very specific type of interaction with number.
\section*{Acknowledgments}
I am very grateful to the \ili{Coastal Marind} speakers with whom I work, especially Petrus Kilub and Rafael Samkakai who spent many hours with me rechecking the gender of nouns. I wish to thank Matthew Lou-Magnuson for suggesting the use of resampling methods in \sectref{sec:Bruno:4genders} and Thomas H\"orberg for implementation; I alone am responsible for mistakes in the interpretation of the data. I also acknowledge the extremely helpful comments given to me by Edgar Suter, Sebastian Fedden, Bernhard Wälchli, Francesca Di Garbo, Lea Brown, and the anonymous reviewers.
\section*{Special abbreviations}
\noindent The following abbreviations are not found in the Leipzig Glossing Rules:
\medskip
\begin{tabular}{llll}
\textsc{a} &actor & \textsc{prstl} & presentational \\
\textsc{absc} & absconditive & \textsc{u} & undergoer \\
\textsc{giv} & givenness marker &&\\
\end{tabular}
\sloppy
\printbibliography[heading=subbibliography,notkeyword=this]
\end{document}
%!TEX root = ../OGUSAdoc.tex
In \ogindia, the government enters by levying taxes on households, providing transfers to households, levying taxes on firms, spending resources on public goods, and making rule-based adjustments to stabilize the economy in the long-run. It is this last activity that is the focus of this chapter.
\section{Government Tax Revenue}\label{SecUnbalGBCrev}
We see from the household's budget constraint that taxes $T_{s,t}$ and transfers $TR_{t}$ enter into the household's decision,
\begin{equation}\tag{\ref{EqHHBC}}
\begin{split}
c_{j,s,t} + b_{j,s+1,t+1} &= (1 + r_{t})b_{j,s,t} + w_t e_{j,s} n_{j,s,t} + \zeta_{j,s}\frac{BQ_t}{\lambda_j\omega_{s,t}} + \eta_{j,s,t}\frac{TR_{t}}{\lambda_j\omega_{s,t}} - T_{s,t} \\
&\quad\forall j,t\quad\text{and}\quad s\geq E+1 \quad\text{where}\quad b_{j,E+1,t}=0\quad\forall j,t
\end{split}
\end{equation}
where we defined the tax liability function $T_{s,t}$ in \eqref{EqTaxCalcLiabETR} as an effective tax rate times total income and the transfer distribution function $\eta_{j,s,t}$ is uniform across all households as in \eqref{EqTaxCalcEtajs}. Government revenue is also affected by the corporate income tax rate $\tau^{corp}$ and the rate of depreciation expensing $\delta^\tau$, both of which enter the firms' profit function.
\begin{equation}\tag{\ref{EqFirmsProfit}}
PR_t = (1 - \tau^{corp})\bigl(Y_t - w_t L_t\bigr) - \bigl(r_t + \delta\bigr)K_t + \tau^{corp}\delta^\tau K_t \quad\forall t
\end{equation}
We define total government revenue from taxes as the following.
\begin{equation}\label{EqUnbalGBCgovRev}
Rev_t = \underbrace{\tau^{corp}\bigl[Y_t - w_t L_t\bigr] - \tau^{corp}\delta^\tau K_t}_{\text{corporate tax revenue}} + \underbrace{\sum_{s=E+1}^{E+S}\sum_{j=1}^J\lambda_j\omega_{s,t}\tau^{etr}_{s,t}\left(x_{j,s,t},y_{j,s,t}\right)\bigl(x_{j,s,t} + y_{j,s,t}\bigr)}_{\text{household tax revenue}} \quad\forall t
\end{equation}
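To make the aggregation in \eqref{EqUnbalGBCgovRev} concrete, a minimal NumPy sketch follows. The array names and the function signature are illustrative (they are not the \ogindia source code), and the effective tax rates are assumed to be passed in as a pre-computed $S\times J$ array.

\begin{verbatim}
import numpy as np

def total_revenue(Y, w, L, K, tau_corp, delta_tau,
                  lambdas, omega, etr, x, y):
    # Corporate tax revenue: tau_corp*(Y - w*L) - tau_corp*delta_tau*K
    corp_rev = tau_corp * (Y - w * L) - tau_corp * delta_tau * K
    # Household tax revenue: sum over ages s and types j of
    # lambda_j * omega_{s,t} * etr_{s,j} * (x_{s,j} + y_{s,j})
    hh_rev = np.sum(omega[:, None] * lambdas[None, :] * etr * (x + y))
    return corp_rev + hh_rev
\end{verbatim}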
\section{Government Budget Constraint}\label{SecUnbalGBCbudgConstr}
Let the level of government debt in period $t$ be given by $D_t$. The government budget constraint requires that government revenue $Rev_t$ plus the budget deficit ($D_{t+1} - D_t$) equal interest payments on the debt, government spending on public goods $G_t$, and total transfer payments to households $TR_t$ every period $t$.
\begin{equation}\label{EqUnbalGBCbudgConstr}
D_{t+1} + Rev_t = (1 + r_t)D_t + G_t + TR_t \quad\forall t
\end{equation}
We assume that total government transfers to households are a fixed fraction $\alpha_{tr}$ of GDP each period.
\begin{equation}\label{EqUnbalGBCtfer}
TR_t = g_{tr,t}\:\alpha_{tr}\: Y_t \quad\forall t
\end{equation}
The time dependent multiplier $g_{tr,t}$ in front of the right-hand-side of \eqref{EqUnbalGBCtfer} will equal 1 in most initial periods. It will potentially deviate from 1 in some future periods in order to provide a closure rule that ensures a stable long-run debt-to-GDP ratio. We will discuss the closure rule in Section \ref{SecUnbalGBCcloseRule}.
We also assume that government spending on public goods is a fixed fraction of GDP each period in the initial periods.
\begin{equation}\label{EqUnbalGBC_Gt}
G_t = g_{g,t}\:\alpha_{g}\: Y_t
\end{equation}
Similar to transfers $TR_t$, the time dependent multiplier $g_{g,t}$ in front of the right-hand-side of \eqref{EqUnbalGBC_Gt} will equal 1 in most initial periods. It will potentially deviate from 1 in some future periods in order to provide a closure rule that ensures a stable long-run debt-to-GDP ratio. We make this more specific in the next section.
\section{Interest Rate on Government Debt}\label{SecRateWedge}
Despite the model having no aggregate risk, it may be helpful to build in an interest rate differential between the rate of return on private capital and the interest rate on government debt. Doing so helps to add realism by including a risk premium. \ogindia allows users to set an exogenous wedge between these two rates. The interest rate on government debt is given by
\begin{equation}\label{EqUnbalGBC_rate_wedge}
r_{gov, t} = (1 - \tau_{d, t})r_{t} - \mu_{d}
\end{equation}
The two parameters, $\tau_{d,t}$ and $\mu_{d}$, can be used to allow for a government interest rate that is a percentage haircut from the market rate or a government interest rate with a constant risk premium.
In the cases where there is a differential ($\tau_{d,t} \neq 0$ or $\mu_{d} \neq 0$), we need to be careful to specify how the household chooses government debt and private capital in its portfolio of asset holdings. We make the assumption that under the exogenous interest rate wedge, the household is indifferent between holding its assets as debt and private capital. This amounts to an assumption that these two assets are perfect substitutes given the exogenous wedge in interest rates. Given the indifference between government debt and private capital at these two interest rates, we assume that the household holds debt and capital in the same ratio in which debt and capital are demanded by the government and private firms, respectively. The interest rate on the household portfolio of assets is thus given by:
\begin{equation}\label{EqUnbalGBC_rate_hh}
r_{hh,t} = \frac{r_{gov,t}D_{t} + r_{t}K_{t}}{D_{t} + K_{t}}
\end{equation}
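A compact sketch of the two rates is given below (illustrative only, not the \ogindia implementation).

\begin{verbatim}
def r_gov(r, tau_d, mu_d):
    # government rate: haircut tau_d and constant wedge mu_d on the
    # market rate r
    return (1.0 - tau_d) * r - mu_d

def r_hh(r, D, K, tau_d, mu_d):
    # household portfolio rate: average of the government and market
    # rates, weighted by the debt D and private capital K held
    return (r_gov(r, tau_d, mu_d) * D + r * K) / (D + K)
\end{verbatim}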
\section{Budget Closure Rule}\label{SecUnbalGBCcloseRule}
If total government transfers to households $TR_t$ and government spending on public goods $G_t$ are both fixed fractions of GDP, one can imagine corporate and household tax structures that cause the debt level of the government to either tend toward infinity or to negative infinity, depending on whether too little revenue or too much revenue is raised, respectively.
A virtue of dynamic general equilibrium models is that the model must be stationary in order to solve it. That is, no variables can be indefinitely growing as time moves forward. The labor augmenting productivity growth $g_y$ from Chapter \ref{Chap_Firms} and the potential population growth $\tilde{g}_{n,t}$ from Chapter \ref{Chap_Demog} render the model nonstationary. But we show how to stationarize the model against those two sources of growth in Chapter \ref{Chap_Stnrz}. However, even after stationarizing the effects of productivity and population growth, the model could be rendered nonstationary and, therefore, not solvable if government debt were becoming too positive or too negative too quickly.
The \ogindia model offers three different options for budget closure rules. Each rule uses some combination of changes in government spending on public goods $G_t$ and government transfers to households $TR_t$ to stabilize the debt-to-GDP ratio in the long-run.
\begin{enumerate}
\item Change only government spending on public goods $G_t$.
\item Change only government transfers to households $TR_t$.
\item Change both government spending $G_t$ and transfers $TR_t$ by the same percentage.
\end{enumerate}
\subsection{Change government spending only}\label{SecUnbalGBC_chgGt}
We specify a closure rule that is automatically implemented after some period $T_{G1}$ to stabilize government debt as a percent of GDP (debt-to-GDP ratio). Let $\alpha_D$ represent the long-run debt-to-GDP ratio at which we want the economy to eventually settle.
\begin{equation}\label{EqUnbalGBCclosure_Gt}
\begin{split}
&G_t = g_{g,t}\:\alpha_{g}\: Y_t \\
&\text{where}\quad g_{g,t} =
\begin{cases}
1 \qquad\qquad\qquad\qquad\qquad\qquad\qquad\:\:\:\,\text{if}\quad t < T_{G1} \\
\frac{\left[\rho_{d}\alpha_{D}Y_{t} + (1-\rho_{d})D_{t}\right] - (1+r_{t})D_{t} - TR_{t} + Rev_{t}}{\alpha_g Y_t} \quad\text{if}\quad T_{G1}\leq t<T_{G2} \\
\frac{\alpha_{D}Y_{t} - (1+r_{t})D_{t} - TR_{t} + Rev_{t}}{\alpha_g Y_t} \qquad\qquad\quad\:\:\:\,\text{if}\quad t \geq T_{G2}
\end{cases} \\
&\quad\text{and}\quad g_{tr,t} = 1 \quad\forall t
\end{split}
\end{equation}
The first case in \eqref{EqUnbalGBCclosure_Gt} says that government spending $G_t$ will be a fixed fraction $\alpha_g$ of GDP $Y_t$ for every period before $T_{G1}$. The second case specifies that, starting in period $T_{G1}$ and continuing until before period $T_{G2}$, government spending be adjusted to set tomorrow's debt $D_{t+1}$ to be a convex combination between $\alpha_D Y_t$ and the current debt level $D_t$, where $\alpha_D$ is a target debt-to-GDP ratio and $\rho_d\in(0,1]$ is the percent of the way to jump toward the target $\alpha_D Y_t$ from the current debt level $D_t$. The last case specifies that, for every period after $T_{G2}$, government spending $G_t$ is set such that the next-period debt be a fixed target percentage $\alpha_D$ of GDP.
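As a concrete illustration, the rule in \eqref{EqUnbalGBCclosure_Gt} can be written as a few lines of Python. This is only a sketch under the notation above; it is not the \ogindia implementation, which may differ in variable names and timing conventions.
\begin{verbatim}
# Illustrative sketch of the spending-only closure rule (not OG-India source)
def g_g(t, Y, D, r, TR, Rev, alpha_g, alpha_D, rho_d, T_G1, T_G2):
    # scaling factor g_{g,t} in G_t = g_{g,t} * alpha_g * Y_t
    if t < T_G1:
        return 1.0
    if t < T_G2:
        # move a fraction rho_d of the way toward the target debt alpha_D * Y_t
        D_next = rho_d * alpha_D * Y + (1.0 - rho_d) * D
    else:
        # hit the target debt-to-GDP ratio exactly
        D_next = alpha_D * Y
    return (D_next - (1.0 + r) * D - TR + Rev) / (alpha_g * Y)
\end{verbatim}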
\subsection{Change government transfers only}\label{SecUnbalGBC_chgTRt}
If government transfers to households are specified by \eqref{EqUnbalGBCtfer} and the long-run debt-to-GDP ratio can only be stabilized by changing transfers, then the budget closure rule must be the following.
\begin{equation}\label{EqUnbalGBCclosure_TRt}
\begin{split}
&TR_t = g_{tr,t}\:\alpha_{tr}\: Y_t \\
&\text{where}\quad g_{tr,t} =
\begin{cases}
1 \qquad\qquad\qquad\qquad\qquad\qquad\qquad\:\text{if}\quad t < T_{G1} \\
\frac{\left[\rho_{d}\alpha_{D}Y_{t} + (1-\rho_{d})D_{t}\right] - (1+r_{t})D_{t} - G_{t} + Rev_{t}}{\alpha_{tr} Y_t} \quad\text{if}\quad T_{G1}\leq t<T_{G2} \\
\frac{\alpha_{D}Y_{t} - (1+r_{t})D_{t} - G_{t} + Rev_{t}}{\alpha_{tr} Y_t} \qquad\qquad\quad\:\:\:\:\text{if}\quad t \geq T_{G2}
\end{cases} \\
&\quad\text{and}\quad g_{g,t} = 1 \quad\forall t
\end{split}
\end{equation}
The first case in \eqref{EqUnbalGBCclosure_TRt} says that government transfers $TR_t$ will be a fixed fraction $\alpha_{tr}$ of GDP $Y_t$ for every period before $T_{G1}$. The second case specifies that, starting in period $T_{G1}$ and continuing until before period $T_{G2}$, government transfers be adjusted to set tomorrow's debt $D_{t+1}$ to be a convex combination between $\alpha_D Y_t$ and the current debt level $D_t$. The last case specifies that, for every period after $T_{G2}$, government transfers $TR_t$ are set such that the next-period debt be a fixed target percentage $\alpha_D$ of GDP.
\subsection{Change both government spending and transfers}\label{SecUnbalGBC_chgGtTRt}
In some cases, changing only government spending $G_t$ or only government transfers $TR_t$ will not be enough. That is, there exist policies for which a decrease in government spending to zero after period $T_{G1}$ will not stabilize the debt-to-GDP ratio, and negative government spending on public goods does not make sense.\footnote{Negative values for government spending on public goods would mean that revenues are coming into the country from some outside source, triggered by government deficits being too high in an arbitrary future period $T_{G2}$.} Negative transfers, on the other hand, do make sense. Nevertheless, one might want the added stabilization ability of changing both government spending $G_t$ and transfers $TR_t$ to stabilize the long-run debt-to-GDP ratio.
In our specific form of this joint option, we assume that the factor by which we scale government spending and transfers is the same $g_{g,t} = g_{tr,t}$ for all $t$. We label this single scaling factor $g_{trg,t}$.
\begin{equation}\label{EqUnbalGBCclosure_gTRGt}
g_{trg,t}\equiv g_{g,t} = g_{tr,t} \quad\forall t
\end{equation}
If government spending on public goods is specified by \eqref{EqUnbalGBC_Gt}, government transfers to households are specified by \eqref{EqUnbalGBCtfer}, and the long-run debt-to-GDP ratio can only be stabilized by changing both spending and transfers, then the budget closure rule must be the following.
\begin{equation}\label{EqUnbalGBCclosure_TRGt}
\begin{split}
&G_t + TR_t = g_{trg,t}\left(\alpha_g + \alpha_{tr}\right)Y_t \quad\Rightarrow\quad G_t = g_{trg,t}\:\alpha_g\: Y_t \quad\text{and}\quad TR_t = g_{trg,t}\:\alpha_{tr}\:Y_t \\
&\text{where}\quad g_{trg,t} =
\begin{cases}
1 \qquad\qquad\qquad\qquad\qquad\qquad\:\:\:\,\text{if}\quad t < T_{G1} \\
\frac{\left[\rho_{d}\alpha_{D}Y_{t} + (1-\rho_{d})D_{t}\right] - (1+r_{t})D_{t} + Rev_{t}}{\left(\alpha_g + \alpha_{tr}\right)Y_t} \quad\text{if}\quad T_{G1}\leq t<T_{G2} \\
\frac{\alpha_{D}Y_{t} - (1+r_{t})D_{t} + Rev_{t}}{\left(\alpha_g + \alpha_{tr}\right)Y_t} \qquad\qquad\quad\:\:\:\:\text{if}\quad t \geq T_{G2}
\end{cases}
\end{split}
\end{equation}
The first case in \eqref{EqUnbalGBCclosure_TRGt} says that government spending $G_t$ and government transfers $TR_t$ will be their respective fixed fractions $\alpha_g$ and $\alpha_{tr}$ of GDP $Y_t$ for every period before $T_{G1}$. The second case specifies that, starting in period $T_{G1}$ and continuing until before period $T_{G2}$, government spending and transfers be adjusted by the same rate to set tomorrow's debt $D_{t+1}$ to be a convex combination between $\alpha_D Y_t$ and the current debt level $D_t$. The last case specifies that, for every period after $T_{G2}$, government spending and transfers are set such that the next-period debt be a fixed target percentage $\alpha_D$ of GDP.
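Continuing the illustrative sketch from Section \ref{SecUnbalGBC_chgGt} (again, not the \ogindia source code), the common scaling factor $g_{trg,t}$ in \eqref{EqUnbalGBCclosure_TRGt} differs only in that transfers no longer appear in the numerator and the denominator uses $\alpha_g + \alpha_{tr}$.
\begin{verbatim}
# Illustrative sketch of the combined closure rule (not OG-India source)
def g_trg(t, Y, D, r, Rev, alpha_g, alpha_tr, alpha_D, rho_d, T_G1, T_G2):
    # common scaling factor applied to both G_t and TR_t
    if t < T_G1:
        return 1.0
    if t < T_G2:
        D_next = rho_d * alpha_D * Y + (1.0 - rho_d) * D
    else:
        D_next = alpha_D * Y
    return (D_next - (1.0 + r) * D + Rev) / ((alpha_g + alpha_tr) * Y)
\end{verbatim}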
Each of these budget closure rules \eqref{EqUnbalGBCclosure_Gt}, \eqref{EqUnbalGBCclosure_TRt}, and \eqref{EqUnbalGBCclosure_TRGt} allows the government to run increasing deficits or surpluses in the short run (before period $T_{G1}$). But then the adjustment rule is implemented gradually beginning in period $t=T_{G1}$ to return the debt-to-GDP ratio back to its long-run target of $\alpha_D$. Then the rule is implemented exactly in period $T_{G2}$ by adjusting some combination of government spending $G_t$ and transfers $TR_t$ to set the debt $D_{t+1}$ such that it is exactly $\alpha_D$ proportion of GDP $Y_t$.
\section{Some Caveats and Alternatives}\label{SecUnbalGBCcaveat}
\ogindia adjusts some combination of government spending $G_t$ and government transfers $TR_t$ as its closure rule instrument because of the simplicity of this instrument and its lack of distortionary effects. Since government spending does not enter the household's utility function, its level does not affect the solution of the household problem. In contrast, government transfers do appear in the household budget constraint. However, household decisions do not individually affect the amount of transfers, thereby rendering government transfers exogenous from the household's perspective. As an alternative, one could choose to adjust taxes to close the budget (or a combination of all of the government fiscal policy levers).
There is no guarantee that any of our stated closure rules \eqref{EqUnbalGBCclosure_Gt}, \eqref{EqUnbalGBCclosure_TRt}, or \eqref{EqUnbalGBCclosure_TRGt} is sufficient to stabilize the debt-to-GDP ratio in the long run. For large and growing deficits, the convex combination parameter $\rho_d$ might be too gradual, the budget closure initial period $T_{G1}$ might be too far in the future, or the target debt-to-GDP ratio $\alpha_D$ might be too high. Any of these problems might become apparent at the steady-state computation stage. However, it is also possible for the steady state to exist but for the time path never to reach it. These problems can be avoided by choosing conservative values for $T_{G1}$, $\rho_d$, and $\alpha_D$ that close the budget quickly.
And finally, in closure rules \eqref{EqUnbalGBCclosure_Gt} and \eqref{EqUnbalGBCclosure_TRGt}, in which government spending is used to stabilize the long-run budget, it is also possible that government spending is forced to be less than zero to make this happen. This would be the case if tax revenues bring in less than is needed to finance transfers and interest payments on the national debt. None of the equations we have specified above precludes that result, but it does raise conceptual difficulties. Namely, what does it mean for government spending to be negative? Is the government selling off public assets? We caution those using this budget closure rule to consider carefully how the budget is closed in the long run given their parameterization. We also note that such difficulties present themselves across all budget closure rules when analyzing tax or spending proposals that induce structural budget deficits. In particular, one probably needs a different closure instrument if government spending must be negative in the steady state to hit the long-run debt-to-GDP target.
\chapter{Environment}\label{chapter:environment}
In this chapter we discuss the simulation environment that we used or developed in our experiments. It is divided into four sections: HFO Server's Referee, the changes we made to HFO's referee; Feature Set, the features we chose to receive from HFO's environment; Actions, the output of our neural networks; and Reward System, the learning signal for our neural networks.
\section{HFO Server's Referee}
The Half Field Offense (HFO) environment follows a simple principle: each episode starts with the ball positioned at midfield. That is not applicable to our target domain. We made some changes to HFO's server due to this incompatibility of domains:
\begin{itemize}
\item HFO's referee restricts all agents to the right side of the field. Our problem is more complex because the team must also defend on the opponent's side. Our referee covers the whole field.
\item Given two areas O and T, opponents spawn randomly in O and teammates spawn randomly in T. We need spawn positions that resemble a real attacking situation in Soccer Simulation 2D. For the defending team, we fixed a specific x-coordinate for each type of agent and randomized the y-coordinate; the midfielders start in front of the defenders. The attacking team follows the same logic for its attackers and midfielders, except that we do not randomize the y-coordinate of the attackers and instead fix it at 0.
\item In our environment the ball always starts near the midfielders of the attacking team. In this way we simulate an opponent counterattack.
\item We use a noise-free server during the agents' initial learning. Once well trained, the agents can continue training in a noisy system.
\end{itemize}
\section{Feature Set}
HFO's high-level feature set returns many features that are not relevant to our problem, such as the opening angle to the opponent's goal or the pass opening angle to a teammate. We removed those variables so that the models can more easily learn what to do. Another change concerns feature normalization: the original environment returned features normalized with respect to the half-field problem. Since our problem is broader, the agents share the same space, and the feature definitions are unambiguous, we kept the features unnormalized.
Let $T$ denote the number of teammates in the game and $O$ the
number of opponents. There are a total of $10 + 3T + 2O + 1$ high-level
features in our environment.
\begin{enumerate}[noitemsep]
\setcounter{enumi}{-1}
\item{\textbf{X position} - The agent’s x-position on the field.}
\item{\textbf{Y position} - The agent’s y-position on the field.}
\item{\textbf{Orientation} - The global direction that the agent is facing.}
\item{\textbf{Ball X} - The ball's x-position on the field.}
\item{\textbf{Ball Y} - The ball's y-position on the field.}
\item{\textbf{Able to Kick} - Boolean indicating if the agent can kick the ball.}
\item{\textbf{Goal Center Proximity} - Agent's proximity to the center of the goal.}
\item{\textbf{Goal Center Angle} - Angle from the agent to the center of the goal.}
\item{\textbf{Proximity to Opponent} - If an opponent is present,
proximity to the closest opponent. Invalid if there are no
opponents.}
\item [$T$] {\textbf{Proximity from Teammate i to Opponent} - For each
teammate i: the proximity from the teammate to the closest
opponent. This feature is invalid if there are no opponents or if
teammates are present but not detected.}
\item [$2T$] {\textbf{X, Y of Teammates} - For each teammate: the x-position, y-position.}
\item [$2O$] {\textbf{X, Y of Opponents} - For each opponent: the x-position, y-position.}
\item [$+1$] {\textbf{Interceptable} - Whether the agent can intercept the ball or
not.}
\end{enumerate}
\section{Actions}
Based on \cite{cyrus}, we chose four actions (a minimal selection sketch follows the list):
\begin{enumerate}
\item Move: Performs the basic move, going to the position according to the formation file.
\item Go to ball: Performs an interception move, tackling the opponent when it can.
\item Defend Goal: Goes to the circumcenter of the triangle formed by the goalie position, the right or left goal post position, and the attacker position.
\item Block: Performs \cite{marlik2011}'s Marlik Block.
\end{enumerate}
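The choice among these behaviors can be as simple as taking the highest-scoring discrete action. The snippet below is a minimal hypothetical sketch (the identifiers are ours, not the exact HFO or agent2d API):
\begin{verbatim}
# Hypothetical sketch: picking one of the four actions from network scores
ACTIONS = ["MOVE", "GO_TO_BALL", "DEFEND_GOAL", "BLOCK"]

def select_action(q_values):
    # q_values: one score per action produced by the neural network
    best = max(range(len(ACTIONS)), key=lambda i: q_values[i])
    return ACTIONS[best]
\end{verbatim}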
We chose Marlik's block over Cyrus' because of how aggressively Cyrus' block presses the ball carrier: it advances too far, which creates a high chance of being dribbled past, followed by a goal.
\section{Reward System}
The reward system is also very similar to \cite{cyrus}'s; we only removed the cycles condition. We also tried to adapt \cite{tgze}'s ball potential equation to the Soccer Simulation 2D domain as an additional reward term, but it did not work well in our experiments.
"alphanum_fraction": 0.7687352373,
"avg_line_length": 83.1607142857,
"ext": "tex",
"hexsha": "241369be291c40374ff84c53498b27da06862ed0",
"lang": "TeX",
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2020-08-08T02:30:21.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-06-18T01:33:35.000Z",
"max_forks_repo_head_hexsha": "d0237207dbb485611c685251f97649679a7bbc0a",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "bcahlit/graduationMgm",
"max_forks_repo_path": "Evaluating Reinforcement Learning on Robocup Soccer Simulation 2D/chapters/environment.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d0237207dbb485611c685251f97649679a7bbc0a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "bcahlit/graduationMgm",
"max_issues_repo_path": "Evaluating Reinforcement Learning on Robocup Soccer Simulation 2D/chapters/environment.tex",
"max_line_length": 555,
"max_stars_count": 5,
"max_stars_repo_head_hexsha": "ee264d6f1bffdbbbb04c55c8f43146604d54cc88",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "cstiano/graduationMgm",
"max_stars_repo_path": "Evaluating Reinforcement Learning on Robocup Soccer Simulation 2D/chapters/environment.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-16T14:50:16.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-02-19T19:10:07.000Z",
"num_tokens": 1124,
"size": 4657
} |
\subsubsection{Recommendations}
\begin{lstlisting}[language=sql]
/* function returnSeriesSameMaker($serie) */
select *
from series
where series.title != ? and makerid = ?,
[$serie->title, $serie->makerId];
/* function returnSeriesSameDifficulty($serie) */
$difficulty = select *
from types
where types.id = ?, [$serie->tId];
select *
from series, types
where series.title != ?
and series.tid = types.id
and types.difficulty = ?,
[$serie->title, $difficulty[0]->difficulty];
/* function returnSeriesSameRating($serie) */
/* $rating = */
select *
from series, series_ratings
where series.id = series_ratings.seriesid and series.id = ?, [$serie->id];
/* if (!empty($rating)): */
select *
from series, series_ratings
where series.id = series_ratings.seriesid
and series.id != ?
and series_ratings.rating = ? ,
[$serie->id, $rating[0]->rating];
/* return: (always-false condition below, yields an empty result set) */
select *
from series
where series.id != series.id;
/* function isEmptySeries($serie) */
select *
from series, exercises, exercises_in_series
where series.id = exercises_in_series.seriesid
and exercises.id = exercises_in_series.exid
and series.id = ?, [$serie->id];
\end{lstlisting}
%\documentstyle[russian]{artr}
%\documentstyle[14pt,russian]{artr}
\documentstyle{article}
%\pagestyle{empty}
%This macro will enable to use others in non-math mode
\def\MAP#1{\ifmmode{#1}\else{$#1$}\fi}
%standard abs
\def\abs#1{\MAP{\mid\!\!#1\!\!\mid}}
%definition for dirac vectors and product
\def\ket#1{\MAP{\mid\!\!#1\!\!>}}
\def\bra#1{\MAP{<\!\!#1\!\!\mid}}
\def\dirProd#1#2{\MAP{<\!\!#1\!\mid\!#2\!\!>}}
%derivatives: partial and ordinary in different forms.
\def\pDer#1#2{\MAP{\frac{\partial #1}{\partial #2}}}
\def\pDerF#1#2{\MAP{\partial_#2 #1}}
\def\pDerB#1#2{\MAP{\left(#1\right)^\prime_#2}}
\def\oDer#1#2{\MAP{\frac{d#1}{d#2}}}
\def\oDerB#1#2{\MAP{\left(#1\right)^\prime}}
%I prefer to use the next commands to write math formulas on a separate line.
\def\be{\[}
\def\ee{\]}
\def\bel#1{\begin{equation}\label{#1}} % to label formula
\def\eel{\end{equation}} % to label formula
%standard representation for refferences to formulas
\def\rf#1{(\ref{#1})}
\begin{document}
\title{Connection and topological charges for some systems}
%\maketitle
\section{About this document (Introduction).}
The following calculation is a modification of one that I did last year
using a non-orthogonal basis. In this version I avoid using it,
so now I do not need to discuss the correctness of that usage.
Unfortunately I have to use a lot of non-standard notation just to keep
the formulas more or less compact. I only hope that the comments in this very
draft presentation make the reading not too difficult.
\section{General words.}
Consider an $M$-level quantum system with Hamiltonian $H$ which has an
$N$-fold degenerate level with energy $E$, and let $\ket{n^i}$, $i = 1..N$,
form an orthonormal basis in the level's state space, so:
\bel{BasisDef}
\dirProd{n_i}{n^j} = \delta_i^j, \quad H\ket{n^i} = E\ket{n^i},
\quad i,j=1..N.
\eel
Then the corresponding non-abelian connection which appears during the evolution
of the system has the following form:
\be
(A^i_j)_\mu = \dirProd{n^i}{\pDer{n_j}{x^\mu}}.
\ee
To make the following calculations more compact I now introduce an
index-free notation. Let $n$ denote the $M\times N$ matrix defined by:
\be
(n)^i_a = (\ket{n^i})_a,
\quad i=1..N, \quad a=1..M
\ee
where $(\ket{n})_a$ is the $a$-th component of a vector \ket{n}. Then \rf{BasisDef}
can be rewritten as:
\bel{BasisRedef}
n^+n=I_N, \quad H^\prime n = 0, \quad H^\prime \equiv H - EI_M,
\eel
and by introducing matrix valued differential form $A$:
\be
(A)^i_j = (A^i_j)_\mu \wedge dx^\mu
\ee
I have:
\bel{ADef}
A = n^+dn.
\eel
It is easy to see that under $U(N)$ transformation
\be
n \rightarrow \tilde n = n\omega
\ee
given by a unitary $N\times N$ matrix $\omega$, the identities \rf{BasisRedef}
still hold:
\be
\tilde n^+ \tilde n = \omega^+n^+n\omega = \omega^+I_N\omega =
\omega^+\omega = I_N,
\ee
\be
H^\prime \tilde n = H^\prime n \omega = 0 \omega = 0.
\ee
and for $\tilde A$ I have:
\be
\tilde A = \tilde n^+ d\tilde n = \omega^+ n^+ d(n\omega)
= \omega^+ n^+ dn \omega + \omega^+ n^+n d\omega =
\omega^{-1} A \omega + \omega^{-1} d\omega,
\ee
i.e. standard connection transformation law.
\section{Basis representation.}
As follows from the results of the previous section,
to construct a particular connection one should select a
parametrization of $n$ and then use \rf{ADef} to calculate $A$. One possible
way to choose $n$ is based on the following approach.
The requirement that the energy level be $N$-fold degenerate means that
$rank\, H^\prime = M-N$, or in other words that there is an
$(M-N)\times (M-N)$ submatrix of $H^\prime$ with non-zero determinant.
By a permutation of rows and columns it is always possible to move this submatrix
to the bottom right corner and write for $H^\prime$:
\be
H^\prime = \pmatrix{h_{11} & h_{12} \cr h_{21} & h_{22} \cr },
\quad det h_{22} \ne 0,
\quad n = \pmatrix{\rho \cr \rho_\prime \cr}
\ee
where $h_{11}$,$h_{12}$,$h_{21}$,$h_{22}$,$\rho$,$\rho_\prime$ are
$N\times N$, $N\times (M-N)$,$(M-N)\times N$,$(M-N)\times (M-N)$,
$N\times N$, $(M-N)\times N$ matrices.
So $H^\prime n = 0$ is equivalent to:
\bel{eqn_for_n}
h_{21}\rho + h_{22}\rho_\prime = 0 \mbox{ or }
\rho_\prime = - (h_{22})^{-1}h_{21}\rho
\eel
because the remaining equations:
\bel{first_eqn_for_H}
h_{11}\rho + h_{12}\rho_\prime = 0
\eel
are, due to $det h_{22} \ne 0$
and $rank H^\prime = M-N$, just linear combinations of \rf{eqn_for_n}.
Moreover, \rf{first_eqn_for_H} can be used to express $h_{11}$ in terms of the other $h_{ij}$:
\be
h_{11} = -h_{12}\rho_\prime \rho^{-1} = h_{12}(h_{22})^{-1}h_{21},
\ee
The last equation is the same as $H^\prime n = 0$, and from it it follows
that the elements of $h_{21}$, $h_{22}$ can be treated as independent variables
and the original $H$ can be expressed in terms of them and the energy value $E$
by the following procedure:
1. Define $h_{12} = h_{21}^+$.
2. Define $h_{11} = h_{12}(h_{22})^{-1}h_{21}$. This fully determines
$H^\prime$.
3. Define $H = H^\prime + E I_M$.
The requirement of basis orthogonality $n^+n = 1$ leads to:
\be
1_N = \rho^+\rho + \rho_\prime^+\rho_\prime =
\rho^+(1_N + h_{21}^+(h_{22})^{-2}h_{21})\rho.
\ee
So $det \rho \ne 0$ which gives:
\bel{RhoCondition}
(\rho^+)^{-1}(\rho)^{-1} = (\rho\rho^+)^{-1} =
1_N + h_{21}^+(h_{22})^{-2}h_{21}.
\eel
The last equation is the only condition on $\rho$, and for any such $\rho$ I have
for $n$:
\bel{n_is}
n = \pmatrix{\rho \cr - (h_{22})^{-1}h_{21}\rho \cr}
= \pmatrix{1_N \cr x^+ \cr} \rho,
\eel
where by definition the $N\times (M-N)$ matrix $x$ is given by $x^+ = - (h_{22})^{-1}h_{21}$.
From the fact that $h_{21}$ is independent of $h_{22}$ and its elements
can be any complex values, and from $det h_{22} \ne 0$,
it follows that the elements of $x$
can be any complex values too, and the possible values of $x$ cover
the whole of:
\bel{space_is}
C^{N\times (M-N)} \equiv R^{2N\times (M-N)}.
\eel
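As a simple illustration (added here, not part of the original argument), take $M=2$, $N=1$:
then $h_{21}$ and $h_{22}$ are single numbers, $h_{22}$ real and non-zero, and
\be
x = -\frac{h_{21}^*}{h_{22}},
\ee
which can indeed take any value in $C^{1}\equiv R^{2}$, in agreement with \rf{space_is}.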
Important note:
the last statement does not refer to the whole possible topology of the
Hamiltonian parameter space, due to, for example, the restriction
$det h_{22} \ne 0$; it refers only to the domain of $x$.
I do not like this proof that the domain is $C^{N\times (M-N)}$,
but currently I do not know how to express it in a more rigorous way.
And in any case it is important to write down some considerations about the possible
topology of the quantum system states.
\section{Connection and field tensor.}
The calculation of the connection $A$ from \rf{n_is} is straightforward:
\be
A = n^+dn = \rho^+\pmatrix{1_N^+ & x \cr }
d(\pmatrix{1_N \cr x^+ \cr } \rho)
\ee
\be
= \rho^+xdx^+\rho + \rho^+(1+xx^+)d\rho
= \rho^{-1}\rho\rho^+(xdx^+\rho + (1+xx^+)d\rho).
\ee
But
\be
(\rho\rho^+)^{-1} = 1_N + h_{21}^+(h_{22})^{-2}h_{21} = 1 + xx^+,
\ee
this leads to:
\be
A = \rho^{-1}(1 + xx^+)^{-1}(xdx^+\rho + (1+xx^+)d\rho)
= \rho^{-1}(1 + xx^+)^{-1}xdx^+\rho + \rho^{-1}d\rho
\ee
The last formula looks like a connection transformed by the matrix $\rho$:
\be
\tilde A = (1 + xx^+)^{-1}xdx^+,
\ee
but because in general $\rho\rho^+ = (1 + xx^+)^{-1} \ne 1$ this is not a
$U(N)$ transformation. Nevertheless this representation is useful in the calculation
of $F$ because $F$ will look like $\tilde F$ transformed by $\rho$. The proof is
the same as for an ordinary $U(N)$ transformation:
\be
F = dA + A \wedge A = d(\rho^{-1}\tilde A \rho + \rho^{-1}d\rho) +
\rho^{-1}(\tilde A \rho + d\rho) \wedge \rho^{-1}(\tilde A \rho + d\rho)
\ee
\be
= -\rho^{-1}d\rho\rho^{-1}\wedge\tilde A \rho + \rho^{-1}d\tilde A\rho
- \rho^{-1}\tilde A \wedge d\rho - \rho^{-1}d\rho\wedge\rho^{-1}d\rho
\ee
\be
+ \rho^{-1}\tilde A\wedge\tilde A\rho + \rho^{-1}\tilde A d\rho
+ \rho^{-1}d\rho\wedge\rho^{-1}\tilde A \rho
+ \rho^{-1}d\rho\wedge\rho^{-1}d\rho
\ee
\be
= \rho^{-1}(d\tilde A + \tilde A\wedge\tilde A)\rho = \rho^{-1}\tilde F\rho,
\ee
where the following identities are used: $d\rho^{-1} = -\rho^{-1}d\rho\rho^{-1}$ and
$d(\omega_P\wedge\omega_Q) = d\omega_P \wedge \omega_Q
+(-1)^P \omega_P\wedge d\omega_Q$ for any differential forms of rank $P$ and $Q$.
For $\tilde F$ I have:
\be
\tilde F = d((1 + xx^+)^{-1}xdx^+) +
(1 + xx^+)^{-1}xdx^+\wedge (1 + xx^+)^{-1}xdx^+
\ee
\be
= -(1 + xx^+)^{-1}d(1 + xx^+)(1 + xx^+)^{-1}\wedge xdx^+
\ee
\be
+ (1 + xx^+)^{-1}dx\wedge dx^+
+(1 + xx^+)^{-1}xdx^+\wedge (1 + xx^+)^{-1}xdx^+
\ee
\be
= (1 + xx^+)^{-1}\left(
(-d(xx^+) + xdx^+)(1 + xx^+)^{-1}x + dx\right)\wedge dx^+
\ee
\be
= (1 + xx^+)^{-1}dx\left(-x^+(1 + xx^+)^{-1}x + 1\right)\wedge dx^+,
\ee
But $(1 + xx^+)^{-1}x = x(1 + x^+x)^{-1}$ due to:
\be
(1 + xx^+)^{-1}x - x(1 + x^+x)^{-1}
\ee
\be
=(1 + xx^+)^{-1}\left(x(1 + x^+x) - (1 + xx^+)x\right)(1 + x^+x)^{-1} = 0.
\ee
So:
\be
\tilde F = (1 + xx^+)^{-1}dx\left(-x^+x(1 + x^+x)^{-1} + 1\right)\wedge dx^+
\ee
\be
= (1 + xx^+)^{-1}dx\left(-x^+x + 1 + x^+x\right)(1 + x^+x)^{-1}\wedge dx^+,
\ee
or:
\bel{t_F_is}
\tilde F = (1 + xx^+)^{-1}dx \wedge (1 + x^+x)^{-1}dx^+,
\eel
\bel{F_is1}
F = \rho^{-1}(1 + xx^+)^{-1}dx \wedge (1 + x^+x)^{-1}dx^+ \rho
\eel
To avoid calculating an inverse matrix twice in \rf{t_F_is} one can use
the following identities:
\bel{invert_prop}
(1 + xx^+)^{-1} = 1 - x(1 + x^+x)^{-1}x^+,
\quad (1 + x^+x)^{-1} = 1 - x^+(1 + xx^+)^{-1}x.
\eel
Here is a straightforward proof of the first one:
\be
(1 + xx^+)(1 - x(1 + x^+x)^{-1}x^+)
= 1 + x(1 - (1 + x^+x)^{-1} - x^+x(1 + x^+x)^{-1})x^+
\ee
\be
= 1 + x (1 - (1 + x^+x)(1 + x^+x)^{-1})x^+ = 1 + 0 = 1.
\ee
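As a trivial check of \rf{invert_prop} (an added illustration, not used later), let $x$ be a single
complex number $z$; then both identities reduce to
\be
\frac{1}{1 + \abs{z}^2} = 1 - \frac{\abs{z}^2}{1 + \abs{z}^2},
\ee
which obviously holds.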
\section{Connection and field tensor in terms of hamiltonian matrix elements.}
To get an expression for $A$ and $F$ via the matrix elements of $H$,
$\rho$ should be fixed in some way. One possibility
is to require
$\rho = \rho^+$. From \rf{RhoCondition} this gives:
\be
\rho^{-2} = 1 + h_{21}^+(h_{22})^{-2}h_{21}
\ee
or:
\be
\rho = (1 + h_{21}^+(h_{22})^{-2}h_{21})^{-\frac{1}{2}}
= (1 + xx^+)^{-1/2}
\ee
and:
\bel{A_is}
A = (1 + xx^+)^{-1/2}xdx^+(1 + xx^+)^{-1/2}
+ (1 + xx^+)^{1/2} d((1 + xx^+)^{-1/2}),
\eel
\bel{F_is}
F = (1 + xx^+)^{-1/2} dx \wedge (1 + x^+x)^{-1} dx^+ (1 + xx^+)^{-1/2}.
\eel
By using \rf{A_is} and \rf{F_is} the calculation can be done in principle
for any $M$ and $N$. Of course, due to the matrix structure of $x$, the final
expressions via the Hamiltonian matrix elements will be very complicated.
But in two important cases, when
$N=1$ and when $N=M-1$, it
is possible to get the result in a general form.
In the first one, which corresponds to the non-degenerate case,
$x$ is $1\times (M-1)$ and $xx^+$ is just a number, so everything commutes.
So, defining $r^2 = xx^+ = h_{21}^+h_{22}^{-2}h_{21}$, I have:
\be
A = (1 + r^2)^{-1/2}xdx^+(1 + r^2)^{-1/2}
+ (1 + r^2)^{1/2} d((1 + r^2)^{-1/2})
\ee
\be
= \frac{1}{1 + r^2}xdx^+ - \frac{1}{2(1 + r^2)}d(r^2)
= \frac{1}{2(1 + r^2)}(xdx^+ - dxx^+),
\ee
\be
F = \frac{1}{1 + r^2} dx \wedge (1_N + x^+x)^{-1} dx^+
\ee
\be
= \frac{1}{1 + r^2} dx \wedge (1_N - \frac{1}{1 + r^2}x^+x) dx^+
\mbox{ from \rf{invert_prop}}
\ee
\be
= \frac{1}{1 + r^2} dx \wedge dx^+
- \frac{1}{(1 + r^2)^2} dxx^+ \wedge xdx^+.
\ee
In the second case $x$ becomes an $N\times 1$ matrix, so $xx^+$, like $A$ and $F$, is
an $N\times N$ matrix, while $x^+x$, like $h_{22}$, is just a number.
So, defining now $r^2 = x^+x = \frac{1}{h_{22}^2}h_{21}h_{21}^+$,
I have from \rf{invert_prop}:
\be
(1 + xx^+)^{-1} = 1_N - \frac{1}{1 + r^2}xx^+,
\ee
and by writing $(1 + xx^+)^{-1/2} = 1 + xf(r^2)x^+$
I get a quadratic equation for $f$:
\bel{rec_two}
(1 + xfx^+)^2 = 1 + x(2f + f^2r^2)x^+ = 1 - x\frac{1}{1 + r^2}x^+
\eel
\be
\mbox{or: } 2f + f^2r^2 = - \frac{1}{1 + r^2},
\ee
\be
f^2 + \frac{2}{r^2}f + \frac{1}{r^2(1 + r^2)} = 0
\ee
with solution:
\be
f(r^2) = -\frac{1}{r^2}\left( 1 \pm \frac{1}{\sqrt{1 + r^2}}\right).
\ee
The same technique gives:
\be
(1 + xx^+)^{1/2} = 1 - \frac{f}{1 + fr^2}xx^+.
\ee
(Here I will eventually write the final expressions for $A$ and $F$ in a more or less
explicit form, but I have not yet selected which one to use.)
\section{Topological charges.}
From Postnikov's book ``Differential Geometry'' it follows that to calculate
the different Chern numbers for a particular connection on a complex manifold
one should find all
$N$ invariant polynomials of $F$, such as $Tr(F)$, $det(F)$ and so on.
(The total number should be $N$ just because $F$ is an $N\times N$ matrix.)
Then from these polynomials, which will be differential forms of even order
from 2 to $2N$ because $F$ itself is of order 2, one should construct by exterior products
all possible linearly independent forms $\omega_A, A = 1..$
of order $D$, where $D$ is the parameter space dimension, which is even
due to the complex nature of the manifold.
The corresponding Chern number is then given by:
\be
c_A = \frac{1}{(2\pi i)^{D/2}} \int \omega_A.
\ee
In the case of $F$ represented by \rf{F_is}, $D = 2N\times(M-N)$, as follows
from \rf{space_is}.
And again an explicit calculation of $c_A$ can be done in the two cases mentioned above.
\subsection{Topological charge for $N=1$ system.}
Here the parameter space dimension is $2N$,
$F$ is just an ordinary 2-form, and there is only one invariant polynomial,
which is $F$ itself, so the connection is characterized by only one Chern number:
\be
c_1 = \frac{1}{(2\pi i)^N} \int (F)^N.
\ee
By representing F in the form:
\bel{F_for_N_one}
F = a_2 - a_1 \wedge a_1^+
\eel
with the 2-form $a_2 = \frac{1}{1 + r^2} dx \wedge dx^+$
and the 1-form $a_1 = \frac{1}{1 + r^2} dxx^+$, $(F)^N$ becomes:
\be
(F)^N = (a_2 - a_1 \wedge a_1^+)^N
= a_2^N - Na_1 \wedge a_1^+ \wedge a_2^{N-1}
\ee
since $(a_1 \wedge a_2)^k = 0$, $k > 1$, for any 1-forms $a_1, a_2$. So:
\be
(F)^N = \frac{1}{(1 + r^2)^{N + 1}}
\left((1 + r^2)(dx \wedge dx^+)^N
- N(dxx^+ \wedge xdx^+)(dx \wedge dx^+)^{N-1}\right).
\ee
In components of $x$: $(x)_{i,1} = z_i, i = 1..N$ with complex numbers $z_i$
I have:
\be
(dx \wedge dx^+) = \sum_{i = 1}^Ndz_i \wedge dz_i^*,
\quad (dxx^+) = \sum_{i = 1}^N dz_i z_i^*,
\ee
\be
\mbox{and: } (dx \wedge dx^+)^k =
\sum_{i_1, i_2, \cdots,i_k }
dz_{i_1} \wedge dz_{i_1}^* \wedge dz_{i_2} \wedge dz_{i_2}^*
\wedge \cdots \wedge dz_{i_k} \wedge dz_{i_k}^*
\ee
\be
= \sum_{i_1 \ne i_2 \ne \cdots \ne i_k \ne i_1}
dz_{i_1} \wedge dz_{i_1}^* \wedge dz_{i_2} \wedge dz_{i_2}^*
\wedge \cdots \wedge dz_{i_k} \wedge dz_{i_k}^*
\ee
\be
= k!\sum_{1 \le i_1 < i_2 < \cdots < i_k \le N}
dz_{i_1} \wedge dz_{i_1}^* \wedge dz_{i_2} \wedge dz_{i_2}^*
\wedge \cdots \wedge dz_{i_k} \wedge dz_{i_k}^*,
\ee
\be
\mbox{so: }
(dx \wedge dx^+)^N =
N!dz_1 \wedge dz_1^*\wedge \cdots \wedge dz_N \wedge dz_N^*
= N!\Omega_N \mbox{ by definition,}
\ee
and $(dxx^+ \wedge xdx^+)(dx \wedge dx^+)^{N-1}$ =
\be
(N-1)!
\left(\sum_{i,j} dz_i z_i^* \wedge z_j dz_j^* \right) \wedge
\sum_{1 \le i_1 < i_2 < \cdots < i_{N-1} \le N}
dz_{i_1} \wedge dz_{i_1}^*
\wedge \cdots \wedge dz_{i_{N-1}} \wedge dz_{i_{N-1}}^*
\ee
\be
=
(N-1)!
(\sum_{i} \abs{z_i}^2 dz_i \wedge dz_i^* ) \wedge
\sum_{1 \le i_1 < i_2 < \cdots < i_{N-1} \le N}
dz_{i_1} \wedge dz_{i_1}^*
\wedge \cdots \wedge dz_{i_{N-1}} \wedge dz_{i_{N-1}}^*
\ee
\be
=
(N-1)!
\left(\sum_{i} \abs{z_i}^2\right)
\sum_{1 \le i_1 < i_2 < \cdots < i_N \le N}
dz_{i_1} \wedge dz_{i_1}^*
\wedge \cdots \wedge dz_{i_N} \wedge dz_{i_N}^*
\ee
\be
= (N-1)! r^2 dz_1 \wedge dz_1^*\wedge \cdots \wedge dz_N \wedge dz_N^*
= (N-1)! r^2 \Omega_N.
\ee
Finally I get:
\be
(F)^N = \frac{1}{(1 + r^2)^{N + 1}}
\left((1 + r^2)N! - N(N-1)!r^2\right) \Omega_N =
\frac{N!}{(1 + r^2)^{N + 1}}\Omega_N,
\ee
\be
c_1 = \frac{N!}{(2\pi i)^N} \int \frac{1}{(1 + r^2)^{N + 1}}\Omega_N.
\ee
To calculate previous integral I set $z_i = x_{2i} +ix_{2i - 1}$, $i = 1..N$
so:
\be
dz_i \wedge dz_i^* =
(dx_{2i} + idx_{2i - 1}) \wedge (dx_{2i} - idx_{2i - 1})
\ee
\be
= i(-dx_{2i} \wedge dx_{2i - 1} + dx_{2i - 1} \wedge dx_{2i})
= 2idx_{2i - 1} \wedge dx_{2i},
\ee
\be
\Omega_N = (2i)^N dx_1 \wedge \cdots \wedge dx_{2N},
\ee
\be
\mbox{and: } c_1 = \frac{N!}{\pi^N}\int \frac{1}{(1 + r^2)^{N + 1}}
dx_1 \wedge \cdots \wedge dx_{2N} \equiv
\frac{N!}{\pi^N}\int \frac{1}{(1 + r^2)^{N + 1}}d^{2N}x,
\ee
\be
\mbox{with } r^2 = \sum_{i = 1}^{2N} x_i^2.
\ee
By introducing spherical coordinates and integrating over the unit sphere
$S^{2N-1}$, which gives $\frac{2\pi^N}{(N-1)!}$, I have:
\be
c_1 = \frac{2N!\pi^N}{(N-1)!\pi^N}
\int_0^{\infty}\frac{1}{(1 + r^2)^{N + 1}} r^{2N-1}dr
\ee
\be
= N\int_1^{\infty}\frac{(s - 1)^{N-1}}{s^{N + 1}} ds \mbox{ with } s = r^2 + 1.
\ee
Applying the Newton binomial formula:
\be
c_1 = N\sum_{j=0}^{N-1} \frac{(N-1)!}{j!(N-1-j)!}
\int_1^{\infty}s^{j-N-1}(-1)^{N-1-j}\,ds
\ee
\be
= \sum_{j=0}^{N-1} \frac{N!}{j!(N-1-j)!}
\frac{1}{j-N}s^{j-N}\bigl|_1^{\infty}(-1)^{N-1-j}
\ee
\be
= \sum_{j=0}^{N-1}\frac{N!}{j!(N-j)!}(-1)^{N-1-j}
= - \sum_{j=0}^{N-1}\frac{N!}{j!(N-j)!}(-1)^{N-j}
\ee
\be
= -\left(\sum_{j=0}^{N}\frac{N!}{j!(N-j)!}1^j(-1)^{N-j} - 1\right)
\ee
\be
\mbox{ and finally: } c_1 = -((1-1)^N - 1) = 1.
\ee
This means that for any $M$-level non-degenerate system the corresponding
abelian connection always has unit topological charge.
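As a quick consistency check (added here), take the simplest non-degenerate case $M=2$;
then $x$ has a single complex component and the integral above reduces to
\be
c_1 = \frac{1}{\pi}\int \frac{d^{2}x}{(1 + r^2)^{2}}
= 2\int_0^{\infty}\frac{r\,dr}{(1 + r^2)^{2}} = 1,
\ee
the familiar unit charge of a two-level system.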
\subsection{Topological charge for $N=M-1$ system.}
In this case the parameter space dimension is $2N$ again and
field tensor has the form from \rf{F_is1}:
\be
F = a \wedge a^+,
\ee
with the $N\times 1$ matrix $a = (1 + xx^+)^{-1/2} dx (1 + x^+x)^{-1/2}$.
Now, to calculate the $N$ different invariant polynomials of $F$,
I will use the standard formula:
\be
det(X) = \exp (Tr\log X)
\ee
(Of course I should say a lot here about how a function of differential
matrix-valued forms can be defined, but because even forms commute
and any polynomial of $F$ is an even form, it is possible to define
any function of $F$ just by the corresponding Taylor sum; and because
$F^k = 0$ for $k > N$ by the properties of differential forms, such an expansion
will contain only a finite number of terms, and any matrix property that
can be proven by using only such expansions, like the previous one, will
hold for even-order differential forms too.)
From the Taylor expansion of $\log$:
\be
Tr\log (\lambda - F) = Tr(\log\lambda 1_N) + Tr\log (1 - \frac{1}{\lambda}F)
= N\log\lambda - Tr\left(\sum_{k=1}^{\infty} \frac{1}{k}
\frac{1}{\lambda^k}F^k\right)
\ee
\be
= N\log\lambda - \left(\sum_{k=1}^{N} \frac{1}{k}
\frac{1}{\lambda^k}Tr(F^k)\right),
\ee
But:
\be
Tr(F^k) = Tr((a \wedge a^+)^k) = Tr(a (a^+ \wedge a)^{k - 1} a^+)
= - Tr(a^+(a^+ \wedge a)^{k - 1}a)
\ee
\be
= - Tr((a^+ \wedge a)^k) \equiv -(a^+ \wedge a)^k
\ee
because $a^+ \wedge a$ is just an ordinary 2-form. This leads to:
\be
Tr\log (\lambda - F)
= N\log\lambda + \left(\sum_{k=1}^{N} \frac{1}{k}
\frac{1}{\lambda^k}(a^+ \wedge a)^k\right)
\ee
\be
= N\log\lambda - \log\left(1 - \frac{1}{\lambda}(a^+ \wedge a)\right)
= \log\left(\lambda^N(1 - \frac{1}{\lambda}(a^+ \wedge a))^{-1}\right),
\ee
So:
\be
det(\lambda - F) = \exp(Tr\log (\lambda - F))
= \lambda^N(1 - \frac{1}{\lambda}(a^+ \wedge a))^{-1}.
\ee
Again applying the Taylor expansion:
\be
(1 - \frac{1}{\lambda}(a^+ \wedge a))^{-1}
= \sum_{k=0}^{\infty}\frac{1}{\lambda^k}(a^+ \wedge a)^k
= \sum_{k=0}^{N}\frac{1}{\lambda^k}(a^+ \wedge a)^k
\ee
I have:
\be
det(\lambda - F) = \sum_{k=0}^{N}\lambda^{N-k}(a^+ \wedge a)^k,
\ee
which immediately shows that all invariant polynomials of $F$ have the form
\be
(-1)^k(a^+ \wedge a)^k = (-a^+ \wedge a)^k
\ee
and any product of them which
gives a $2N$-form is always $(-a^+ \wedge a)^N$. This means that all Chern
numbers in the case $N=M-1$ are the same and are given by the formula:
\be
c = \frac{1}{(2\pi i)^N} \int (-a^+ \wedge a)^N.
\ee
\be
a^+ \wedge a =
(1 + x^+x)^{-1/2}dx^+ (1 + xx^+)^{-1/2} \wedge
(1 + xx^+)^{-1/2} dx (1 + x^+x)^{-1/2}
\ee
\be
= (1 + x^+x)^{-1}dx^+\wedge (1 + xx^+)^{-1}dx
\ee
because $x^+x \equiv r^2$ is just number and using \rf{rec_two}:
\be
a^+ \wedge a =
\frac{1}{1 + r^2} dx^+ \wedge dx
- \frac{1}{1 + r^2} dx^+x \wedge \frac{1}{1 + r^2} x^+dx
\ee
\be
= a^\prime_2 - a^\prime_1 \wedge a^{\prime +}_1,
\quad a^\prime_2 = \frac{1}{1 + r^2} dx^+ \wedge dx,
\quad a^\prime_1 = \frac{1}{1 + r^2} dx^+x.
\ee
If now I let $x^\prime = x^+$ I have:
\be
x^\prime x^{\prime +} = x^+x = r^2,
\quad a^\prime_2 = \frac{1}{1 + r^2} dx^\prime \wedge dx^{\prime +},
\quad a^\prime_1 = \frac{1}{1 + r^2} dx^\prime x^{\prime +}.
\ee
But this is the same as expression \rf{F_for_N_one} for $F$ in the $N=1$ case
if one forgets about the primes, which immediately gives:
\be
(-a^+ \wedge a)^N = (-1)^N\frac{N!}{(1 + r^2)^{N + 1}}\Omega^\prime_N,
\ee
with
\be
\Omega^\prime_N =
dz^\prime_1 \wedge dz^{\prime *}_1\wedge
\cdots \wedge dz^\prime_N \wedge dz^{\prime *}_N
\ee
and taking into account:
\be
z^\prime_j = (x^\prime)_{1j} = ((x)_{j1})^* = z_j^*:
\ee
\be
\Omega^\prime_N =
dz_1^* \wedge dz_1\wedge
\cdots \wedge dz_N^* \wedge dz_N
= (-1)^N
dz_1 \wedge dz_1^*\wedge
\cdots \wedge dz_N \wedge dz_N^* = (-1)^N\Omega_N,
\ee
I have:
\be
(-a^+ \wedge a)^N = \frac{N!}{(1 + r^2)^{N + 1}}\Omega_N
\ee
or the same expression as in the $N=1$ case. And using the result of the previous section:
\be
c_A = \frac{1}{(2\pi i)^N} \int \frac{N!}{(1 + r^2)^{N + 1}}\Omega_N = 1.
\ee
So in the $N=M-1$ case all possible Chern numbers are equal to one.
%\begin{thebibliography}{10}
%\bibitem{FirstBerryWork}
% Berry~M.~V. // Proc. Roy. Soc. 1987. V. A392. P.~45.
%\end{thebibliography}
\end{document}
%\documentclass[first,firstsupp,handout,compress,notes,navigation]{ETHclass}
%\documentclass[first,firstsupp,handout,lastsupp]{ETHclass}
\documentclass[first,firstsupp,lastsupp,handout,last,hyperref,table]{ETHclass}
%\documentclass[first,firstsupp]{ETHclass}
\usepackage{etex}
\usepackage{adjustbox}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{animate}
\usepackage{booktabs}
\usepackage{charter}
\usepackage{etoolbox}
\usepackage{ifthen}
\usepackage{longtable}
\usepackage{mathrsfs}
\usepackage{multicol}
\usepackage{pgf}
\usepackage{pgfplots}
\usepackage{pifont}
\usepackage{ragged2e}
\usepackage{standalone}
\usepackage[caption=false]{subfig}
\usepackage{tabularx}
\usepackage{tikz}
\usepackage{verbatim}
\usepackage{xcolor}
\usepackage{hyperref}
\setbeamertemplate{navigation symbols}{}
\usetikzlibrary{arrows,decorations.pathreplacing,positioning,shapes,shadows}
%\usepackage[style=numeric-comp]{biblatex}
%\usepackage{lipsum}
%\usetikzlibrary{fit}
\usetikzlibrary{arrows}
\usetikzlibrary{trees}
% Options for beamer:
%
% 9,10,11,12,13,14,17pt Fontsizes
%
% compress: navigation bar becomes smaller
% t : place contents of frames on top (alternative: b,c)
% handout : handoutversion
% notes : show notes
% notes=onlyslideswithnotes
%
%hyperref={bookmarksopen,bookmarksnumbered} : Needed for menues in
% acrobat. Also need
% pdftex as option or
% compile with
% pdflatex '\PassOptionsToPackage{pdftex,bookmarksopen,bookmarksnumbered}{hyperref} \input{file}'
%\usepackage{beamerseminar}
%\usepackage[accumulated]{beamerseminar}
% remove ``accumulated'' option
% for original behaviour
%\usepackage{beamerbasenotes}
%\setbeamertemplate{note page}[plain]
%\setbeameroption{notes on second screen}
%\setbeamertemplate{note page}[plain]
\setbeamertemplate{note page}{\ \\[.3cm]
\textbf{\color{blue}Notes:}\\%[0.1cm]
{\footnotesize %\tiny
\insertnote}}
%\setbeameroption{notes on second screen}
%\setbeamertemplate{navigation symbols}{} % suppresses all navigation symbols:
\setbeamertemplate{navigation symbols}[horizontal] % Organizes the navigation symbols horizontally.
% \setbeamertemplate{navigation symbols}[vertical] % Organizes the navigation symbols vertically.
% \setbeamertemplate{navigation symbols}[only frame symbol] % Shows only the navigational symbol for navigating frames.
\setlayoutscale{0.5}
\setparametertextfont{\scriptsize}
\setlabelfont{\scriptsize}
% \useoutertheme[subsection=false]{miniframes}
% \usepackage{etoolbox}
% \makeatletter
% \patchcmd{\slideentry}{\advance\beamer@xpos by1\relax}{}{}{}
% \def\beamer@subsectionentry#1#2#3#4#5{\advance\beamer@xpos by1\relax}%
% \makeatother
% \makeatletter
% \newenvironment{withoutheadline}{
% \setbeamertemplate{headline}{%
% \vspace{15pt}
% }
% }{}
% \makeatother
\makeatletter
\newenvironment{withoutheadline}{
\setbeamertemplate{headline}{%
\vspace{35pt}
}
%\def\beamer@entrycode{\vspace*{-1.5\headheight}}
}{}
\makeatother
\newcommand{\Cross}{$\mathbin{\tikz [x=1.4ex,y=1.4ex,line width=.2ex, red] \draw (0,0) -- (1,1) (0,1) -- (1,0);}$}%
\newcommand{\Checkmark}{$\color{green}\checkmark$}
\setbeamerfont{subsection in toc}{size=\tiny}
\makeatletter
\patchcmd{\beamer@sectionintoc}
{\vfill}
{\vskip1.5\itemsep}
{}
{}
\makeatother
\setbeamertemplate{frametitle continuation}{}
\setbeamertemplate{bibliography entry title}{}
\setbeamertemplate{bibliography entry author}{}
\setbeamertemplate{bibliography entry location}{}
\setbeamertemplate{bibliography entry note}{}
\setbeamercolor*{bibliography entry title}{fg=black}
\setbeamercolor*{bibliography entry author}{fg=black}
\setbeamercolor*{bibliography entry location}{fg=black}
\setbeamercolor*{bibliography entry note}{fg=black}
% and kill the abominable icon
%\setbeamertemplate{bibliography item}{\color{forestgreen}$\blacktriangleright$}
\setbeamertemplate{bibliography item}{\insertbiblabel}
%\setbeamertemplate{bibliography item}{\theenumiv}
\newcommand{\highlightred}[1]{%
\colorbox{red!50}{$\displaystyle#1$}}
\newcommand{\highlightyellow}[1]{%
\colorbox{yellow!50}{$\displaystyle#1$}}
\newcommand{\highlightgreen}[1]{%
\colorbox{green!50}{$\displaystyle#1$}}
\AtBeginSection[]{
\begin{frame}
\vfill
\centering
\begin{beamercolorbox}[sep=8pt,center,shadow=true,rounded=true]{title}
\usebeamerfont{frametitle}\includegraphics[width=2ex]{freccia_trasparente_verde_foresta.png}\hspace{.5ex}~{\LARGE \textsc{\bfseries \insertsectionhead}}\par%
\end{beamercolorbox}
\vfill
\end{frame}
}
\hyphenpenalty=5000
\tolerance=1000
\graphicspath{{figures/}}
\newenvironment{system}{\left\lbrace\begin{array}{@{}l@{}}}{\end{array}\right.}
\newenvironment{subsystem}{\left\lgroup\begin{array}{@{}l@{}}}{\end{array}\right.}
\defbeamertemplate*{title page}{customized}[1][]
{
\usebeamerfont{subtitle}
\usebeamercolor[fg]{subtitle}
\vspace{-1.75cm}
{\flushleft
\usebeamerfont{title}{\inserttitle}\par
}
\vspace{-.25cm}
{\flushleft
\usebeamerfont{subtitle}{\small \insertsubtitle} \par
}
%\vspace{-.5cm}
{\flushright
\setbeamercolor{author}{bg=white,fg=Red}
\usebeamerfont{author}{\footnotesize \insertauthor} \par}
\vspace{-.2cm}
{\flushright
\usebeamerfont{institute}{\tiny \insertinstitute}\par }
\vspace{.2cm}
{\centering
\usebeamerfont{date}{\scriptsize \insertdate} \par }
\vspace{0.2in}
}
\begin{document}
\setbeamertemplate{caption}{\raggedright\insertcaption\par}
\title[\textsc{Micromechanical models of intra-laminar damage}]{\textsc{Micromechanical models of intra-laminar damage in laminates with thin plies}}
\author{ L. Di Stasio$^{1,2}$, Z. Ayadi$^{1}$, J. Varna$^{2}$}
%\institute{ Science et Ing\'enierie des Mat\'eriaux et M\'etallurgie (SI2M), Institut Jean Lamour, Nancy, France\\Department of Engineering Sciences and Mathematics, Division of Materials Science, Lule\aa\ University of Technology, Lule\aa, Sweden}
\institute{$^{1}$EEIGM, Universit\'e de Lorraine, Nancy, France\\$^{2}$Division of Materials Science, Lule\aa\ University of Technology, Lule\aa, Sweden}
\date{Journ\'ee de l'\'Equipe 304, Nancy (FR), 5 July 2017}
\begin{frame}[plain]
\titlepage
\end{frame}
\begin{withoutheadline}
\begin{frame}
\frametitle{Outline}
\justifying
\vspace*{-0.5cm}
% \tableofcontents[hidesubsections]
% \begin{multicols}{2}
% \tableofcontents[hidesubsections]
% \end{multicols}
% \begin{columns}[t]
% \begin{column}{.5\textwidth}
% \tableofcontents[sections={1-2}]
% \end{column}
% \begin{column}{.5\textwidth}
% \tableofcontents[sections={3-6}]
% \end{column}
% \end{columns}
% \end{frame}
\tableofcontents[hidesubsections]
\end{frame}
\end{withoutheadline}
%\note{}
%\begin{frame}
%\pagediagram
%\end{frame}
%% \note{}
\section[Thin-ply laminates]{Laminated composites with extremely thin plies}
\subsection{Spread Tow technology}
\begin{frame}
\frametitle{Introduction to \textit{Spread Tow} technology}
\vspace{-0.25cm}
\centering
\begin{itemize}
\item {\small Initially developed in Japan between 1995 and 1998}
\item {\small Over the last decade, its field of application has broadened from sports equipment to primary structures, such as \textit{Solar Impulse 2}}
\item {\small A small number of producers: NTPT (USA-CH), Oxeon (SE), Chomarat (FR), Hexcel (USA), Technomax (JP)}
\end{itemize}
\begin{figure}[!h]
\centering
\subfloat[\scriptsize By North Thin Ply Technology.\label{fig:solar_impulse}]{\includegraphics[height=0.25\textheight,width=0.45\textwidth]{ntpt_solar-impulse-1.jpg}}\quad
\subfloat[\scriptsize By TeXtreme.\label{fig:solar_car}]{\includegraphics[height=0.25\textheight,width=0.45\textwidth]{textreme_solar_car.jpg}}
%\caption{Single RVE model.}
\label{fig:thin-ply-examples}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{Fundamentals of \textit{Spread Tow} technology}
\vspace{-0.5cm}
\centering
\begin{figure}
\centering
\includegraphics[height=0.85\textheight]{spread-tow-tech.pdf}
%\caption{}
\label{fig:spread-tow-schematic}
\end{figure}
\end{frame}
\subsection{The thin-ply effect on transverse cracks}
\begin{frame}
\frametitle{A visual definition of transverse cracks}
\vspace{-0.75cm}
\centering
\captionsetup[subfigure]{labelfont=footnotesize}
\begin{figure}[!h]
\centering
\subfloat[\scriptsize By Dr. R. Olsson, Swerea, SE.\label{fig:all-cracks}]{\includegraphics[width=0.46\textwidth]{all-cracks.png}}\quad
\subfloat[\scriptsize By Prof. Dr. E. K. Gamstedt, KTH, SE.\label{fig:transverse-cracks}]{\includegraphics[width=0.5\textwidth]{intralaminar-cracks.png}}
\caption{A visual definition of transverse cracks.}
\label{fig:intralaminar-cracks}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{The \textit{thin-ply} effect}
\vspace{-0.5cm}
\centering
%\begin{\itemize}[!h]
%%\item First experimentally observed40 years ago by Parvizi and Bailey [1]
%\item []
%
%\end{\itemize}
%\begin{alertblock}{\bf{Main observation}}
%The transverse strength at failure measured for unidirectional composites (UD) is not applicable to a thin layer inside a laminate. Its real strength, known as the in-situ strength, is in fact much higher.
%\end{alertblock}
%\begin{itemize}
%\item First experimentally observed $40$ years ago by Parvizi and Bailey [1]
%\begin{alertblock}{\bf{Main observation}}
%The transverse strength at failure measured for unidirectional composites (UD) is not applicable to a thin layer inside a laminate. Its real strength, known as the in-situ strength, is in fact much higher.
%\end{alertblock}
%\item Many experimental and numerical studies [2,3] devoted to damage propagation
%\item Crack initiation has been mostly neglected
%\end{itemize}
\begin{figure}[!h]
\centering
\includegraphics[height=0.75\textheight]{Flaggs-Kural_InSituTransverseStrength.pdf}
\caption{\tiny Measurements of the \textit{in-situ} maximum transverse strength by D. L. Flaggs \& M. H. Kural, 1982 [1].}
\label{fig:in-situ-strength}
\end{figure}
\end{frame}
\section{Objectives \& Approach}
\begin{frame}
\frametitle{Objectives \& Approach}
\vspace{-0.25cm}
\centering
\scriptsize
\begin{alertblock}{\small \bf{Objectives}}
\begin{itemize}
\item Study the effects of the fiber volume fraction, the thin-ply thickness and the neighboring plies on crack initiation
\item Infer a relation of the form
\begin{equation*}
G_{*c}=G_{*c}\left(\theta_{decollement},\Delta\theta_{decollement}, E_{\left(\cdot\cdot\right)}, \nu_{\left(\cdot\cdot\right)}, G_{\left(\right)},VF_{f}, t_{pli}, \frac{t_{pli}}{t_{plis\ proches}}\right)
\end{equation*}
\end{itemize}
\end{alertblock}
\begin{alertblock}{\small \bf{Approach}}
\begin{itemize}
\item Design and categorization of the Representative Volume Elements (RVEs)
\item Automatic generation of the geometry and of the Finite Element model
\item Simulation with the Finite Element Method (in Abaqus)
\end{itemize}
\end{alertblock}
\end{frame}
\section[The models]{The micromechanical models}
\subsection[RVE design]{Design of the Representative Volume Elements (RVEs)}
\begin{frame}
\frametitle{\small From the macroscopic scale to the microscopic scale}
\vspace{-1cm}
\centering
\begin{figure}
\centering
\includegraphics[height=0.85\textheight]{laminate-section.pdf}
%\caption{}
\label{fig:spread-tow-schematic}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{Representative Volume Elements (RVEs)}
\vspace{-0.75cm}
\centering
\begin{figure}
\centering
\includegraphics[height=0.8\textheight]{periodicRVE_cc_FR.pdf}
%\caption{\scriptsize Visualization of reference RVE inside a periodic square-array structure.}
\label{fig:periodicRVE}
\end{figure}
\end{frame}
\subsection[The mesh]{Mesh design and generation}
\begin{frame}
\frametitle{\vspace*{0.25cm}\small Mesh design and generation}
\vspace{-0.5cm}
\centering
\tiny
\begin{alertblock}{\scriptsize \bf{Why a good mesh is fundamental}}
\begin{enumerate}
\item The geometric discretization has a very strong effect on the solution of nonlinear FEM problems
\item Damage generates changes in the geometry, with creation of new surfaces and splitting of the domain
\item The variables describing the damage depend on the local topology and on the mesh refinement
\end{enumerate}
\end{alertblock}
\begin{alertblock}{\scriptsize \bf{4-step procedure for mesh generation}}
\begin{enumerate}
\item The boundary is generated from analytical representations
\item The boundary is divided into 4 corners ($c_{i}$) and 4 edges ($e_{i}$)
\item Application of the \textit{transfinite interpolation} method with multidimensional Lagrange polynomials
\begin{equation*}
P_{1}(x,p_{j})=\sum_{j=1}^{n}p_{j}\prod_{k=1\ k\neq j}^{n}\frac{x-x_{k}}{x_{j}-x_{k}}\quad P_{2}(x,y,p_{j},q_{j})=P_{1}(x,p_{j})\otimes P_{1}(y,q_{j})
\end{equation*}
\begin{equation*}
r(\xi,\eta)=P_{1}(\xi,e_{2},e_{4})+P_{1}(\eta,e_{1},e_{3})- P_{2}(\xi,\eta,c_{1},c_{2},c_{3},c_{4})
\end{equation*}
\item The mesh is refined by applying a global elliptic operator
\begin{equation*}
g^{11}\underline{r}_{,\xi\xi}+2g^{12}\underline{r}_{,\xi\eta}+g^{22}\underline{r}_{,\eta\eta}=0
\end{equation*}
\end{enumerate}
\end{alertblock}
\end{frame}
\begin{frame}
\frametitle{Angular discretization}
\vspace{-0.7cm}
\centering
\captionsetup[figure]{font=scriptsize,labelfont=scriptsize}
\begin{figure}[!h]
\centering
\includegraphics[height=0.7\textheight]{mesh-disc-at-interface.pdf}
\caption{Discretization of the fiber/matrix interface: $\delta=\frac{360^{\circ}}{4N_{\alpha}}$.}
\label{fig:angu-discr-def}
\end{figure}
\end{frame}
\subsection[FEM model]{Finite Element model in Abaqus}
\subsection[Numerical evaluation of $G_{c}$]{Numerical evaluation of the energy release rate}
\begin{frame}
\frametitle{Virtual Crack Closure Technique (VCCT)}
\vspace{-1.5cm}
\centering
\scriptsize
\begin{figure}[!h]
\centering
\includegraphics[height=0.6\textheight]{VCCT.pdf}
% \caption{Angular discretization at fiber/matrix interface.}
\label{fig:vcct}
\end{figure}
\begin{equation*}
G_{I}=\frac{Z_{C}\Delta w_{C}}{2B\Delta a}\quad G_{II}=\frac{X_{C}\Delta u_{C}}{2B\Delta a}\Longleftrightarrow\text{In-house codes and the *DEBOND option in Abaqus}
\end{equation*}
\end{frame}
\begin{frame}
\frametitle{Evaluation of the J-integral}
\vspace{-0.75cm}
\centering
\begin{figure}[!h]
\centering
\includegraphics[height=0.6\textheight]{J-integral.pdf}
% \caption{Angular discretization at fiber/matrix interface.}
\label{fig:jintegral}
\end{figure}
\scriptsize
\begin{equation*}
J_{i}=\lim_{\varepsilon\to 0}\int_{\Gamma_{\varepsilon}}\left(W\left(\Gamma\right)n_{i}-n_{j}\sigma_{jk}\frac{\partial u_{k}\left(\Gamma,x_{i}\right)}{\partial x_{i}}\right)d\Gamma\Longleftrightarrow\text{*CONTOUR INTEGRAL in Abaqus}
\end{equation*}
\end{frame}
\section[Results]{Preliminary results \& Outlook}
\subsection{$\sigma_{0}$ and $G_{0}$}
\begin{frame}
\frametitle{\small $\sigma_{0}$ for $Vf_{f}=0.001$, $\frac{L}{R_{f}}\sim28$ and $\delta=0.4^{\circ}$}
\vspace{-0.5cm}
\centering
\captionsetup[figure]{font=scriptsize,labelfont=scriptsize}
\begin{figure}[!h]
\centering
\includegraphics[height=0.7\textheight]{2017-06-23_AbqRunSummary_SingleFiberEqRfSmallFiniteStrain_sigma-inf_Summary.pdf}
\caption{\scriptsize In red, FEM with small strains; in green, FEM with finite strains; in black, $\sigma_{0}=\frac{E}{1-\nu^{2}}\varepsilon$.}
\label{fig:res1}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{\small $G_{0}$ for $Vf_{f}=0.001$, $\frac{L}{R_{f}}\sim28$ and $\delta=0.4^{\circ}$}
\vspace{-0.5cm}
\centering
\captionsetup[figure]{font=scriptsize,labelfont=scriptsize}
\begin{figure}[!h]
\centering
\includegraphics[height=0.7\textheight]{2017-06-23_AbqRunSummary_SingleFiberEqRfSmallFiniteStrain_G0_Summary.pdf}
\caption{\scriptsize In red FEM with the small strain formulation, in green FEM with the finite strain formulation, in black $G_{0}$ with $\sigma_{0}=\frac{E}{1-\nu^{2}}\varepsilon$.}
\label{fig:res2}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{\small $\sigma_{0}$ for $Vf_{f}=0.000079$, $\frac{L}{R_{f}}\sim100$ and $\delta=0.4^{\circ}$}
\vspace{-0.5cm}
\centering
\captionsetup[figure]{font=scriptsize,labelfont=scriptsize}
\begin{figure}[!h]
\centering
\includegraphics[height=0.7\textheight]{2017-06-16_AbqRunSummary_SingleFiberEqRfSmallStrain-D0-4_sigma-inf_Summary.pdf}
\caption{\scriptsize In red FEM with the small strain formulation, in black $\sigma_{0}=\frac{E}{1-\nu^{2}}\varepsilon$.}
\label{fig:res3}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{\small $G_{0}$ for $Vf_{f}=0.000079$, $\frac{L}{R_{f}}\sim100$ and $\delta=0.4^{\circ}$}
\vspace{-0.5cm}
\centering
\captionsetup[figure]{font=scriptsize,labelfont=scriptsize}
\begin{figure}[!h]
\centering
\includegraphics[height=0.7\textheight]{2017-06-16_AbqRunSummary_SingleFiberEqRfSmallStrain-D0-4_G0_Summary.pdf}
\caption{\scriptsize In red FEM with the small strain formulation, in black $G_{0}$ with $\sigma_{0}=\frac{E}{1-\nu^{2}}\varepsilon$.}
\label{fig:res4}
\end{figure}
\end{frame}
%\begin{frame}
%\frametitle{\small Conclusions}
%\vspace{-0.5cm}
%\centering
%\begin{itemize}[label=\ding{212}]
%\item $\sigma_{0}$ and $G_{0}$ depend on $\Delta\theta$ for finite sizes of the RVE
%\item As the RVE size $\rightarrow\infty$, i.e. $\frac{L}{R_{f}}\rightarrow \infty$ ($\sim 100$), $\sigma_{0}$ and $G_{0}$ tend to the theoretical undamaged value given by $\sigma_{0}=\frac{E_{m}}{1-\nu_{m}^{2}}\varepsilon_{0}$
%\item $\sigma_{0}$ and $G_{0}$ might be taken as a good measure of "infinetess" for strain-/displacement-controlled simulations
%\item By selecting $\Delta\theta=10^{\circ}$ and running a parametric study with a comparatevely coarse mesh the minimum ratio $\frac{L}{R_{f}}$ or equivalently maximum $Vf_{f}$ volume to have an infinite RVE could be found
%\end{itemize}
%\end{frame}
\subsection{Small and finite strain formulations}
\begin{frame}
\frametitle{\small $\frac{G_{\left(\cdot\cdot\right)}}{G_{0}}$ for $V_{f}=0.001$, $\frac{L}{R_{f}}\sim28$ and $\delta=0.4^{\circ}$}
\vspace{-0.5cm}
\centering
\captionsetup[figure]{font=scriptsize,labelfont=scriptsize}
\begin{figure}[!h]
\centering
\includegraphics[height=0.7\textheight]{2017-06-23_AbqRunSummary_SingleFiberEqRfSmallFiniteStrain_M-VCCT_Summary.pdf}
\caption{\scriptsize In red FEM with the small strain formulation, in green FEM with the finite strain formulation, in black BEM results.}
\label{fig:res5}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{\small $\frac{G_{\left(\cdot\cdot\right)}}{G_{0}}$ for $V_{f}=0.001$, $\frac{L}{R_{f}}\sim28$ and $\delta=0.4^{\circ}$, small strain formulation}
\vspace{-0.5cm}
\centering
\captionsetup[figure]{font=scriptsize,labelfont=scriptsize}
\begin{figure}[!h]
\centering
\includegraphics[height=0.7\textheight]{2017-06-23_AbqRunSummary_SingleFiberEqRfSmallStrain_J-INT_Summary.pdf}
\caption{\scriptsize Fading from blue to red, J-integrals evaluated on contours at increasing distance from the crack tip; in black BEM results.}
\label{fig:res6}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{\small $\frac{G_{\left(\cdot\cdot\right)}}{G_{0}}$ for $V_{f}=0.001$, $\frac{L}{R_{f}}\sim28$ and $\delta=0.4^{\circ}$, small strain formulation}
\vspace{-0.5cm}
\centering
\captionsetup[figure]{font=scriptsize,labelfont=scriptsize}
\begin{figure}[!h]
\centering
\includegraphics[height=0.7\textheight]{2017-06-23_AbqRunSummary_SingleFiberEqRfSmallStrain_VCCT-JINT_Summary.pdf}
\caption{\scriptsize Fading from blue to red, J-integrals evaluated on contours at increasing distance from the crack tip; in green the VCCT evaluation with the in-house routine, in black BEM results.}
\label{fig:res7}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{\small $\frac{G_{\left(\cdot\cdot\right)}}{G_{0}}$ for $V_{f}=0.001$, $\frac{L}{R_{f}}\sim28$ and $\delta=0.4^{\circ}$, finite strain formulation}
\vspace{-0.5cm}
\centering
\captionsetup[figure]{font=scriptsize,labelfont=scriptsize}
\begin{figure}[!h]
\centering
\includegraphics[height=0.7\textheight]{2017-06-23_AbqRunSummary_SingleFiberEqRfFiniteStrain_J-INT_Summary.pdf}
\caption{\scriptsize Fading from blue to red, J-integrals evaluated on contours at increasing distance from the crack tip; in black BEM results.}
\label{fig:res8}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{\small $\frac{G_{\left(\cdot\cdot\right)}}{G_{0}}$ for $V_{f}=0.001$, $\frac{L}{R_{f}}\sim28$ and $\delta=0.4^{\circ}$, finite strain formulation}
\vspace{-0.5cm}
\centering
\captionsetup[figure]{font=scriptsize,labelfont=scriptsize}
\begin{figure}[!h]
\centering
\includegraphics[height=0.7\textheight]{2017-06-23_AbqRunSummary_SingleFiberEqRfFiniteStrain_VCCT-JINT_Summary.pdf}
\caption{\scriptsize Fading from blue to red, J-integrals evaluated on contours at increasing distance from the crack tip; in green the VCCT evaluation with the in-house routine, in black BEM results.}
\label{fig:res9}
\end{figure}
\end{frame}
%\begin{frame}
%\frametitle{\small Conclusions}
%\vspace{-0.5cm}
%\centering
%\begin{itemize}[label=\ding{212}]
%\item For both small and finite strain formulations, J-integrals are already in good agreement with $\frac{G_{TOT}}{G_{0}}$ from BEM, i.e. no sizeable finite size effect already at $\frac{L}{R_{f}}\sim 28$
%\item For both small and finite strain formulations, J-integrals correct measure the peak value of $\frac{G_{TOT}}{G_{0}}$ at $60^{\circ}$
%\item J-Integrals in small strain slightly overestimate the BEM result
%\item J-integrals in small strain shows poor convergence in the range $50^{\circ}-80^{\circ}$
%\item J-Integrals in finite strain slightly underestimate the BEM result
%\item J-integrals in finite strain shows very good convergence in all the range $10^{\circ}-150^{\circ}$
%\end{itemize}
%\end{frame}
%\begin{frame}
%\frametitle{\small Conclusions}
%\vspace{-0.5cm}
%\centering
%\begin{itemize}[label=\ding{212}]
%\item $\frac{G_{TOT}}{G_{0}}$ is correctly calculated by the VCCT in small strain, in good agreement with BEM results
%\item $\frac{G_{TOT}}{G_{0}}$ is wrongly calculated by the VCCT in finite strain, with a peak at $65^{\circ}-70^{\circ}$
%\item Small strain VCCT shows better results than finite strain VCCT
%\item Mode ratio is still not correct, i.e. probably finite size effect
%\end{itemize}
%\end{frame}
%\begin{frame}
%\frametitle{\small Observations \& Questions}
%\vspace{-0.45cm}
%\centering
%\begin{itemize}[label=\ding{212}]
%\item J-Integral is a far-field technique, using stresses, strain and displacements far from the crack tip; convergence is in fact far from crack tip (at least 10 contours, i.e. 10 ring of elements)
%\item VCCT is a local technique, using forces and displacements at the crack tip
%\item The difference between small and finite strain results rests mainly in the displacements
%\item Previously, we observed that changing the formulation of the bonded interface, all other parameters equal, the result doesn't change
%\item All the convergence problem reduces to the correct evaluation of displacements of debonded surfaces close to the crack tip
%\item Displacements of debonded surfaces close to the crack tip are influenced by RVE size
%\end{itemize}
%\end{frame}
%\begin{frame}
%\frametitle{\small Observations \& Questions}
%\vspace{-0.25cm}
%\centering
%\begin{itemize}[label=\ding{212}]
%\item Small strain shows (correctly) better results than finite strain formulation with respect to infinite reference values
%\item However, Abaqus documentation suggests that, if contact between surfaces is present in the model, finite strain formulation (nonlinear geometry) should be used
%\item For finite sizes of RVE, which formulation should be chosen?
%\end{itemize}
%\end{frame}
%\subsection{Elements's aspect ratio}
%
%\begin{frame}
%\frametitle{\small $\frac{G_{\left(\cdot\cdot\right)}}{G_{0}}$ for $Vf_{f}=0.000079$, $\frac{L}{R_{f}}\sim100$ and $\delta=1.0^{\circ}$, small strain formulation}
%\vspace{-0.5cm}
%\centering
%\captionsetup[figure]{font=scriptsize,labelfont=scriptsize}
%\begin{figure}[!h]
%\centering
%\includegraphics[height=0.7\textheight]{2017-06-16_AbqRunSummary_SingleFiberEqRfSmallStrain-D1-0_VCCT-JINT_Summary.pdf}
% \caption{\scriptsize Fading from blue to red J-Integrals evaluated at contours at increasing distance from the crack tip, in green evaluation with in-house VCCT routine, in black BEM results.}
% \label{fig:res1}
%\end{figure}
%\end{frame}
%
%\begin{frame}
%\frametitle{\small $\frac{G_{\left(\cdot\cdot\right)}}{G_{0}}$ for $Vf_{f}=0.000079$, $\frac{L}{R_{f}}\sim100$ and $\delta=0.4^{\circ}$, small strain formulation}
%\vspace{-0.5cm}
%\centering
%\captionsetup[figure]{font=scriptsize,labelfont=scriptsize}
%\begin{figure}[!h]
%\centering
%\includegraphics[height=0.7\textheight]{2017-06-16_AbqRunSummary_SingleFiberEqRfSmallStrain-D0-4_VCCT-JINT_Summary.pdf}
% \caption{\scriptsize Fading from blue to red J-Integrals evaluated at contours at increasing distance from the crack tip, in green evaluation with in-house VCCT routine, in black BEM results.}
% \label{fig:res1}
%\end{figure}
%\end{frame}
%\begin{frame}
%\frametitle{\small Conclusions}
%\vspace{-0.5cm}
%\centering
%\begin{itemize}[label=\ding{212}]
%\item Elements' aspect ratio (maximum side length/minimum side length) was very high in the exterior part of the matrix in this set of simulations
%\item Spurious stresses adn deformations were created at $45^{\circ},135^{\circ},225^{\circ},315^{\circ}$
%\item Results are badly affected by this in the range $40^{\circ} - 70^{\circ}$ with a marked oscillation between $40^{\circ} - 50^{\circ}$
%\item Elements' aspect ratio in the matrix is more important than the elements' size at the fiber/matrix interface
%\item Program has already been changed to receive aspect ratios as input instead of number of elements
%\item Results from previous sections were calculated with meshes with controlled aspect ratios
%\end{itemize}
%\end{frame}
%\begin{frame}
%\frametitle{$\sigma_{xx}$ along radial sections}
%\vspace{-0.35cm}
%\centering
%\captionsetup[subfigure]{font=scriptsize,labelfont=scriptsize}
%\begin{figure}[!h]
%\centering
%\includegraphics[height=0.7\textheight]{AllRadialSections-S11.pdf}
% \caption{$\Delta\theta=5^{\circ},\delta=0.4^{\circ},VF_{f}=0.001,\frac{l}{R_{f}}\approx28$}
% \label{fig:allradialS11}
%\end{figure}
%\end{frame}
%
%\begin{frame}
%\frametitle{$\sigma_{zz}$ along radial sections}
%\vspace{-0.35cm}
%\centering
%\captionsetup[subfigure]{font=scriptsize,labelfont=scriptsize}
%\begin{figure}[!h]
%\centering
%\includegraphics[height=0.7\textheight]{AllRadialSections-S22.pdf}
% \caption{$\Delta\theta=5^{\circ},\delta=0.4^{\circ},VF_{f}=0.001,\frac{l}{R_{f}}\approx28$}
% \label{fig:allradialS22}
%\end{figure}
%\end{frame}
%
%\begin{frame}
%\frametitle{$\tau_{xz}$ along radial sections}
%\vspace{-0.35cm}
%\centering
%\captionsetup[subfigure]{font=scriptsize,labelfont=scriptsize}
%\begin{figure}[!h]
%\centering
%\includegraphics[height=0.7\textheight]{AllRadialSections-S12.pdf}
% \caption{$\Delta\theta=5^{\circ},\delta=0.4^{\circ},VF_{f}=0.001,\frac{l}{R_{f}}\approx28$}
% \label{fig:allradialS12}
%\end{figure}
%\end{frame}
\section{Conclusions}
\begin{frame}
\frametitle{\vspace*{0.5cm} Conclusions}
\vspace{-0.75cm}
\centering
\scriptsize
\begin{alertblock}{\footnotesize \bf{Conclusions}}
\begin{itemize}
\item Development of RVE models for the study of the cracking process
\item Numerical procedure for the automatic generation of the geometry and of the FEM model
\item Analyses with $VF_{f}\to 0$ (infinite matrix) for model validation
\end{itemize}
\end{alertblock}
\begin{alertblock}{\footnotesize \bf{Next steps}}
\begin{itemize}
\item Study the effects of $VF_{f}$, $t_{pli}$, $\frac{t_{pli}}{t_{plis\ proches}}$ and of the material properties\\[9pt]
\item Study the effect of different boundary conditions
\end{itemize}
\end{alertblock}
\end{frame}
\section{Appendices \& Bibliography}
\subsection{Appendices}
%\begin{frame}[label=]
%\frametitle{}
%\end{frame}
\begin{frame}
\frametitle{Spread Tow Technology: Implications}
\vspace{-0.75cm}
\centering
\begin{itemize}
\item Strong reduction in ply's thickness and weight
\item Reduction in laminate's thickness and weight
\item Higher fiber volume fraction and more homogeneous fiber distribution
\item Ply thickness to fiber diameter ratio decreases by at least 1 order of magnitude, from $>100$ to $\leq10$
\item Increased load at damage onset and increased ultimate strength, in particular for transverse cracking
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{RVEs: Variations on a Theme}
\vspace{-0.75cm}
\centering
\begin{figure}[!h]
\centering
\subfloat{\includegraphics[height=0.4\textheight]{SingleRVEs.pdf}}\quad
\subfloat{\includegraphics[height=0.4\textheight]{boundedRVE_cc.pdf}}\quad
\subfloat{\includegraphics[height=0.4\textheight]{periodicRVE_cc.pdf}}
%\caption{Single RVE model.}
\label{fig:RVEs-variations}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{RVEs: First Variation on a Theme}
\vspace{-0.25cm}
\centering
\begin{figure}
\centering
\includegraphics[height=0.7\textheight]{LEFM2DsRVEsFsDdepverdispBCULappAxialDispLR.pdf}
\caption{\scriptsize Isolated RVE with zero vertical displacement BC.}
\label{fig:singleRVE-rigid}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{RVEs: Second Variation on a Theme}
\vspace{-0.25cm}
\centering
\begin{figure}
\centering
\includegraphics[height=0.7\textheight]{LEFM2DsRVEsFsDhomoBCULappAxialDispLR.pdf}
\caption{\scriptsize Isolated RVE with homogeneous displacement BC.}
\label{fig:singleRVE-homo}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{RVEs: Third Variation on a Theme}
\vspace{-0.25cm}
\centering
\begin{figure}
\centering
\includegraphics[height=0.7\textheight]{boundedRVE_cc.pdf}
\caption{\scriptsize Bounded RVE.}
\label{fig:boundedRVE}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{Topological transformation}
\vspace{-1cm}
\centering
\captionsetup[subfigure]{font=scriptsize,labelfont=scriptsize}
\begin{figure}[!h]
\centering
\subfloat[]{\includegraphics[height=0.4\textheight]{mesh_regions.pdf}}\qquad
\subfloat[]{\includegraphics[height=0.4\textheight]{opening1.pdf}}\\
\subfloat[]{\includegraphics[height=0.3\textheight]{opening2.pdf}}\qquad
\subfloat[]{\includegraphics[width=0.6\textwidth]{opening3.pdf}}
%\caption{Single RVE model.}
\label{fig:topological-geom-transf}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{Mesh parameters}
\vspace{-0.7cm}
\centering
\captionsetup[subfigure]{font=scriptsize,labelfont=scriptsize}
\begin{figure}[!h]
\centering
\subfloat{\includegraphics[height=0.75\textheight]{mesh_parameters_single.pdf}}\qquad
\subfloat{\includegraphics[height=0.75\textheight]{mesh_parameters_bounded.pdf}}
%\caption{Single RVE model.}
\label{fig:mesh-params}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{Finite Element Model in Abaqus}
\vspace{-0.7cm}
\centering
\captionsetup[figure]{font=scriptsize,labelfont=scriptsize}
\begin{table}[h!]
\scriptsize
\centering
%\caption{Analysis methods summary.}
\begin{tabularx}{\textwidth}{X}
\toprule
\midrule
\textbf{Method}\\
ABAQUS/STD static analysis + VCCT + J-integral.\\
\midrule
\textbf{Type}\\
Static, i.e. no inertial effects. Relaxation until equilibrium.\\
\midrule
\textbf{Elements}\\
CPE4/CPE8\\
\midrule
\textbf{Interface}\\
Tied surface constraint \& contact mechanics\\
\midrule
\textbf{Input variables}\\
$R_{f}$, $V_{f}$, material properties, interface properties.\\
\midrule
\textbf{Control variables}\\
$\theta$, $\Delta\theta$, $\bar{\varepsilon}_{x}$.\\
\midrule
\textbf{Output variables} \\
Stress field, crack tip stress, stress intensity factors, energy release rates, $a$.\\
\midrule
\bottomrule
\end{tabularx}%
\label{tab:analysis_tab}%
\end{table}
\end{frame}
\begin{frame}
\frametitle{\small Evaluation of $G_{0}$}
\vspace{-0.7cm}
\footnotesize
\centering
\captionsetup[figure]{font=scriptsize,labelfont=scriptsize}
\begin{equation}
G_{0}=\pi R_{f}\sigma^{2}_{0}\frac{1+k_{m}}{8G_{m}}
\end{equation}
\begin{equation}
k_{m}=3-4\nu_{m}
\end{equation}
\begin{equation}
\sigma_{0}^{undamaged}=\frac{E_{m}}{1-\nu^{2}_{m}}\varepsilon_{xx}
\end{equation}%
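As an illustration with hypothetical matrix properties ($E_{m}=3.5\ \mathrm{GPa}$, $\nu_{m}=0.4$, hence $G_{m}=E_{m}/\left(2(1+\nu_{m})\right)=1.25\ \mathrm{GPa}$ and $k_{m}=1.4$), a fiber radius $R_{f}=1\ \mu\mathrm{m}$ and an applied strain $\varepsilon_{xx}=1\%$, one gets $\sigma_{0}\approx41.7\ \mathrm{MPa}$ and $G_{0}\approx1.3\ \mathrm{J/m^{2}}$.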
\end{frame}
%\section{References}
%\begin{frame}[t,label=references,allowframebreaks]
% \frametitle{References}
% \begin{itemize}
%% \item Loading rate effects on delamination:\\[10pt] \textit{Loading\_rate\_effects\_on\_CFRP.bib}\\[30pt]
% \item Body-fitted grids for FSI modeling with LBM:\\[10pt] %\textit{Fluid\_structure\_interaction\_on\_deformable\_surfaces.bib}
% \end{itemize}
% \bibliographystyle{amsalpha}
% {\footnotesize
% \bibliography{PSI_talk.bib}
% }
%\bibliography{/auto.mounter/home/lucadistasio/Documents/ETH/Research_material/References/fsi_references_kbib.bib}
%\end{frame}
\subsection{Bibliography}
\begin{frame}[allowframebreaks]
\frametitle{Bibliography}
\begin{thebibliography}{10}
% \beamertemplatebookbibitems
% % Start with overview books.
%
% \bibitem{Author1990}
% A.~Author.
% \newblock {\em Handbook of Everything}.
% \newblock Some Press, 1990.
\beamertemplatearticlebibitems
% Followed by interesting articles. Keep the list short.
\bibitem{DonaldL.Flaggs1982}
Donald L. Flaggs, Murat H. Kural;
\newblock {\em Experimental Determination of the In Situ Transverse Lamina Strength in Graphite/Epoxy Laminates.}
\newblock Journal of Composite Materials, vol. 16, n. 2, 1982.
\bibitem{Parvizi1978}
Parvizi A., Bailey J.E;
\newblock {\em On multiple transverse cracking in glass fibre epoxy cross-ply laminates.}
\newblock Journal of Materials Science, 1978; 13:2131-2136.
\bibitem{herraez2015}
Miguel Herr\'aez, Diego Mora, Fernando Naya, Claudio S. Lopes, Carlos Gonz\'alez, Javier LLorca;
\newblock {\em Transverse cracking of cross-ply laminates: A computational micromechanics perspective.}
\newblock Composites Science and Technology, 2015; 110:196-204.
\bibitem{Canal2012}
Luis Pablo Canal, Carlos Gonz\'alez, Javier Segurado, Javier LLorca;
\newblock {\em Intraply fracture of fiber-reinforced composites: Microscopic mechanisms and modeling.}
\newblock Composites Science and Technology, 2012; 72(11):1223-1232.
\bibitem{StephenW.Tsai2005}
Stephen W. Tsai;
\newblock {\em Thin ply composites.}
\newblock JEC Magazine 18, 2005.
\bibitem{ZnedekP.Bazant2002}
Zdenek P. Bazant;
\newblock {\em Size Effect Theory and its Application to Fracture of Fiber Composites and Sandwich Plates.}
\newblock in Continuum Damage Mechanics of Materials and Structures, eds. O. Allix and F. Hild, 2002.
\bibitem{RobinAmacherWayneSmithClemensDransfeldJohnBotsis2014}
Robin Amacher, Wayne Smith, Clemens Dransfeld, John Botsis, Jo\"el Cugnoni;
\newblock {\em Thin Ply: from Size-Effect Characterization to Real Life Design.}
\newblock CAMX 2014, 2014.
\bibitem{RalfCuntze}
Ralf Cuntze;
\newblock {\em The World-Wide-Failure-Exercises -I and - II for UD-materials.}
\bibitem{Pinho}
Pinho, S. T. and Pimenta, S.;
\newblock {\em Size Effects on the Strength and Toughness of Fibre-Reinforced Composites.}
\bibitem{PedroP.CamanhoCarlosG.DavilaSilvestreT.PinhoLorenzoIannucci2006}
Pedro P. Camanho, Carlos G. D\'avila, Silvestre T. Pinho, Lorenzo Iannucci, Paul Robinson;
\newblock {\em Prediction of in situ strengths and matrix cracking in composites under transverse tension and in-plane shear.}
\newblock Composites Part A: Applied Science and Manufacturing, vol. 37, n. 2, 2006.
\bibitem{P.P.CamanhoP.Maimi2007}
P.P. Camanho, P. Maim\'i, C.G. D\'avila;
\newblock {\em Prediction of size effects in notched laminates using continuum damage mechanics.}
\newblock Composites Science and Technology, vol. 67, n. 13, 2007.
\bibitem{Nairn1992}
J. A. Nairn;
\newblock {\em The Initiation and Growth of Delaminations Induced by Matrix Microcracks in Laminated Composites.}
\newblock International Journal of Fracture, vol. 57, 1992.
\bibitem{JoelCugnoniRobinAmacher2013}
Joel Cugnoni, Robin Amacher, John Botsis;
\newblock {\em Thin ply technology advantages. An overview of the TPT-TECA project.}
\newblock 2014.
\end{thebibliography}
\end{frame}
\begin{frame}[plain]
\frametitle{}
\end{frame}
\end{document}
% How VMware/Vbox link parent and child Virtual Disks.
\documentclass{article}
%\title[Stuart 1 XJC 0]{How I fought with XJC and won!}
\title{Virtual Disk Generation Linking}
\author{Stuart Maclean \\
Applied Physics Laboratory \\
University of Washington}
%\texttt{[email protected]}}
\date{October 2015}
\begin{document}
\maketitle
\section{Introduction}
See also the VMWare 'vmdk\_specs.pdf' file, which contains a
field-by-field description of the VMWare 'Descriptor File' format.
\section{Virtual Box VDI Files}
\section{VMWare VMDK Files}
The VMDK file metadata below was produced by the vmvols tool 'vmdkinfo', which
prints the SparseExtentHeader and Descriptor info at the head of any .vmdk file.
We have examples of various standalone (single generation) and
parent-child relationships across .vmdk files where
\begin{itemize}
\item both initial disk and snapshot
created within a VMWare host-based product, e.g. Workstation.
\item both initial disk and snapshot
created within VirtualBox, using the .vmdk hard drive file type (and
not the native VDI format)
\item the initial disk is created via a VirtualBox import operation of an OVF
package.
\item we also examine the streamOptimized vmdk file variant used in
OVF/OVA packages (an output of e.g. packer)
\end{itemize}
\subsection{Disks created by VMWare Workstation}
First generation of a virtual machine hard drive. VM generated by
VMWare Workstation Pro 12 (trial edition), Oct 2015.
\begin{verbatim}
$ vmdkinfo ~/vmware/Windows\ 7\ x64/Windows\ 7\ x64.vmdk
Flags: 00000003
Version: 1
Capacity: 125829120
GrainSize: 128
DescriptorOffset: 1
DescriptorSize: 20
NumGTEsPerGT: 512
rgdOffset: 21
gdOffset: 7716
Overhead: 15488
Compression: 0
# Disk DescriptorFile
version=1
encoding="UTF-8"
CID=7c77be3e
parentCID=ffffffff
isNativeSnapshot="no"
createType="monolithicSparse"
# Extent description
RW 125829120 SPARSE "Windows 7 x64.vmdk"
# The Disk Data Base
#DDB
ddb.adapterType = "lsilogic"
ddb.geometry.cylinders = "7832"
ddb.geometry.heads = "255"
ddb.geometry.sectors = "63"
ddb.longContentID = "998e1e01d9a37fe7ae03f9e17c77be3e"
ddb.uuid = "60 00 C2 92 c7 b3 76 ce-43 c5 92 ee 09 64 6c 58"
ddb.virtualHWVersion = "12"
\end{verbatim}
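The descriptor shown above is plain text made of key=value pairs plus
'ddb.*' entries. As a rough sketch (not a transcript of vmdkinfo), the
interesting fields can be pulled out with a few lines of Python:
\begin{verbatim}
import re

def parse_descriptor(text):
    """Collect key=value pairs and ddb.* entries from a VMDK descriptor."""
    fields = {}
    for line in text.splitlines():
        line = line.strip()
        if not line or line.startswith("#"):
            continue                      # skip comments and blank lines
        m = re.match(r'([\w.]+)\s*=\s*"?([^"]*)"?\s*$', line)
        if m:
            fields[m.group(1)] = m.group(2)
    return fields

# Example lookups: fields.get("CID"), fields.get("parentCID"),
# fields.get("parentFileNameHint"), fields.get("ddb.uuid.parent")
\end{verbatim}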
\subsection{Snapshot Disks created by VMWare Workstation}
Snapshot (second generation) of the virtual machine hard drive
above. VM generated by VMWare Workstation Pro 12 (trial edition), Oct
2015. Note how little/vague the 'parent pointer' is, just a 'hint' in
the descriptor.
\begin{verbatim}
$ vmdkinfo ~/vmware/Windows\ 7\ x64/Windows\ 7\ x64-000001.vmdk
Flags: 00000003
Version: 1
Capacity: 125829120
GrainSize: 128
DescriptorOffset: 1
DescriptorSize: 20
NumGTEsPerGT: 512
rgdOffset: 21
gdOffset: 7716
Overhead: 15488
Compression: 0
# Disk DescriptorFile
version=1
encoding="UTF-8"
CID=7c77be3e
parentCID=7c77be3e
isNativeSnapshot="no"
createType="monolithicSparse"
parentFileNameHint="/home/stuart/vmware/Windows 7 x64/Windows 7 x64.vmdk"
# Extent description
RW 125829120 SPARSE "Windows 7 x64-000001.vmdk"
# The Disk Data Base
#DDB
\end{verbatim}
Heuristic: if the descriptor has a parentFileNameHint entry, use it to
locate a parent. The CID and parentCID in the child are equal, and both are
equal to the CID in the parent file. Use this fact as an extra check?
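A sketch of that heuristic in Python, reusing the parse\_descriptor helper
above (the CID comparison is the extra check just mentioned):
\begin{verbatim}
import os

def locate_parent(child_fields, child_path):
    """Follow parentFileNameHint; resolve relative hints against the child's dir."""
    hint = child_fields.get("parentFileNameHint")
    if hint is None:
        return None                       # base disk, no parent
    if not os.path.isabs(hint):
        hint = os.path.join(os.path.dirname(child_path), hint)
    return hint if os.path.exists(hint) else None

def check_link(child_fields, parent_fields):
    """Extra check: the child's parentCID should equal the parent's CID."""
    return child_fields.get("parentCID") == parent_fields.get("CID")
\end{verbatim}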
\subsection{Further Snapshot Disks created by VMWare Workstation}
Additional snapshot (third generation) of the virtual machine hard
drive above in examples 1 and 2. VM generated by VMWare Workstation
Pro 12 (trial edition), Oct 2015. Note how little/vague the 'parent
pointer' is, just a 'hint' in the descriptor.
\begin{verbatim}
[~/.../infosec/vmvols/cli (master)]$ ./vmdkinfo ~/vmware/Windows\ 7\ x64/Windows\ 7\ x64-000002.vmdk
Flags: 00000003
Version: 1
Capacity: 125829120
GrainSize: 128
DescriptorOffset: 1
DescriptorSize: 20
NumGTEsPerGT: 512
rgdOffset: 21
gdOffset: 7716
Overhead: 15488
Compression: 0
# Disk DescriptorFile
version=1
encoding="UTF-8"
CID=7c77be3e
parentCID=7c77be3e
isNativeSnapshot="no"
createType="monolithicSparse"
parentFileNameHint="/home/stuart/vmware/Windows 7 x64/Windows 7 x64-000001.vmdk"
# Extent description
RW 125829120 SPARSE "Windows 7 x64-000002.vmdk"
# The Disk Data Base
#DDB
\end{verbatim}
Note the parentFileNameHint in example 3, as in example 2. Note
also that example 3 appears to have no 'DDB' entries, same as example 2.
\subsection{Disks of a Cloned VM in VMware Workstation}
Clone of the VM containing the disk mentioned above in examples 1-3.
The clone operation was done in the same VMware Workstation product as
was used to build the disks of examples 1-3. Note the somewhat odd
.vmdk name, containing 'cl' for 'clone'.
\begin{verbatim}
[~/.../infosec/vmvols/model (master)]$ ../cli/vmdkinfo ~/vmware/Clone\ of\ Windows\ 7\ x64/Windows\ 7\ x64-cl1.vmdk
Flags: 00000003
Version: 1
Capacity: 125829120
GrainSize: 128
DescriptorOffset: 1
DescriptorSize: 20
NumGTEsPerGT: 512
rgdOffset: 21
gdOffset: 7716
Overhead: 15488
Compression: 0
# Disk DescriptorFile
version=1
encoding="UTF-8"
CID=7c77be3e
parentCID=ffffffff
isNativeSnapshot="no"
createType="monolithicSparse"
# Extent description
RW 125829120 SPARSE "Windows 7 x64-cl1.vmdk"
# The Disk Data Base
#DDB
ddb.adapterType = "lsilogic"
ddb.deletable = "true"
ddb.geometry.cylinders = "7832"
ddb.geometry.heads = "255"
ddb.geometry.sectors = "63"
ddb.longContentID = "ba254740a7f06f67777b34661a687c32"
ddb.uuid = "60 00 C2 91 ac 9e 55 71-5a a4 b4 1c 71 48 8f be"
ddb.virtualHWVersion = "12"
\end{verbatim}
Note how the CID is actually the same as in the VM from which this VM
was cloned! The CID therefore cannot be used as a definitive
identifier for a VMDK disk. Hint: use the ddb.uuid instead, or some
combination of the two?
\subsection{Disks created by Packer}
Standalone vmdk produced by packer (v0.8.6) with a 'virtualbox-iso'
builder. Note that even though the configuration is for virtualBox,
the file format for the virtual disk is a .vmdk. Note how the
createType is 'streamOptimized'. The .vmdk file is associated with a
.ovf file (found alongside the .vmdk). Note also how packer adds the
'ddb.uuid.*' fields into the descriptor. These will be used in
VirtualBox imports at least.
\begin{verbatim}
$ ./vmdkinfo ~/apl/projects/infosec/packer-vms/ubuntu-12.04.4-amd64/base/products/ubuntu-12.04.5-amd64-base-disk1.vmdk
Flags: 00030001
Version: 3
Capacity: 81920000
GrainSize: 128
DescriptorOffset: 1
DescriptorSize: 2
NumGTEsPerGT: 512
rgdOffset: 0
gdOffset: -1
Overhead: 128
Compression: 1
# Disk DescriptorFile
version=1
CID=9f5528be
parentCID=ffffffff
createType="streamOptimized"
# Extent description
RDONLY 81920000 SPARSE "ubuntu-12.04.5-amd64-base-disk1.vmdk"
# The disk Data Base
#DDB
ddb.virtualHWVersion = "4"
ddb.adapterType="ide"
ddb.geometry.cylinders="16383"
ddb.geometry.heads="16"
ddb.geometry.sectors="63"
ddb.geometry.biosCylinders="1024"
ddb.geometry.biosHeads="255"
ddb.geometry.biosSectors="63"
ddb.uuid.image="ba1d7b83-2e83-4777-90fb-61c8251ccd69"
ddb.uuid.parent="00000000-0000-0000-0000-000000000000"
ddb.uuid.modification="00000000-0000-0000-0000-000000000000"
ddb.uuid.parentmodification="00000000-0000-0000-0000-000000000000"
ddb.comment=""
\end{verbatim}
Note the ddb.uuid.* entries in the DDB section. Not sure what these
are for?
Recall that this streamOptimized form of VMDK file is suitable for
OVF/OVA packages but not for direct attachment into a VM. Instead,
its OVF/OVA package must be first imported by the VM engine. The
import process reads the streamOptimized input, but writes e.g. a
monolithicSparse version locally as the VM's actual virtual disk
format.
\subsection{VMDK Disks created by/in VirtualBox}
Created in/by VirtualBox, but by selecting 'VMDK' as the 'Hard Drive
Format Type' in the VM creation wizard.
\begin{verbatim}
$ ./vmdkinfo ~/VirtualBox\ VMs/Blank_VMDK/Blank_VMDK.vmdk
Flags: 00000003
Version: 1
Capacity: 268435456
GrainSize: 128
DescriptorOffset: 1
DescriptorSize: 20
NumGTEsPerGT: 512
rgdOffset: 21
gdOffset: 16437
Overhead: 32896
Compression: 0
# Disk DescriptorFile
version=1
CID=fe21c26a
parentCID=ffffffff
createType="monolithicSparse"
# Extent description
RW 268435456 SPARSE "Blank_VMDK.vmdk"
# The disk Data Base
#DDB
ddb.virtualHWVersion = "4"
ddb.adapterType="ide"
ddb.uuid.image="c86e611c-1092-48b0-b257-3e9480018efa"
ddb.uuid.parent="00000000-0000-0000-0000-000000000000"
ddb.uuid.modification="00000000-0000-0000-0000-000000000000"
ddb.uuid.parentmodification="00000000-0000-0000-0000-000000000000"
\end{verbatim}
\subsection{Snapshots in VirtualBox of VMDK Disks created in VirtualBox}
A snapshot, taken by/in VirtualBox, of the .vmdk file described above
(previous subsection):
\begin{verbatim}
$ ./vmdkinfo ~/VirtualBox\ VMs/Blank_VMDK/Snapshots/\{b23c64d7-e938-4f7b-bc80-c56f4390dfe7\}.vmdk
Flags: 00000003
Version: 1
Capacity: 268435456
GrainSize: 128
DescriptorOffset: 1
DescriptorSize: 20
NumGTEsPerGT: 512
rgdOffset: 21
gdOffset: 16437
Overhead: 32896
Compression: 0
# Disk DescriptorFile
version=1
CID=f5050853
parentCID=ffffffff
createType="monolithicSparse"
# Extent description
RW 268435456 SPARSE "{b23c64d7-e938-4f7b-bc80-c56f4390dfe7}.vmdk"
# The disk Data Base
#DDB
ddb.virtualHWVersion = "4"
ddb.adapterType="ide"
ddb.uuid.image="b23c64d7-e938-4f7b-bc80-c56f4390dfe7"
ddb.uuid.parent="c86e611c-1092-48b0-b257-3e9480018efa"
ddb.uuid.modification="00000000-0000-0000-0000-000000000000"
ddb.uuid.parentmodification="00000000-0000-0000-0000-000000000000"
\end{verbatim}
Note how the CID and parentCID in this descriptor do {\em not} give
any hint on how to locate/identify the parent. The CIDs of this example
disk and of its parent (previous subsection) appear unrelated.
What {\em is} useful are the ddb.uuid.* entries, which VirtualBox
seems to use whereas VMware products do not. The parent-child
relationship between these two disks can be seen in the uuid.parent
of the child and the uuid.image of the parent. Recall that in
VirtualBox's native VDI format, uuidImage and uuidParent are both
fields present in the 'VDI header', so it's almost as if VirtualBox
were shoe-horning its own parent/child linking information into a
VMDK Descriptor file.
Heuristic: if the descriptor has a ddb.uuid.parent entry, use it to
locate a parent. Such a parent will have a ddb.uuid.image descriptor
entry whose value matches the uuid.parent entry in the child.
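A sketch of this second heuristic, again assuming the candidate
descriptors have already been parsed into (path, fields) pairs with the
parse\_descriptor helper above:
\begin{verbatim}
NULL_UUID = "00000000-0000-0000-0000-000000000000"

def locate_parent_by_uuid(child_fields, candidates):
    """candidates: list of (path, fields) pairs, one per known descriptor."""
    parent_uuid = child_fields.get("ddb.uuid.parent", NULL_UUID)
    if parent_uuid == NULL_UUID:
        return None                       # base disk, no parent
    for path, fields in candidates:
        if fields.get("ddb.uuid.image") == parent_uuid:
            return path
    return None
\end{verbatim}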
\subsection{Disk created by VirtualBox via Import of OVF/OVA}
VirtualBox import of the OVF package created for Example Disk 5:
\begin{verbatim}
$ ./vmdkinfo ~/VirtualBox\ VMs/ubuntu-12.04.5-amd64-base/ubuntu-12.04.5-amd64-base-disk1.vmdk
Flags: 00000003
Version: 1
Capacity: 81920000
GrainSize: 128
DescriptorOffset: 1
DescriptorSize: 20
NumGTEsPerGT: 512
rgdOffset: 21
gdOffset: 5031
Overhead: 10112
Compression: 0
# Disk DescriptorFile
version=1
CID=6704c82b
parentCID=ffffffff
createType="monolithicSparse"
# Extent description
RW 81920000 SPARSE "ubuntu-12.04.5-amd64-base-disk1.vmdk"
# The disk Data Base
#DDB
ddb.virtualHWVersion = "4"
ddb.adapterType="ide"
ddb.geometry.cylinders="16383"
ddb.geometry.heads="16"
ddb.geometry.sectors="63"
ddb.geometry.biosCylinders="1024"
ddb.geometry.biosHeads="255"
ddb.geometry.biosSectors="63"
ddb.uuid.image="429e7834-80be-4bf5-a72f-a16c18cde00d"
ddb.uuid.parent="00000000-0000-0000-0000-000000000000"
ddb.uuid.modification="00000000-0000-0000-0000-000000000000"
ddb.uuid.parentmodification="00000000-0000-0000-0000-000000000000"
ddb.comment=""
\end{verbatim}
\end{document}
% eof
%%%%%%%%%%%%%%%%
% LaTeX
%%%%%%%%%%%%%%%%%%
\documentclass[12pt]{article}
\oddsidemargin 0in \evensidemargin 0in \textheight 9in \textwidth
6.5in \topmargin 0in \headheight 0in
\parindent 20 pt
\headsep 0 in
\usepackage{amsthm,amssymb}
\usepackage{gb4e} % by me
\newtheorem*{thm}{Theorem}
%\newtheorem{cor}[thm]{Corollary}
\newtheorem{theorem}{Theorem}%[section]
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{hypothesis}[theorem]{Hypothesis}
\newtheorem{example}[theorem]{Example}
%\newenvironment{proof}[1][Proof]%
%{\par\addvspace{6pt}\noindent{\bf #1.}\hskip\labelsep\ignorespaces}%
%{{\hfill $\square$}\par\addvspace{6pt}}
\def\bull{\vrule height .9ex width .8ex depth -.1ex }
%\magnification=\magstep1
\raggedbottom
\def\card#1{\vert #1 \vert}
\def\gpindex#1#2{\card {#1\colon #2}}
\def\irr#1{{\textrm Irr}(#1)}
\def\ibr#1{{\textrm IBr} (#1)}
\def\irri#1#2{{\textrm Irr}_{#1} (#2)}
\def\cd#1{{\textrm cd}(#1)}
\def\cent#1#2{{\bf C}_{#1}(#2)}
\def\gpcen#1{{\bf Z} (#1)}
\def\ker#1{{\textrm ker} (#1)}
%\def\ref#1{{\bf [#1]}}
\def\form#1#2#3{\langle\langle #1, #2 \rangle\rangle_{#3}}
\def\empform#1{\form {\cdot}{\cdot}{#1}}
\def\norm#1#2{{\textrm N}_{#1} (#2)}
\def\NN{{\cal N}}
\def\SS{{\cal S}}
\def\UU{{\cal U}}
\def\TT{{\cal T}}
\def\B#1#2{{\textrm B}_{#1} (#2)}
\def\Bpi#1{\B {\pi}{#1}}
\def\D#1#2{{\textrm D}_{#1} (#2)}
\def\Dpi#1{\D {\pi} {#1}}
\def\I#1#2{{\textrm I}_{#1} (#2)}
\def\Ipi#1{\I {\pi}{#1}}
\def\phi{\varphi}
\newcommand \IIpi[3] {{\textrm I}_{#1} (#2 \mid #3)}
\begin{document}
\title{Inducing $\pi$-partial characters with a given vertex}
\author {
Mark L.\ Lewis
\\ {\it Department of Mathematical Sciences, Kent State University}
\\ {\it Kent, Ohio 44242}
\\ E-mail: [email protected]
}
%\date{July 17, 2010}
\maketitle
\begin{abstract}
Let $G$ be a solvable group. Let $p$ be a prime and let $Q$ be a
$p$-subgroup of a subgroup $H$. Suppose $\phi \in \ibr G$. If
either $|G|$ is odd or $p = 2$, we prove that the number of Brauer
characters of $H$ inducing $\phi$ with vertex $Q$ is at most $|\norm
GQ: \norm HQ|$.
MSC Primary: 20C20, MSC Secondary: 20C15
Keywords: Brauer characters, partial characters, vertices
\end{abstract}
%Inducing partial characters with a given vertex
%These were originally notes written in December 2010. This result
%arose in some work that I doing with JP.
\section{Introduction}
Throughout this note, $G$ is a finite group, and $\irr G$ is the set
of irreducible characters of $G$. Suppose $\chi \in \irr G$ and $H$
is a subgroup of $G$. It is easy to obtain an upper bound on the
number of characters in $\irr H$ that induce $\chi$. Let $\phi_1,
\dots, \phi_n \in \irr H$ be the characters so that $\phi_i^G =
\chi$. Evaluating at $1$, we obtain $\phi_i (1) = \chi (1)/|G:H|$
for each $i$. By Frobenius reciprocity (Lemma 5.2 of \cite{text}),
each $\phi_i$ is a constituent of $\chi_H$ with multiplicity $1$.
Since there are $n$ such characters occurring as constituents of
$\chi_H$, it follows that $n (\chi (1)/|G:H|) \le \chi (1)$. We
deduce that $n \le |G:H|$, and we have an upper bound. If $H$ is
normal in $G$, this bound is obtained, and it is not particularly
difficult to find nonnormal subgroups where this bound is obtained.
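For example, if $G$ is the symmetric group $S_{3}$ and $H$ is its normal
subgroup of order $3$, then both nontrivial linear characters of $H$
induce the irreducible character of $G$ of degree $2$, so the bound
$|G:H| = 2$ is attained.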
We now turn our attention to Brauer characters. Fix a prime $p$. We
will write $\ibr G$ for the irreducible $p$-Brauer characters of
$G$. If $\phi \in \ibr G$, then it is easy to adapt the above proof
to show that $\phi$ is induced by at most $|G:H|$ Brauer characters
of $H$. However, associated with $\phi$ are certain $p$-subgroups
of $G$ called the vertex subgroups. When $G$ is a $p$-solvable
group, a $p$-subgroup $Q$ of $G$ is defined to be a vertex for
$\phi$ if there is a subgroup $U$ of $G$ so that $\phi$ is induced
by a Brauer character of $U$ with $p'$-degree and $Q$ is a Sylow
$p$-subgroup of $U$. It is known that all the vertex subgroups of
$\phi$ are conjugate in $G$. If $\phi$ is induced from $\tau \in
\ibr H$, it is easy to see that a vertex for $\tau$ is a vertex for
$\phi$. Thus, $H$ contains some vertex $Q$ for $\phi$. Now,
different Brauer characters of $H$ that induce $\phi$ may have
vertex subgroups that are not conjugate in $H$ but are necessarily
conjugate in $G$. Hence, one can ask the following question: if
$\phi \in \ibr G$ has vertex $Q$ and $Q \le H$, how many characters
in $\ibr H$ with vertex $Q$ induce $\phi$? When either $|G|$ is odd
or $G$ is solvable and $p = 2$, we can obtain an upper bound for
this question.
\begin{theorem} \label{main}
Let $G$ be a solvable group and $p$ a prime. Assume either $|G|$ is
odd or $p = 2$. Let $Q$ be a $p$-subgroup of $H$. If $\phi \in \ibr
G$, then the number of Brauer characters of $H$ with vertex $Q$ that
induce $\phi$ is at most $|\norm GQ:\norm HQ|$.
\end{theorem}
At this time, we are not able to determine whether or not this
theorem is true if we loosen the hypothesis that either $|G|$ is odd
or $p = 2$. In other words, is the conclusion still true if $G$ is
a solvable group of even order and $p$ is an odd prime?
%%%%%%%%%%%%%%%%%%INSERT HERE
%%%%%%%%%%%END INSERT
This result
was motivated by our work with J. P. Cossey. If we could prove the
conclusion of Theorem \ref{main} when $p$ is odd, then we would be
able to prove J. P. Cossey's conjecture that the number of lifts of
a Brauer character is bounded by the index of the derived subgroup of
a vertex subgroup in the vertex subgroup when $p$ is odd. Our argument can be found in
the preprint \cite{preprint}.
We would like to thank J. P. Cossey and I. M. Isaacs for several
helpful discussions while we were preparing this note.
\section{Results}
We will work in the more general setting of irreducible $\pi$-partial
characters of a $\pi$-separable group $G$. We briefly mention here
that if $\pi$ is a set of primes and $G$ is a $\pi$-separable group,
one can define (see \cite{pipart} for more details) a set of class
functions $\Ipi G$ from the set $G^o$ (which consists of the
elements of $G$ whose order is divisible by only the primes in
$\pi$) to ${\bf C}$ that plays the role of $\ibr G$; in fact
$\Ipi G = \ibr G$ when $\pi = \{ p \}'$, the complement of the prime
$p$.
%In this note, I fix a $\pi$-partial character $\phi$ and a subgroup
%$V$ that contains a $\pi'$-subgroup $Q$. I obtain the upper bound
%of $|\norm GQ:\norm VQ|$ on the number of $\pi$-partial characters
%of $V$ with vertex $Q$ that induce $\phi$ under the assumption that
%$G$ is solvable and either $|G|$ is odd or $2 \not\in \pi$.
%
%Of course, the case I need for the work with JP is the case when $2
%\in \pi$. :-(
We start by considering vertices in Clifford correspondence (see
Proposition 3.2 of \cite{Fong}). Let $G$ be a $\pi$-separable
group. Let $N$ be a normal subgroup of $G$. Fix $\phi \in \Ipi G$.
If $\alpha \in \Ipi N$ is a constituent of $\phi_N$, then we write
$G_\alpha$ for the stabilizer of $\alpha$ in $G$, and we write
$\phi_\alpha$ for the Clifford correspondent of $\phi$ with respect
to $\alpha$. We will see that the conjugates of a Clifford
correspondent having a given vertex are controlled by the normalizer
of that vertex.
\begin{lemma}\label{cliff}
Let $G$ be a $\pi$-separable group. Let $N$ be a normal subgroup of
$G$. Suppose that $\alpha \in \Ipi N$. Let $\phi \in \Ipi G$ and
$\hat\phi \in \Ipi {G_\alpha}$ so that $\hat\phi^G = \phi$. Suppose
that $Q$ is a vertex for $\hat\phi$. Then $Q$ is a vertex is
$\hat\phi^g$ if and only if there exists $n \in \norm GQ$ so that
$G_\alpha g = G_\alpha n$.
\end{lemma}
\begin{proof}
We first suppose that there exists $n \in \norm GQ$ so that
$G_\alpha g = G_\alpha n$. Thus, $g = tn$ for some $t \in
G_\alpha$. We see that $\hat\phi^g = \hat\phi^{tn} = \hat\phi^n$.
We see that $Q = Q^n$ is a vertex for $\hat\phi^n = \hat\phi^g$.
Conversely, suppose that $Q$ is a vertex for $\hat\phi^g$. Then
$Q^{g^{-1}}$ is a vertex for $\hat\phi$. Since $Q$ is also a vertex
for $\hat\phi$, we have $Q^{g^{-1}} = Q^t$ for some $t \in
G_\alpha$. It follows that $Q = Q^{tg}$, and so, $tg \in \norm GQ$.
This implies that $tg = n$ for some $n \in \norm GQ$. This implies
that $n \in G_\alpha g$, and we conclude that $G_\alpha n = G_\alpha
g$.
\end{proof}
We continue to work in the context of the Clifford correspondence.
In this case, we can get an exact count of the number of partial
characters in $N$ whose Clifford correspondent has vertex $Q$.
\begin{corollary}\label{cliff count}
Let $G$ be a $\pi$-separable group. Let $N$ be a normal subgroup of
$G$, let $\phi \in \Ipi G$ have vertex $Q$, and suppose that $\beta$
is an irreducible constituent of $\phi_N$ so that $\phi_\beta$ has
vertex $Q$. Then $|\{ \alpha \in \Ipi N \mid \phi_\alpha {\textrm ~
has~vertex~} Q \}| = |\norm GQ : \norm {G_\beta}Q|$.
\end{corollary}
\begin{proof}
%We know that $\phi_\beta$ has vertex $Q^*$. Observe that $Q^*$ is a
%vertex for $\phi$ so $(Q^*)^g = Q$ for some $g \in G$. Replacing
%$\beta$ by $\beta^g$, we may assume that $Q^* = Q$, so $Q$ is a
%vertex for $\phi_\beta$.
By Lemma \ref{cliff}, we see that $\phi_\alpha$ has $Q$ as a vertex
if and only if $\alpha = \beta^g$ where $g \in G$ and $g \in G_\beta
n$ for some $n \in \norm GQ$. Finally, we observe that $G_\beta n_1
= G_\beta n_2$ if and only if $\norm {G_\beta}Q n_1 = \norm
{G_\beta}Q n_2$ for $n_1, n_2 \in \norm GQ$. We have $|\{ \alpha
\in \Ipi N \mid \phi_\alpha \textrm{ has vertex } Q \}| = | \{ G_\beta
n \mid n \in \norm GQ \} | = |\norm GQ :\norm {G_\beta}Q|$.
\end{proof}
We now look at the conditions of a minimal counterexample. For this
we need to review and develop more notation. We make use of the
canonical set of $\pi$-lifts, $\Bpi G$, that was defined in
\cite{pisep} by Isaacs. In other words, $\Bpi G \subseteq \irr G$
and the map $\chi \mapsto \chi^o$ is a bijection from $\Bpi G$ to
$\Ipi G$. Closely related to this set is the subnormal nucleus
which also was defined in \cite{pisep}. To define the subnormal
nucleus, we need the $\pi$-special characters. Let $G$ be a
$\pi$-separable group. A character $\chi \in \irr G$ is
$\pi$-special if $\chi (1)$ is a $\pi$-number and for every
subnormal subgroup $M$ of $G$, the irreducible constituents of $\chi_M$
have determinants that have $\pi$-order. Many of the basic results
of $\pi$-special characters can be found in Section 40 of
\cite{hupte} and Chapter VI of \cite{Mawo}. One result that is
proved is that if $\alpha$ is $\pi$-special and $\beta$ is
$\pi'$-special, then $\alpha \beta$ is necessarily irreducible. We
say that $\chi$ is {\bf factored} if $\chi = \alpha \beta$ where
$\alpha$ is $\pi$-special and $\beta$ is $\pi'$-special. We also
note that if $\chi \in \Bpi G$ and $N$ is normal in $G$, then the
irreducible constituents of $\chi_N$ lie in $\Bpi N$.
%We need to define Dpi and then Cpi. We also need to talk about the
%magic character automorphism.
%Next, we prove a simple lemma regarding $\pi$-special characters
%that essentially follows from \cite{gajen} or Lemma 21.4 of
%\cite{Mawo}. We should note that this lemma is not particularly
%new.
%\begin{lemma} \label{pispec}
%Let $N \le K$ be normal subgroups of a $\pi$-separable group $G$.
%Suppose that $K/N$ is a $p$-group for some prime $p$. If $\alpha
%\in \irr N$ is $\pi$-special, then all irreducible constituents of
%$\alpha^K$ lying in $\Cpi K$ are $\pi$-special.
%\end{lemma}
%The proof needs to be changed for Cpi.
%\begin{proof}
%If $p \in \pi$, then by Lemma 21.4 of \cite{Mawo} all the
%irreducible constituents of $\alpha^K$ are $\pi$-special, and the
%result follows. Thus, we may assume that $p$ is not in $\pi$. Thus,
%$\alpha (1) o(\alpha)$ is coprime to $|K:N|$. By Corollary 6.28 of
%\cite{text}, $\alpha$ extends to $K$, and $\alpha$ has a unique
%extension $\beta$ with $o(\beta) = o (\alpha)$. By Lemma 21.4 of
%\cite{Mawo}, $\beta$ is $\pi$-special. On the other hand, we have
%by Gallagher's theorem (Corollary 6.17 of \cite{text}), the
%irreducible constituents of $\alpha^K$ have the form $\beta \nu$
%where $\nu \in \irr {K/N}$. Since $K/N$ is a $\pi'$-group, $\nu$ is
%$\pi'$-special. We now apply Lemma 5.4 of \cite{pisep} to see that
%a character in $\Bpi K$ is factored if and only if it is
%$\pi$-special. Since all the irreducible constituents of $\alpha^K$
%are $\pi$-factored, the irreducible constituents of $\alpha^K$ lying
%in $\Bpi K$ are $\pi$-special.
%\end{proof}
If $\chi \in \irr G$, Isaacs constructs the subnormal nucleus as
follows. Let $M$ be maximal so that $M$ is subnormal in $G$ and the
irreducible constituents of $\chi_M$ are factored. Let $\mu$ be an
irreducible constituent of $\chi_M$ and let $T$ be the stabilizer of
$(M,\mu)$ in $G$. Isaacs proved in \cite{pisep} that there is a
Clifford theorem for $T$. In other words, there is a unique
character $\tau \in \irr {T \mid \mu}$ so that $\tau^G = \chi$. He
also proved that $(M,\mu)$ is unique up to conjugacy, and so,
$(T,\tau)$ is unique up to conjugacy. If $T = G$, then $\chi$ is
$\pi$-factored and we take $(G,\chi)$ to be the subnormal nucleus of
$\chi$. If $T < G$, then inductively, the subnormal nucleus for
$\tau$ is the subnormal nucleus for $\chi$. We write $(W,\gamma)$
for the subnormal nucleus of $\chi$, and Isaacs showed that
$\gamma^G = \chi$, $\gamma$ is factored, and $(W,\gamma)$ is unique
up to conjugacy. A character $\chi \in \irr G$ is in $\Bpi G$ if
and only if the character of its nucleus is $\pi$-special.
If $Q$ is a $\pi'$-subgroup of $G$, then we use $\Ipi {G \mid Q}$ to
denote the $\pi$-partial characters in $\Ipi G$ that have vertex
$Q$. If $\phi \in \Ipi G$ and $V \le G$, then we write $\IIpi
{\phi}VQ = \{ \eta \in \Ipi {V \mid Q} \mid \eta^G = \phi \}$. We
now find details about properties of a minimal counterexample. We
will see that a counterexample cannot occur when either $|G|$ is odd
or $2$ is not in $\pi$. Our goal is find enough information so that
we can either find a contradiction or build an example when $|G|$ is
even and $2 \in \pi$.
\begin{theorem} \label{min counter}
Let $G$ be a solvable group. Assume $\phi \in \Ipi G$ has vertex
$Q$, let $V$ be a subgroup of $G$, and let $N$ be the core of $V$ in
$G$. If $G$ and $V$ are chosen so that $|G| + |G:V|$ is minimal
subject to the condition that $|\IIpi {\phi}VQ| > |\norm GQ:\norm
VQ|$, then the following are true:
\begin{enumerate}
\item $V$ is a nonnormal maximal subgroup of $G$,
\item $|G:V|$ is a power of $2$,
\item $2 \in \pi$,
\item $Q \le V$,
\item $\phi_N = a \alpha$ for some positive integer $a$ and some $\alpha \in \Ipi N$,
\item $\alpha (1)$ is a $\pi$-number,
\item if $K$ is normal in $G$ so that $K/N$ is a chief factor for
$G$, then $\alpha$ is fully ramified with respect to $K/N$.
\end{enumerate}
\end{theorem}
\begin{proof}
If either $V = G$ or $\IIpi {\phi}VQ$ is empty, then $|\IIpi
{\phi}VQ| \le |\norm GQ:\norm VQ|$ contradicting the hypotheses.
Thus, $V < G$ and $\IIpi {\phi}VQ$ is not empty, and so, $Q \le V$
and there exist characters in $\Ipi V$ that induce $\phi$ and have
vertex $Q$.
We begin by showing that $V$ is a maximal subgroup. Suppose that $V
< M < G$ for some subgroup $M$. Let $\IIpi {\phi}MQ = \{ \eta_1,
\dots, \eta_m \}$. Using minimality, we have $m = |\IIpi {\phi}MQ|
\le |\norm GQ:\norm MQ|$. Suppose that $\zeta \in \IIpi {\phi}VQ$,
then $\zeta^M \in \Ipi M$ and $\zeta^M$ has $Q$ as a vertex. Since
$(\zeta^M)^G = \zeta^G = \phi$, we see that $\zeta^M \in \IIpi
{\phi}MQ$. It follows that $\zeta^M = \eta_i$ for some $i$. We
conclude that $|\IIpi {\phi}VQ| \le \sum_{i=1}^m |\IIpi
{\eta_i}VQ|$. Since this contradicts our hypothesis, we obtain
$|\IIpi {\eta_i}VQ| \le |\norm MQ:\norm VQ|$. We deduce that
$$
|\IIpi {\phi}VQ| \le m|\norm MQ:\norm VQ| \le
|\norm GQ:\norm MQ||\norm MQ: \norm VQ| = |\norm GQ:\norm VQ|.
$$
Since this violates the hypotheses, $V$ is maximal in $G$.
If $V$ is normal in $G$, then either $\phi$ is induced from $V$ or
$\phi$ restricts irreducibly to $V$. If $\phi$ is induced from $V$,
then we can apply Corollary \ref{cliff count} to see that $|\IIpi
{\phi}VQ| \le |\norm GQ:\norm VQ|$ in violation of the hypotheses.
If $\phi$ restricts irreducibly, then it cannot be induced from $V$,
and we have seen that this is also a contradiction. We conclude
that $V$ is not normal in $G$.
Suppose $\alpha \in \Ipi N$ is a constituent of $\phi_N$. We use
$\phi_\alpha \in \Ipi {G_\alpha \mid \alpha}$ to denote the Clifford
correspondent for $\phi$ with respect to $\alpha$ (see Proposition
3.2 of \cite{Fong} again). Write $\{ \alpha \in \Ipi N \mid
\phi_\alpha \textrm{ has vertex } Q \} = \{ \alpha_1, \dots, \alpha_k
\}$, and let $\phi_i = \phi_{\alpha_i}$ and $G_i = G_{\alpha_i}$. By
Corollary \ref{cliff count}, we know that $k = |\norm GQ:\norm {G_i}Q|$.
Suppose $\eta \in \IIpi {\phi}VQ$. Denote $\{ \beta \in \Ipi N \mid
\eta_\beta \textrm{ has vertex } Q \} = \{ \beta_1, \dots, \beta_l \}$,
and let $\eta_j = \eta_{\beta_j}$ and $V_j = V_{\beta_j}$. By
Corollary \ref{cliff count}, $l = |\norm VQ: \norm {V_j}Q|$ for each $j$.
We see that $(\eta_j)^G = ((\eta_j)^V)^G = \eta^G = \phi$. This
implies that $(\eta_j)^{G_{\beta_j}}$ is irreducible and has vertex
$Q$. It follows that $\beta_j = \alpha_{i_j}$ for some $i_j$. We
obtain $G_{\beta_j} = G_{i_j}$ and $(\beta_j)^{G_{i_j}} =
\alpha_{i_j}$. Observe that $V_j = G_{i_j} \cap V$, and we denote
this subgroup by $V^*_{i_j}$.
Now, we assume that $k > 1$, and we start to count. We see that
$\eta \in \IIpi {\phi}VQ$ is induced by $|\norm VQ:\norm
{V^*_{i_j}}Q|$ partial characters in $\bigcup \IIpi
{\phi_i}{V^*_i}Q$. Because $G_i < G$, we may use minimality of $|G|
+ |G:V|$ to deduce $|\IIpi {\phi_i}{V^*_i}Q| \le |\norm {G_i}Q:\norm
{V^*_i}Q|$. We compute
$$
|\IIpi {\phi}VQ| = \sum_{i=1}^k \frac 1{|\norm VQ:\norm {V^*_i}Q|}
|\IIpi {\phi_i}{V^*_i}Q| \le \sum_{i=1}^k \frac 1{|\norm VQ:\norm
{V^*_i}Q|} |\norm {G_i}Q:\norm {V^*_i}Q|.
$$
We determine that
$$
\frac 1{|\norm VQ:\norm {V^*_i}Q|} |\norm {G_i}Q:\norm {V^*_i}Q| =
\frac {|\norm {G_i}Q|}{|\norm VQ|},
$$
for each $i$. Notice that
$|\norm {G_i}Q| = |\norm {G_1}Q|$ for all $i$ and $k = |\norm GQ:
\norm {G_1} Q|$. This yields
$$
|\IIpi {\phi}VQ| \le \sum_{i=1}^k \frac {|\norm {G_1}Q|}{|\norm VQ|} =
\frac {|\norm GQ:\norm {G_1}Q| |\norm {G_1}Q|}{|\norm VQ|} = |\norm
GQ:\norm VQ|.
$$
This contradicts the hypothesis. We deduce that $k = 1$, and
$\alpha$ is invariant in $G$.
Set $\alpha = \alpha_1$, and let $\alpha^*$ be the character in
$\Bpi N$ satisfying $(\alpha^*)^o = \alpha$. Write $(W,\hat\alpha)$
for the nucleus of $\alpha^*$, and take $T$ to be the stabilizer of
$(W,\hat\alpha)$ in $G$. By Lemma 2.3 of \cite{Laradji}, there is a
unique character $\hat\phi \in \IIpi {}T{\hat\alpha}$ so that
$\hat\phi^G = \phi$ and $Q$ is a vertex for $\hat\phi$. Similarly,
if $\eta \in \IIpi {\phi}VQ$, then there is a unique character
$\hat\eta \in \IIpi {}{T \cap V}{\hat\alpha}$ so that $\hat\eta^V =
\eta$ and $Q$ is a vertex for $\hat\eta$. Observe that $\hat\eta^T
\in \IIpi {}T{\hat\alpha}$ and induces $\phi$, so $\hat\eta^T =
\hat\phi$. It follows that $|\IIpi {\phi}VQ| = |\IIpi {\hat\phi}{T
\cap V}Q|$. If $T < G$, then we can use the minimality of $|G| +
|G:V|$ to see that $|\IIpi {\hat\phi}{T \cap V}Q| \le |\norm TQ:
\norm {V \cap T}Q|$. By the diamond lemma, we have $|\norm TQ:
\norm {V \cap T}Q| = |\norm TQ: V \cap \norm TQ| \le |\norm GQ :
\norm VQ|$. This contradicts the hypotheses, and so $T = G$.
We now have that $(W,\hat\alpha)$ is $G$-invariant. By the
construction of the subnormal nucleus, this implies that $W = N$. Since
$\alpha^* \in \Bpi N$, the nucleus for $\alpha^*$ has a character
that is $\pi$-special. Thus, $\hat\alpha$ is $\pi$-special, and
since $W = N$, we see that $\hat\alpha = \alpha^*$. In particular,
$\hat\alpha$ is $\pi$-special. We deduce that $\alpha (1)$ is a
$\pi$-number.
Take $K$ normal in $G$ so that $K/N$ is a chief factor for $G$. This
is the point where we use the fact that $G$ is solvable to see that
$G = VK$ and $V \cap K = N$ where $K/N$ is an elementary abelian
$p$-group for some prime $p$. (This is the only place we use the
hypothesis that $G$ is solvable in place of $G$ being
$\pi$-separable.)
%By Lemma \ref{pispec}, the constituents of
%$(\alpha^*)^K$ that lie in $\Cpi K$ are $\pi$-special.
%, then the irreducible constituents of ${\phi^*}_K$ lie in $\Cpi K$. Notice
%that some irreducible constituent $\delta$ of ${\phi^*}_K$ is a
%constituent of $(\alpha^*)^K$. Since $\delta$ is a constituent of
%$(\alpha^*)^K$ and $\delta \in \Bpi K$, we see that $\delta$ must be
%$\pi$-special, and so, all the irreducible constituents of
%$(\phi^*)_K$ are $\pi$-special.
%
%This needs more explanation.
%It follows that $Q \cap K$ is a Hall $\pi$-complement of $L$. Since
%$Q \cap K \le N$, it follows that $K/N$ is a $\pi$-group and $p \in
%\pi$.
%
Let $L/K$ be a chief factor for $G$. We know that $(|L:K|,|K:N|) =
1$ and $\cent {L \cap V/N}{K/N} = 1$. (See Lemma 5.1 of \cite{max} for
a proof of this.) By Problem 6.12 of \cite{text}, either $\alpha^*$
extends to $K$ or $\alpha^*$ is fully-ramified with respect to
$K/N$.
Suppose first that $\alpha^*$ extends to $K$. Notice that
multiplication by $\irr {K/N}$ is a transitive action on the
irreducible constituents of $(\alpha^*)^K$. Also, $(V \cap L)/N$
acts compatibly on the irreducible constituents of $(\alpha^*)^K$
and on $\irr {K/N}$, where the action on $\irr {K/N}$ is coprime. We
can use Glauberman's lemma (Lemma 13.8 of \cite{text}) to see that
$\alpha^*$ has a $V \cap L$-invariant extension. The corollary to
Glauberman's lemma (Corollary 13.9 of \cite{text}) can be applied to
see that $\alpha^*$ has a unique $V \cap L$-invariant extension
$\delta$. Since $V$ permutes the $V \cap L$-invariant extensions of
$\alpha^*$, it follows that $\delta$ is $V$-invariant. We now use
Corollary 4.2 of \cite{pisep} to see that restriction is a bijection
from $\irr {G \mid \delta}$ to $\irr {V \mid \alpha^*}$.
Let $\eta \in \IIpi {\phi}VQ$ so that $\eta^G = \phi$. We can find
$\eta^* \in \Bpi V$ so that $(\eta^*)^o = \eta$. Since
$({\eta^*}^G)^o = ({\eta^*}^o)^G = \eta^G = \phi \in \ibr G$, we see
that $\eta^G$ is irreducible. On the other hand, $({\eta^*}^o)_N =
(\eta_N)^o = b \alpha$ for some integer $b$. Since the irreducible
constituents of ${\eta^*}_N$ lie in $\Bpi N$, we deduce that $\eta^*
\in \irr {V \mid \alpha^*}$. But we saw that this implies that
$\eta^*$ extends to $G$. Since $V < G$, it is not possible for
$\eta^*$ to both extend to $G$ and induce irreducibly. Therefore,
we have a contradiction. We see that $\alpha^*$ (and hence,
$\alpha$) is fully ramified with respect to $K/N$. Notice that if
$p$ is not in $\pi$, then Corollary 6.28 of \cite{text} applies and
$\alpha^*$ extends to $K$. Therefore, $p \in \pi$.
%Suppose $|G|$ is odd or $2 \in \pi$, and let $\eta^* \in \Bpi V$. We
%know that $({\eta^*}^G)^\sigma = ({\eta^*}^\sigma)^G = (\eta^*)^G$
%and . It follows that $\eta^* \in \Bpi G$. We conclude that
%$(\eta^*)^G = \phi^*$.
%We need more details here
%We now suppose $2 \not\in \pi$, and we let $\eta^* \in \Dpi V$ so
%that $(\eta^*)^o$. We see that $(s_{(G:V)} \eta^*)^G = \phi^*$. In
%either case, $\phi^*$ is induced from $V$ and this is a
%contradiction.
We suppose that $p$ is odd, and we work for a contradiction. Since
$\alpha^*$ is fully-ramified with respect to $K/N$ and $|K:N|$ is
odd, the main theorem of \cite{fram} implies that no character in
$\irr {V \mid \alpha}$ induces irreducibly to $G$. (A stronger
theorem is proved in \cite{brown}.) As in the previous paragraph,
this implies that $\phi$ is not induced from $V$ which contradicts
the assumption that $\IIpi {\phi}VQ$ is not empty. (This strongly
uses the fact that $p$ is odd. When $p = 2$, it is tempting to try
to use the correspondence in \cite{strong}, but that correspondence
does not preclude inducing characters in $\irr {G \mid \alpha}$ from
$V$. In fact, $\textrm{GL}_2(3)$ is an example where this occurs.) We
conclude that $p = 2$. Since $|G:V| = |K:N|$, we see that $|G:V|$ is
a power of $2$. This proves the theorem.
\end{proof}
As a corollary, we obtain Theorem \ref{main} stated for
$\pi$-partial characters.
\begin{corollary}
Let $G$ be a solvable group. Assume either $|G|$ is odd or $2
\not\in \pi$. Let $Q$ be a $\pi'$-subgroup of $G$ and suppose that
$Q \le V$. If $\phi \in \Ipi G$, then $|\IIpi {\phi}VQ| \le |\norm
GQ:\norm VQ|$.
\end{corollary}
\begin{proof}
We suppose the result is not true. Let $G$ be a counterexample with
$|G| + |G:V|$ minimal, as in Theorem \ref{min counter}. By that result, we
have that $|G:V|$ is a nontrivial power of $2$ which is a
contradiction if $|G|$ is odd. We also have $2 \in \pi$ which is a
contradiction to $2 \not\in \pi$. This proves the corollary.
\end{proof}
\begin{thebibliography}{99}
\bibitem{preprint} J.~P.~Cossey and M.~L.~Lewis, Counting lifts of
Brauer characters, $\langle$arXiv:1007.3011v1$\rangle$
\bibitem{hupte} B. Huppert, ``Character Theory of Finite Groups,''
Walter de Gruyter, Berlin, 1998.
\bibitem{gajen} D. Gajendragadkar, A characteristic class of
characters of finite $\pi$-separable groups,
{\it J. Algebra} {\bf 59} (1979), 237-259.
\bibitem{brown} I.~M.~Isaacs, Characters of solvable and symplectic groups,
{\it Amer. J. Math.} {\bf 95} (1973), 594-635.
\bibitem{text} I.~M.~Isaacs, ``Character Theory of Finite
Groups,'' Academic Press, San Diego, California, 1976.
\bibitem{strong} I.~M.~Isaacs, Character correspondences in
solvable groups, {\it Adv. Math.} {\bf 43} (1982),
284-306.
\bibitem{fram} I.~M.~Isaacs, On the character theory of fully ramified sections,
{\it Rocky Mountain J. Math.} {\bf 13} (1983),
689-698.
\bibitem{pisep} I.~M.~Isaacs, Characters of $\pi$-separable groups,
{\it J. Algebra} {\bf 86} (1984), 98-128.
\bibitem{Fong} I.~M.~Isaacs, Fong characters in $\pi$-separable
groups, {\it J. Algebra} {\bf 99} (1986), 89-107.
\bibitem{pipart} I.~M.~Isaacs, Partial characters of $\pi$-separable
                 groups. In: Representation Theory of Finite Groups
                 and Finite Dimensional Algebras (Bielefeld, 1991), Progr. Math.,
                 vol. 95, Birkh\"auser, Basel, 1991, 273-287.
\bibitem{Laradji} A.~Laradji, On normal subgroups and simple modules
with a given vertex in a $p$-solvable group, {\it J. Algebra}
{\bf 308} (2007), 484-492.
\bibitem{max} M.~L.~Lewis, Characters of maximal subgroups of
$M$-groups, {\it J. Algebra} {\bf 183} (1996), 864-897.
\bibitem{Mawo} O. Manz and T. R. Wolf, ``Representations of Solvable
Groups,'' Cambridge University Press, Cambridge, 1993.
\end{thebibliography}
\end{document}
| {
"alphanum_fraction": 0.6384347068,
"avg_line_length": 46.8858603066,
"ext": "tex",
"hexsha": "135a1cffee6ef2d2a4b3ac7b6b2a136d91135bc7",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "42eede9867e5795a6fc040b0a7ce92da3ddd3120",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "e-sim/pdf-text-extraction-benchmark",
"max_forks_repo_path": "benchmark/src/with-lang/empty-lang/1008.1633-bl.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "42eede9867e5795a6fc040b0a7ce92da3ddd3120",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "e-sim/pdf-text-extraction-benchmark",
"max_issues_repo_path": "benchmark/src/with-lang/empty-lang/1008.1633-bl.tex",
"max_line_length": 84,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "42eede9867e5795a6fc040b0a7ce92da3ddd3120",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "e-sim/pdf-text-extraction-benchmark",
"max_stars_repo_path": "benchmark/src/with-lang/empty-lang/1008.1633-bl.tex",
"max_stars_repo_stars_event_max_datetime": "2018-08-23T19:07:01.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-08-23T19:07:01.000Z",
"num_tokens": 9679,
"size": 27522
} |
%%!TEX TS-program = latex
%This template gives a nice gray-sober environment.
\documentclass[red]{beamer}
\usepackage{etex}
\input ../AuxFiles/PreambleSlides.tex
\usepackage[utf8]{inputenc}
\title{{\scshape Econometrics I}}
\author{{\scshape Jos\'e Luis Montiel Olea}}
\date{}
%---------------------------------------------------Begin Document-------------------------------------
\begin{document}
\setbeamerfont{alerted text}{series=\normalfont}
\setbeamercolor{alerted text}{fg=blue}
\frame{\titlepage}
\frame{
\begin{center}
\textbf{Lectures 3 and 4}
\end{center}
}
\frame{
\begin{center}
\textbf{MV Distributions, Independence, and Conditional Probability}
\end{center}
}
\frame{
\frametitle{\normalsize {\scshape Overview}}
\begin{itemize}
\justifying
\item [$\star$] Define random vectors along with multivariate c.d.f.s\\
\textcolor{gray}{(mean, covariance matrix, moment generating function)} \vspace{.5cm}
\item[$\star$] Present some useful characterizations of independence \\
\textcolor{gray}{(general definition relegated to the appendix of the notes)} \vspace{.5cm}
\item [$\star$] Introduce conditional probability and conditional expectation
\textcolor{gray}{(general definition also in the appendix of the notes)}
\end{itemize}
}
\section{Random Vectors and MV distributions}
\frame{
\begin{center}
\textbf{1. Random Vectors and MV distributions}
\end{center}
}
\frame{
\frametitle{\normalsize {\scshape Introduction}}
\begin{itemize}
\justifying
\item [$\star$] So far, we have been working with \textcolor{blue}{real-valued} random variables:
$$X: \Omega \rightarrow \R $$
\item[$\star$] Consequently, we have learned to think about statements like:
$$P_{X}(X \leq x),$$
\item [$\star$] where $x$ is some \textcolor{blue}{real} number.
\end{itemize}
}
\frame{
\frametitle{\normalsize {\scshape Motivation}}
\begin{itemize}
\item [$\star$] Econ data usually involve more than one random variable\\
\textcolor{gray}{(think about cross-sectional or time series data)} \vspace{.5cm}
\item [$\star$] \text{Thus, we will work with } \textcolor{blue}{$X_s: \Omega \rightarrow \R, \quad s \in \{1, \ldots, S\} $} \vspace{.5cm}
\item [$\star$] We will introduce the following statements: \vspace{.5cm}
\begin{enumerate}
\item Joint Probability Statements.
\item[] $$\prob_{X} \Big[ X_1 \leq x_1, \ldots, X_S \leq x_S \Big] $$
\item Conditional Probability Statements.
\item[] $$\prob_{X} \Big[ X_1 \leq x_1 \: \Big | \: X_2 \leq x_2 \Big] $$
\end{enumerate}
\end{itemize}
}
\frame{
\frametitle{\normalsize {\scshape $\R^s$-valued random variable}}
\begin{itemize}
\item [$\star$] The $\mathbb{R}^{S}$-valued mapping defined over $(\Omega, \mathcal{F})$
\item [] $$\mathbf{X}(\omega) \equiv \Big( \: X_1(\omega), \ldots, X_{S}(\omega) \: \Big)' $$
\item [] is a random vector if for all $A \in B(\mathbb{R}^{S})$
\item [] $$ \mathbf{X}^{-1}(A) \in \mathcal{F}.$$
\item [$\star$] The definition is analogous to the real-valued case
\end{itemize}
}
\frame{
\frametitle {\normalsize {\scshape Multivariate Cumulative Distribution Functions}}
\justifying
\begin{itemize}
\item [$\star$] The c.d.f. of the $\mathbb{R}^{S}$ valued random vector $\textbf{X}(\omega)$ is a function
$$F_{X}: \R^S \rightarrow [0,1] $$
defined as
$$F_{X}(x_1, \ldots, x_S) \equiv \prob\{ \omega \in \Omega \: | \: X_i(\omega)\leq x_i \: \text{for all } i=1,\ldots, S. \} $$
\item [$\star$] Thus, the c.d.f. tells us how often each $X_i$ is below $x_i$.
\end{itemize}
}
\frame{
\begin{center}
We classify random vectors according to their c.d.f.s \\
(discrete and continuous)
\end{center}
}
\frame{
\frametitle {\normalsize {\scshape Absolutely Continuous Random Vector}}
\begin{enumerate}
\item [$\star$] An $\R^{S}$-valued random vector is absolutely continuous if:
$$F(x_1, x_2, \ldots x_S) = \int_{-\infty}^{x_1} \ldots \int_{-\infty}^{x_S} \textcolor{blue}{f(z_1, \ldots z_S)} dz_1 \ldots dz_S$$
\noindent for some nonnegative function $f:\R^{S} \rightarrow \R^{+}$ . \vspace{.3cm}
\item [$\star$] $f(x_1, \ldots, x_n) = \partial^n F(x_1, \ldots, x_n)/ \partial x_1 \ldots \partial x_n$ is the p.d.f. of $\mathbf{X}$.
\end{enumerate}
}
\frame{
\frametitle {\normalsize {\scshape Marginal Distributions of $\mathbf{X}$}}
$$ F_s: \mathbb{R} \rightarrow [0,1] $$
\vspace{.5cm}
$$F_s(x) \equiv \prob \Big[ \mathbf{X}^{-1} \Big( \R \times \ldots (-\infty, x ) \ldots \times \R \Big) \Big] $$
}
\frame{
\frametitle{\normalsize {\scshape From joint to marginals}}
\begin{center}
How to recover a marginal p.d.f. from a joint p.d.f?
\end{center}
\begin{center}
Just integrate variables out.
\end{center}
}
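%
% Added for clarity (illustrative, not part of the original deck): the explicit
% bivariate version of ``integrating variables out''.
\frame{
\frametitle{\normalsize {\scshape From joint to marginals: bivariate case}}
\begin{center}
If $(X,Y)$ has joint p.d.f. $f(x,y)$, then
\end{center}
$$ f_{X}(x) = \int_{-\infty}^{\infty} f(x,y) \, dy, \qquad f_{Y}(y) = \int_{-\infty}^{\infty} f(x,y) \, dx. $$
}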
\frame{
\frametitle{\normalsize {\scshape Moments of Random Vector}}
Let $g: \mathbb{R}^{S} \rightarrow \mathbb{R}^{m}$. Write
$$g(\textbf{x})=(g_1(\textbf{x}), g_2(\textbf{x}), \ldots g_{m}(\textbf{x}))'$$
\noindent and let
\begin{eqnarray*}
\expec_{F}[g(\mathbf{X})] &=& \Big( \expec_{F}[g_1(\mathbf{X})], \expec_{F}[g_2(\mathbf{X})], \ldots, \expec_{F}[g_m(\mathbf{X})] \Big)' \\
&\equiv& \Big( \int_{\R^{S}} g_1(\mathbf{x})f(\mathbf{x})d\mathbf{x}, \ldots, \int_{\R^{S}} g_m(\mathbf{x})f(\mathbf{x})d\mathbf{x}\Big)'
\end{eqnarray*}
}
\frame{
\frametitle{\normalsize {\scshape Mean, Variance, Covariance}}
\begin{center}
$\mu \equiv \mathbb{E}[\mathbf{X}]$, $\Sigma \equiv \mathbb{E}[(\mathbf{X}-\mu)(\mathbf{X}-\mu)']$
\end{center}
\begin{center}
$$\textrm{Cov}(X,Y) \equiv \mathbb{E}[ (X - \mu_x)(Y-\mu_y) ].$$
\end{center}
}
\frame{
\frametitle{\normalsize {\scshape Moment Generating Function of $\mathbf{X}$}}
The moment generating function $m_{\mathbf{X}}: \R^{S} \rightarrow \R$ of $\mathbf{X}$ is given by:
$$m_{\mathbf{X}}(t) \equiv \expec_{F}[\exp(t'\mathbf{X})] \quad t \in \R^{S} $$
}
\frame{
\frametitle{\normalsize {\scshape Remarks about the m.g.f.}}
\begin{enumerate}
\justifying
\item [$\star$] Vectors with the same m.g.f. have the same joint distribution \vspace{.3cm}
\item [$\star$] Vectors for which every linear combination has the same distribution have the same joint distribution \\
\textcolor{gray}{(see Cramer-Wold Theorem in the notes and problem 2)}
\end{enumerate}
}
\frame{
\begin{center}
\textbf{Examples of Bivariate Vectors}\\
(Bivariate Normal and Bivariate Bernoulli)
\end{center}
}
\frame{
\frametitle{\normalsize {\scshape Bivariate Normal}}
\begin{enumerate}
\item [$\star$] Let $\mu \in \R^2$ and let $\Sigma$ be a p.s.d. matrix of dimension $2 \times 2$. \vspace{.5cm}
\item [$\star$] $\mathbf{X} \sim \mathcal{N}_{2}(\mathbf{\mu}, \Sigma)$, if:
$$ \expec_{F}[\exp(t'\mathbf{X})]= \exp\Big(t'\mu + \frac{1}{2} t'\Sigma t \Big).$$
\end{enumerate}
}
\frame{
\frametitle{\normalsize {\scshape Some Properties of the Bivariate Normal}}
\begin{enumerate}
\item $\mu \in \mathbb{R}^2, A \in \mathbb{R}^{2 \times 2}$.
$$\mathbf{Z} \sim \mathcal{N}_2(0, \mathbb{I}_2) \implies \mu + A\mathbf{Z} \sim \mathcal{N}_2(\mu, AA').$$
\vspace{.1cm}
\item $\mathbf{X} \sim \mathcal{N}(\mu,\Sigma) \iff c'\mathbf{X} \sim \mathcal{N}(c'\mu, c'\Sigma c)$ for all $c \in \mathbb{R}^2$. \vspace{.7cm}
\item $\mathbf{X} \sim \mathcal{N}(\mu,\Sigma)$, $\Sigma$ invertible. The p.d.f. of $\mathbf{X}$ is:
$$ f(\mathbf{x}) = \frac{1}{ (\textrm{det}\: 2\pi \Sigma)^{1/2} } \exp \Big( -\frac{1}{2} (\mathbf{x}-\mu)'\Sigma^{-1} (\mathbf{x}-\mu) \Big).$$
\end{enumerate}
}
\frame{
\frametitle{ \normalsize \scshape{Bivariate Bernoulli}}
$$\text{Supp} = \Big\{ (0,0), \: (0,1), \: (1,0), \: (1,1) \Big\} $$
\begin{eqnarray*}
&\text{Y} \nonumber \\
& \left.\begin{array}{cc} 0 & \quad \quad 1 \end{array}\right. \nonumber\\
\text{X} \quad \left.\begin{array}{c} 0 \\ 1 \end{array}\right. & \left.\begin{array}{|c|c|}\hline \:\:p_1\:\: & \:\:p_2\:\: \\\hline \:\:p_3\:\: & \:\: p_4\:\: \\\hline \end{array}\right. \\ \nonumber
\end{eqnarray*}
$$\quad \quad \: \; p_1 + p_2 + p_3 + p_4 = 1 $$
\begin{center}
\quad (what are the marginal distributions?)
\end{center}
}
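%
% Added worked example (illustrative): moments of the bivariate Bernoulli in
% terms of the cell probabilities, using the definitions of the earlier slides.
\frame{
\frametitle{\normalsize {\scshape Bivariate Bernoulli: moments}}
\begin{itemize}
\justifying
\item [$\star$] Marginals: $X \sim \textrm{Bernoulli}(p_3+p_4)$ and $Y \sim \textrm{Bernoulli}(p_2+p_4)$. \vspace{.3cm}
\item [$\star$] Since $XY = 1$ only in the $(1,1)$ cell, $\mathbb{E}[XY] = p_4$. \vspace{.3cm}
\item [$\star$] Therefore
$$ \textrm{Cov}(X,Y) = p_4 - (p_3+p_4)(p_2+p_4). $$
\end{itemize}
}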
\frame{
\frametitle{ \normalsize {\scshape Remark: Joints are not `identified' by marginals}}
$$ X \sim \textrm{Bernoulli} (p_x), \quad Y \sim \textrm{Bernoulli}(p_y) $$
\begin{eqnarray*}
&\text{Y} \nonumber \\
& \left.\begin{array}{cc} 0 & \quad \quad 1 \end{array}\right. \nonumber\\
\text{X} \quad \left.\begin{array}{c} 0 \\ 1 \end{array}\right. & \left.\begin{array}{|c|c|}\hline \:\:\ p_1 \:\: & \:\:p_2 \:\: \\\hline \:\: p_3 \:\: & \:\: p_4 \:\: \\\hline \end{array}\right. \\ \nonumber
\end{eqnarray*}
\begin{eqnarray*}
p_2 + p_4 &=& p_y\\
p_3+p_4 &=& p_x \\
p_1+p_2+p_3+p_4 &=& 1\\
\end{eqnarray*}
\begin{center}
\textcolor{blue}{Solve for $p_1, p_2, p_3, p_4$}.
\end{center}
}
\frame{
\frametitle{\normalsize {\scshape Best Linear Predictor (Practice Problem)}}
\begin{itemize}
\item [$\star$] Let $X, Y$ be real-valued random variables. \vspace{.3cm}
\item [$\star$] Assume $\mu = (\mu_x, \mu_y)'$ and $\Sigma$ are known. \vspace{.3cm}
\item [$\star$] ``Predict'' $Y$ using a linear function of $(X-\mu_x)$:
\[ \alpha + \beta (X-\mu_x)\]
\item [$\star$] The best linear predictor minimizes expected squared error
\[ \min_{\alpha, \beta} \mathbb{E} [ (Y - \alpha -\beta (X-\mu_x))^2] \]
\item [$\star$] Show that $\alpha^* = \mu_y$, $\beta^* = \textrm{Cov}(X,Y)/ V(X)$.
\end{itemize}
}
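%
% Added solution sketch for the practice problem above (illustrative); it only
% uses the first-order conditions, so it assumes the relevant moments exist.
\frame{
\frametitle{\normalsize {\scshape Best Linear Predictor (Solution Sketch)}}
\begin{itemize}
\justifying
\item [$\star$] First-order condition in $\alpha$:
$$ \mathbb{E}[Y - \alpha - \beta(X-\mu_x)] = 0 \implies \alpha^* = \mu_y. $$
\item [$\star$] First-order condition in $\beta$:
$$ \mathbb{E}[(X-\mu_x)(Y - \alpha - \beta(X-\mu_x))] = 0 \implies \textrm{Cov}(X,Y) - \beta V(X) = 0. $$
\item [$\star$] Hence $\beta^* = \textrm{Cov}(X,Y)/V(X)$, as claimed on the previous slide.
\end{itemize}
}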
\section{Independence}
\frame{
\begin{center}
\textbf{2. Independence}
\end{center}
}
\frame{
\frametitle{\normalsize {\scshape (In)dependence}}
\begin{itemize}
\justifying
\item [$\star$] Important issue in the multivariate world: \vspace{.5cm}
\item []
\begin{center}How to summarize dependence or lack of dependence between random variables? \vspace{.5cm}
\end{center}
\item [$\star$] Say $X_1, \ldots, X_n$ are independent if for any $A_1, \ldots, A_n$
\[ \mathbb{P}( X_1 \in A_1, \ldots ,X_n \in A_n ) = \mathbb{P}(X_1 \in A_1)\ldots \mathbb{P}(X_n \in A_n).\]
\item [$\star$] i.e., \textcolor{blue}{joint distribution equals the product of the marginals.} \vspace{.5cm}
\end{itemize}
}
\frame{
\frametitle{\normalsize {\scshape Are $X$ and $Y$ independent?}}
\begin{eqnarray*}
&\text{Y} \nonumber \\
& \left.\begin{array}{cc} 0 & \quad \quad 1 \end{array}\right. \nonumber\\
\text{X} \quad \left.\begin{array}{c} 0 \\ 1 \end{array}\right. & \left.\begin{array}{|c|c|}\hline \:\:\ .3 \:\: & \:\: .2 \:\: \\\hline \:\: .5\:\: & \:\: 0 \:\: \\\hline \end{array}\right. \\ \nonumber
\end{eqnarray*}
}
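%
% Added worked check (illustrative): compare one joint cell with the product of
% the marginal probabilities.
\frame{
\frametitle{\normalsize {\scshape Worked check}}
\begin{itemize}
\justifying
\item [$\star$] Marginals: $\mathbb{P}(X=0) = .3 + .2 = .5$ and $\mathbb{P}(Y=0) = .3 + .5 = .8$. \vspace{.3cm}
\item [$\star$] Joint cell: $\mathbb{P}(X=0, Y=0) = .3$, while $\mathbb{P}(X=0)\mathbb{P}(Y=0) = .4$. \vspace{.3cm}
\item [$\star$] Since $.3 \neq .4$, $X$ and $Y$ are \textcolor{blue}{not} independent.
\end{itemize}
}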
\frame{
\frametitle{\normalsize {\scshape Useful Characterizations}}
\begin{itemize}
\justifying
\item [$\star$] Joint c.d.f. is the product of the marginal c.d.f.s
$$F(X_1, \ldots, X_n) = F(X_1) \ldots F(X_n).$$
\item [$\star$] Joint p.d.f. is the product of marginal p.d.f.s
$$f(x_1, \ldots, x_n) = f(x_1) \ldots f(x_n).$$
\item [$\star$] Expectation of ``products'' is the ``product'' of expectations
$$\mathbb{E}[g_1(X_1) \cdots g_n(X_n)] = \mathbb{E} [g_1(X_1)] \cdots \mathbb{E}[g_n(X_n)].$$
\item [$\star$] Joint m.g.f. is the product of the marginal m.g.f.s
$$ \mathbb{E}[ \exp (\mathbf{t}'\mathbf{X}) ] = \mathbb{E}[ \exp ( t_1 X_1 ) ] \ldots \mathbb{E}[ \exp ( t_n X_n ) ] $$
\end{itemize}
}
\frame{
\frametitle{\normalsize {\scshape Independence implies zero Covariance}}
$$\textrm{Cov}(X,Y) \equiv \expec[(X-\mu_x)(Y-\mu_y)] $$
$$ \implies $$
$$\textrm{Cov}(X,Y) = \expec[XY]- \mu_x \mu_y. $$
\vspace{.5cm}
\begin{center}
Therefore $(X,Y)$ independent $\implies$ $\textrm{Cov}(X,Y)=0$.
\end{center}
}
\frame{
\frametitle{\normalsize {\scshape Does zero covariance imply independence?}}
\begin{itemize}
\item [$\star$] In general, the answer is no. Consider:
\item []
\begin{eqnarray*}
&\text{Y} \nonumber \\
& \left.\begin{array}{rr} 0 & \quad \quad 1 \end{array}\right. \nonumber\\
\text{X} \quad \left.\begin{array}{r} -1 \\ 0 \\ +1 \end{array}\right. & \left.\begin{array}{|c|c|}\hline \:\:\ 0 \:\: & \:\: 3/9 \:\: \\\hline \:\: 3/9 \:\: & \:\: 0 \:\: \\\hline \:\: 0 \:\: & \:\: 3/9 \:\: \\\hline \end{array}\right. \\ \nonumber
\end{eqnarray*}
\item [$\star$] But in some cases like multivariate normals, the answer is yes. \\
\textcolor{gray}{(I will ask you to work this out in this week’s problem set)}
\end{itemize}
}
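%
% Added worked check for the example above (illustrative): the covariance is
% zero even though the joint cells are not products of the marginals.
\frame{
\frametitle{\normalsize {\scshape Worked check: zero covariance without independence}}
\begin{itemize}
\justifying
\item [$\star$] $\mathbb{E}[X] = (-1)\tfrac{3}{9} + (0)\tfrac{3}{9} + (1)\tfrac{3}{9} = 0$ and $\mathbb{E}[XY] = (-1)(1)\tfrac{3}{9} + (1)(1)\tfrac{3}{9} = 0$. \vspace{.3cm}
\item [$\star$] Hence $\textrm{Cov}(X,Y) = \mathbb{E}[XY] - \mathbb{E}[X]\mathbb{E}[Y] = 0$. \vspace{.3cm}
\item [$\star$] But $\mathbb{P}(X=0,Y=0) = \tfrac{3}{9} \neq \mathbb{P}(X=0)\mathbb{P}(Y=0) = \tfrac{1}{9}$, so $X$ and $Y$ are not independent.
\end{itemize}
}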
\section{Conditional Probability}
\frame{
\begin{center}
\textbf{3. Conditional Probability and Conditional Expectation} \vspace{.5cm} \\
\end{center}
}
\frame{
\frametitle{\normalsize {\scshape Definition of the conditional probability function}}
\begin{itemize}
\item [$\star$] $P(Y \in A | x)$: Conditional probability of $Y \in A$ given $x$. \vspace{.4cm}
\item [$\star$] Defined as the \emph{function} such that
\[ \int_{B} P(Y \in A | x) f_X(x) dx = P(Y \in A, X \in B). \]
\item [$\star$] When $X,Y$ have joint p.d.f. $f$ then
\[ P(Y \in A | x) = \int_{A} \frac{f(x,y)}{f_{X}(x)} dy \]
\item [$\star$] The p.d.f. of $Y | X$ is defined as:
\[ f(y | x) \equiv \textcolor{blue}{\frac{f(x,y)}{f_{X}(x)}}. \]
\end{itemize}
}
\frame{
\frametitle{\normalsize {\scshape In our example}}
\begin{eqnarray*}
&\text{Y} \nonumber \\
& \left.\begin{array}{cc} 0 & \quad \quad 1 \end{array}\right. \nonumber\\
\textrm{X} \quad \left.\begin{array}{c} 0 \\ 1 \end{array}\right. & \left.\begin{array}{|c|c|}\hline \:\:\ p_1 \:\: & \:\:p_2 \:\: \\\hline \:\: p_3 \:\: & \:\: p_4 \:\: \\\hline \end{array}\right. \\
\end{eqnarray*}
$$P ( Y = 1 | X=1) = \frac{p_4}{p_3+p_4} $$
$$P ( Y = 1 | X=0) = \frac{p_2}{p_1+p_2} $$
}
\frame{
\frametitle{\normalsize \scshape{Conditional Expectation}}
\begin{itemize}
\justifying
\item [$\star$] If $(X,Y)$ has joint p.d.f. $f(x,y)$:
$$\textcolor{blue}{\expec[g(Y) | x] \equiv \int g(y) \frac{f(x,y)}{f_{X}(x)} dy.} $$
\item [$\star$] Law of Iterated Expectations $\mathbb{E}[\mathbb{E}[g(Y)| X]] = \mathbb{E}[g(Y)]$
\end{itemize}
}
\frame{
\frametitle{\normalsize {\scshape Example}}
\begin{eqnarray*}
&\text{Y} \nonumber \\
& \left.\begin{array}{cc} 0 & \quad \quad 1 \end{array}\right. \nonumber\\
\text{X} \quad \left.\begin{array}{c} 0 \\ 1 \end{array}\right. & \left.\begin{array}{|c|c|}\hline \:\:\ p_1 \:\: & \:\:p_2 \:\: \\\hline \:\: p_3 \:\: & \:\: p_4 \:\: \\\hline \end{array}\right. \\ \nonumber
\end{eqnarray*}
$$\expec[Y | X=1] \equiv (0) \frac{p_3}{p_3+p_4} + (1) \frac{p_4}{p_3+p_4} $$
}
\frame{
\frametitle{\normalsize {\scshape Bivariate Normal}}
\begin{center}
In the problem set I will ask you to show that if $(X,Y)$ are bivariate normal:
\end{center}
$$ Y | X \sim \mathcal{N}_1 ( \underbrace{\alpha^* + \beta^*(X-\mu_x)}_{\textrm{Best Linear Pred}} \:, \: \textrm{Var} ( \underbrace{(Y-\alpha^*-\beta^*(X-\mu_x) )}_{\textrm{Approximation Error}} ) ) $$
}
\frame{
\frametitle{\normalsize {\scshape Signal and Noise}}
\begin{center}
In the problem set I also ask you to consider the model
$$ \underbrace{X}_{\textrm{Noisy Measure}} = \underbrace{\theta}_{\textrm{signal}} + \underbrace{\epsilon}_{\textrm{noise}}, $$
$$ \theta \sim \mathcal{N}(\mu, \sigma^2_{\theta}), \quad \epsilon \sim \mathcal{N}(0, \sigma^2_{\epsilon}), \quad \theta \bot \epsilon $$
\end{center}
\begin{center}
and to work out the distribution of $\theta | X$.
\end{center}
}
\section{Sums of Random Variables}
\frame{
\begin{center}
\textbf{4. Sums of Random Variables}
\end{center}
}
\frame{
\frametitle{\normalsize {\scshape Let's go back to the example}}
\begin{eqnarray*}
&\text{Y} \nonumber \\
& \left.\begin{array}{cc} 0 & \quad \quad 1 \end{array}\right. \nonumber\\
\text{X} \quad \left.\begin{array}{c} 0 \\ 1 \end{array}\right. & \left.\begin{array}{|c|c|}\hline \:\:\ p_1 \:\: & \:\: p_2 \:\: \\\hline \:\: p_3 \:\: & \:\: p_4 \:\: \\\hline \end{array}\right. \\ \nonumber
\end{eqnarray*}
\begin{center}
For any $t_1, t_2 \in \R$ define
\end{center}
$$W = t_1 X + t_2 Y $$
}
\frame{
\frametitle{\normalsize {\scshape Distribution of $t_1 X + t_2 Y$}}
\begin{enumerate}
\justifying
\item [$\star$] What is the distribution of $W$?
\item [] $$\text{Supp}=\{0,t_1, t_2, t_1+t_2\} $$
\item [] $$\textcolor{blue}{\prob_{Z}(W=w) ?} $$
\item [$\star$] Note that:
\item[] $$\prob_{Z}[W=t_1+t_2] = p_4, \quad \prob_{Z}[W=t_2]=p_2, \quad \prob_{Z}[W=t_1]=p_3$$
\item [] $$\prob_{Z}[W=0] = p_1 $$
\end{enumerate}
}
\frame{
\frametitle{\normalsize {\scshape Sums of Independent Random Variables}}
\begin{itemize}
\justifying
\item [$\star$] The distribution of $X_1 + X_2$ need not be easy to obtain \vspace{.5cm}
\item [$\star$] If $X_1$ and $X_2$ are independent and have m.g.f.s, the m.g.f. of the sum factors:
$$\mathbb{E}[ \exp (t(X_1 + X_2))] = \mathbb{E}[\exp(t X_1 )] \mathbb{E}[\exp(t X_2 )]$$
\item [$\star$] Also, if $X_1$ and $X_2$ are independent and have p.d.f.s $f$ and $g$, then
\[ Z = X_1 + X_2 \textrm{ has p.d.f. } \int f(z-y) g(y)\, dy \]
\end{itemize}
}
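%
% Added worked example (illustrative): using the m.g.f. factorization above to
% identify the distribution of a sum of independent normals.
\frame{
\frametitle{\normalsize {\scshape Example: sum of independent normals}}
\begin{itemize}
\justifying
\item [$\star$] Let $X_1 \sim \mathcal{N}(\mu_1, \sigma_1^2)$ and $X_2 \sim \mathcal{N}(\mu_2, \sigma_2^2)$ be independent. \vspace{.3cm}
\item [$\star$] Then
$$ \mathbb{E}[\exp(t(X_1+X_2))] = \exp\Big(t\mu_1 + \tfrac{1}{2}t^2\sigma_1^2\Big) \exp\Big(t\mu_2 + \tfrac{1}{2}t^2\sigma_2^2\Big), $$
\item [$\star$] which is the m.g.f. of $\mathcal{N}(\mu_1+\mu_2, \: \sigma_1^2+\sigma_2^2)$.
\end{itemize}
}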
\end{document} | {
"alphanum_fraction": 0.6298561151,
"avg_line_length": 35.1157894737,
"ext": "tex",
"hexsha": "54f3e0440807c2c89740c2fbf32490f7c70583d5",
"lang": "TeX",
"max_forks_count": 16,
"max_forks_repo_forks_event_max_datetime": "2021-09-13T12:01:38.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-09-05T03:12:41.000Z",
"max_forks_repo_head_hexsha": "84162de869a4c8a7ad93a7e7f3f33e741d4caeb3",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "yu45020/Courses-IntroEconometrics-Ph.D",
"max_forks_repo_path": "docs/Slides/Slides03-4.tex",
"max_issues_count": 3,
"max_issues_repo_head_hexsha": "84162de869a4c8a7ad93a7e7f3f33e741d4caeb3",
"max_issues_repo_issues_event_max_datetime": "2019-10-15T12:46:32.000Z",
"max_issues_repo_issues_event_min_datetime": "2018-09-07T02:05:29.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "yu45020/Courses-IntroEconometrics-Ph.D",
"max_issues_repo_path": "docs/Slides/Slides03-4.tex",
"max_line_length": 260,
"max_stars_count": 17,
"max_stars_repo_head_hexsha": "84162de869a4c8a7ad93a7e7f3f33e741d4caeb3",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "yu45020/Courses-IntroEconometrics-Ph.D",
"max_stars_repo_path": "docs/Slides/Slides03-4.tex",
"max_stars_repo_stars_event_max_datetime": "2021-09-13T12:01:36.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-09-10T01:31:49.000Z",
"num_tokens": 6540,
"size": 16680
} |
% File: ucore_thumips.tex
% Created: Fri Apr 06 12:00 PM 2012 C
% Last Change: Fri Apr 06 12:00 PM 2012 C
%
\documentclass[a4paper]{article}
\usepackage{graphicx}
\usepackage{indentfirst}
\usepackage{algorithm}
\usepackage{xcolor}
\usepackage{listings}
\definecolor{dkgreen}{rgb}{0,0.6,0}
\definecolor{gray}{rgb}{0.5,0.5,0.5}
\definecolor{mauve}{rgb}{0.58,0,0.82}
\lstset{ %
basicstyle=\footnotesize, % the size of the fonts that are used for the code
numbers=none, % where to put the line-numbers
numberstyle=\footnotesize, % the size of the fonts that are used for the line-numbers
stepnumber=2, % the step between two line-numbers. If it's 1, each line
% will be numbered
numbersep=5pt, % how far the line-numbers are from the code
backgroundcolor=\color{white}, % choose the background color. You must add \usepackage{color}
showspaces=false, % show spaces adding particular underscores
showstringspaces=false, % underline spaces within strings
showtabs=false, % show tabs within strings adding particular underscores
frame=none, % adds a frame around the code
tabsize=2, % sets default tabsize to 2 spaces
captionpos=b, % sets the caption-position to bottom
breaklines=true, % sets automatic line breaking
breakatwhitespace=false, % sets if automatic breaks should only happen at whitespace
title=\lstname, % show the filename of files included with \lstinputlisting;
% also try caption instead of title
numberstyle=\tiny\color{gray}, % line number style
keywordstyle=\color{blue}, % keyword style
commentstyle=\color{dkgreen}, % comment style
stringstyle=\color{mauve}, % string literal style
escapeinside={\%*}{*)}, % if you want to add a comment within your code
morekeywords={*,...} % if you want to add more keywords to the set
}
\begin{document}
\title{UCORE Porting on MIPS32S CPU}
\author{Chen Yuheng\\ Tsinghua Unv.}
\maketitle
\section{Introduction}
This report gives a brief introduction to our project --
\emph{UCORE} porting onto the MIPS32S platform, including the kernel, limited
device drivers as well as the user-lib. \emph{UCORE} is an experimental
modern operating system that includes a basic memory manager, a limited process/
thread scheduler, a Linux-like VFS, and an incomplete POSIX-compatible syscall
interface. Currently, the following platforms are supported:
\begin{itemize}
\item \emph{Mipssim in Qemu}, which is simulated with qemu-system-mipsel.
The hardware configuration is available in qemu's source code,
several important components of which are summarized in
Table. \ref{tab:mipssim}.
\begin{table}[h]
\centering
\begin{tabular}{|r|rrr|}
\hline
Component & Type & Base Address & IRQ \\
\hline
Uart & 16450 & 0x1fd003f8 & 4 \\
Timer & On Chip & -- & 7 \\
PIC & On Chip & -- & -- \\
\hline
\end{tabular}
\caption{Mipssim Hardware Configuration}
\label{tab:mipssim}
\end{table}
\item \emph{Our FPGA MIPS32S Implementation} hardware platform.
\end{itemize}
Our code is available on Github
\footnote{https://github.com/chyh1990/ucore-thumips}
and is continually updated. Bug reports are welcome.
\section{Architecture of the MIPS32S Platform}
\section{Development Environment}
This section is a guide to setting up and using the cross-compile development environment for Mips.
\subsection{Toolchain}
We use the standard toolchain GCC for Mips to compile ucore, even on our
MIPS32S CPU, in which only a subset of the MIPS1 Instruction Set is implemented.
The best place to get a free GCC-MIPS toolchain is on CodeSourcery
\footnote{https://sourcery.mentor.com/GNUToolchain/release2189}, just make sure you download GCC 4.6. The package also includes gdb for MIPS.
Another way to get a toolchain is to compile it from source code. GCC-core 4.6.3 + Binutils 2.22 has been tested. Compiling GCC is tricky; just google it if you
run into any issues. Here is my configuration for GCC on 64-bit Ubuntu 12.04:
\begin{verbatim}
../gcc-4.6.3/configure --prefix=/home/guest/cpu/build-gcc/mips_gcc
--target=mips-sde-elf --disable-nls --enable-languages=c
--disable-multilib --without-headers --disable-shared
--without-newlib --disable-libgomp -disable-libssp
--disable-threads --disable-libgcc
\end{verbatim}
\subsection{Simulator}
You can run ucore-thumips with the standard Qemu in your Linux distribution:
\begin{verbatim}
qemu-system-mipsel -M mipssim -m 32M -serial stdio
-kernel obj/ucore-kernel-initrd
\end{verbatim}
But we recommend using our modified Qemu to simulate our simplified MIPS Instruction Set
and Flash support.
What's more, it will be necessary to set up mips-sde-elf-gdb properly; refer to
the following \emph{.gdbinit} example:
\begin{verbatim}
set endian little
set mipsfpu none
target remote 127.0.0.1:1234
file obj/ucore-kernel-initrd
\end{verbatim}
\subsection{MIPS32S Programming Guide}
MIPS32S supports a simplified MIPS32 Instruction Set; here are several important differences:
\begin{enumerate}
\item No \emph{lh/sh}
\item No \emph{divu}
\item No \emph{add/sub}, only \emph{addu/subu} is supported.
\end{enumerate}
We use some tricks to solve these problems, see \emph{kern/thumips.h}.
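As an illustration of this kind of workaround (a sketch only, not the actual
contents of \emph{kern/thumips.h}), a halfword access can be decomposed into
two byte accesses plus shifts, so that no \emph{lh/sh} instruction is needed:
\begin{lstlisting}[language={C++}]
/* Sketch: emulate halfword load/store with byte accesses (little-endian). */
static inline uint16_t load_halfword(const uint8_t *p)
{
    return (uint16_t)(p[0] | (p[1] << 8));
}

static inline void store_halfword(uint8_t *p, uint16_t v)
{
    p[0] = (uint8_t)(v & 0xff);
    p[1] = (uint8_t)(v >> 8);
}
\end{lstlisting}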
Another problem is that MIPS32S does NOT support the delayed branch slot. This
problem is solved by passing the correct GCC options:
\begin{verbatim}
CFLAGS := -EL -G0 -fno-delayed-branch -Wa,-O0
\end{verbatim}
An example MIPS32S C project can be found in test1.tar.gz.
\section{Source Code Organization}
This section introduces the source code organization of ucore-thumips
and explains several configuration options.
\subsection{Source Tree}
Since our work is based on LAB0-LAB8, important directories are listed in Table. \ref{tab:dir}.
\begin{table}[h]
\centering
\begin{tabular}{|l|l|}
\hline
Directory & Description \\
\hline
debug & debug console after a kernel panic \\
driver & device driver interface definition \\
include & useful macros for MIPS32S \\
fs & Filesystem \\
init & kernel entry point and initialization code \\
libs & utilities \\
mm & low-level memory management \\
process & context switch \\
sync & atomic operation \\
syscall & MIPS-specific syscall machanism \\
trap & exception handling \\
\hline
\end{tabular}
\caption{ucore-thumips directories}
\label{tab:dir}
\end{table}
\subsection{Makefile}
UCore does not have a configuration system yet; all configuration
is hand-coded in the Makefile.
\begin{enumerate}
\item \emph{GCCPREFIX}, toolchain path;
\item \emph{USER\_APPLIST}, the applications to be included.
\end{enumerate}
More detailed memory layout is defined in memlayout.h in the corresponding
machine directory, see Section. \ref{sec:mm}.
\section{Implementation Details}
This section describes the implementations of some important mechanism
in the MIPS32S CPU, which is similar to the standard MIPS32 CPU.
Thus, I referred to Harvard's OS/161~\cite{OS161} project while writing the code.
\subsection{Booting}
The booting process is also simulated. Our Qemu needs the following files:
\begin{table}[h]
\centering
\begin{tabular}{|l|l|l|}
\hline
File & Type & Description \\
\hline
ucore-kernel-initrd & ELF & ucore kernel with ramdisk\\
flash.img & Binary & used for flash simulation, can be a link to the kernel\\
boot/loader.bin & Binary & BIOS, knows ELF and loads the kernel from Flash\\
thumips\_insn.txt & Text & Instruction Set Descriptor\\
\hline
\end{tabular}
\caption{Qemu Files}
\label{tab:qemufile}
\end{table}
There are two ways to use our modified Qemu
to boot ucore for MIPS.
\begin{table}[h]
\centering
\begin{tabular}{|r|r|r|}
\hline
Offset & Size & Usage\\
\hline
0x00000000 & 0x80000000 & Userspace, TLB mapped\\
0x80000000 & 0x20000000 & Kernel Space, Direct mapping \\
0xA0000000 & 0x20000000 & IO Space, Direct Mapping \\
\hline
\end{tabular}
\caption{Bootable Kernel Memory Layout}
\label{tab:layout}
\end{table}
Then, the C environment must be set up:
\begin{itemize}
\item \emph{sp}, setup the stack
\item \emph{gp}, set to \_gp (defined in ldscript)
\item zero .bss section
\end{itemize}
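As a minimal sketch of the last step (the symbol names here are illustrative
and would come from the kernel linker script, \emph{tools/kernel.ld}), the
.bss section can be cleared from early C code once the stack is usable:
\begin{lstlisting}[language={C++}]
/* Sketch: zero the .bss section; the bounds are assumed to be exported
   by the linker script. */
extern char __bss_start[], __bss_end[];

static void clear_bss(void)
{
    char *p;
    for (p = __bss_start; p < __bss_end; p++)
        *p = 0;
}
\end{lstlisting}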
For convenience, the root filesystem image (ramdisk) is linked
with the kernel, appended at the end of the \emph{.data} section.
\footnote{see tools/kernel.ld}.
\subsection{Exception Handling}
The most important hardware support for exception/interrupt handling on MIPS32S is the \emph{SR}
register:
\begin{figure}[h]
\centering
\includegraphics[width=0.8\linewidth]{reg-sr.png}
\caption{Status Register\cite{Sweetman:2006:SMR:1210986}}
\label{fig:reg-sr}
\end{figure}
\begin{itemize}
\item ERL, MUST be cleared by software after reset;
\item EXL and KSU, MUST be set properly to handle nested exceptions.
\end{itemize}
The kernel uses a trapframe structure to save registers when a privilege mode transition occurs:
\begin{algorithm}[h]
\begin{lstlisting}[language={C++}]
/* $1 - $30 */
struct pushregs {
uint32_t reg_r[30];
};
/*
* Structure describing what is saved on the stack during entry to
* the exception handler.
*
* This must agree with the code in exception.S.
*/
struct trapframe {
uint32_t tf_vaddr; /* coprocessor 0 vaddr register */
uint32_t tf_status; /* coprocessor 0 status register */
uint32_t tf_cause; /* coprocessor 0 cause register */
uint32_t tf_lo;
uint32_t tf_hi;
uint32_t tf_ra; /* Saved register 31 */
struct pushregs tf_regs;
uint32_t tf_epc; /* coprocessor 0 epc register */
};
\end{lstlisting}
\caption{Trapframe}
\end{algorithm}
The trapframe is constructed by the assembly in trap/exception.S; be aware that
registers \emph{k0} and \emph{k1} are reserved (the compiler never uses them), so we can use them in the exception
handler without saving them.
See \emph{kern/trap/exception.S} for more details.
\subsection{Memory Management}
\label{sec:mm}
MIPS32S has no MMU, but it has a programmable TLB unit, so
it is possible to emulate an MMU using the TLB miss exception.
The software-emulated MMU of MIPS32S is similar to X86's;
ucore uses the following
configuration (which is similar to X86's VA layout):
\begin{table}[H]
\centering
\begin{tabular}{|c|c|c|}
\hline
PDT Index & PTE Index & Offset \\
\hline
10 & 10 & 12 \\
\hline
\end{tabular}
\caption{Virtual Address in ucore}
\label{tab:va_layout}
\end{table}
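For illustration, the three fields implied by this split can be extracted with
macros like the following (the names are ours, not necessarily those used in
the ucore source):
\begin{lstlisting}[language={C++}]
/* Illustrative macros for the 10/10/12 virtual address split above. */
#define PDX(va)    (((uint32_t)(va) >> 22) & 0x3ff)   /* page directory index */
#define PTX(va)    (((uint32_t)(va) >> 12) & 0x3ff)   /* page table index */
#define PGOFF(va)  ((uint32_t)(va) & 0xfff)           /* offset within the page */
\end{lstlisting}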
In our TLB miss handler, we first check whether it is just a TLB miss
or a page table miss by looking the address up in our emulated X86-style page table.
If it is only a TLB miss, we check the access permission and refill the TLB (or raise an
access violation). If it is a page miss, we call do\_pgfault. See Alg. \ref{alg:tlbmiss}.
\begin{algorithm}[h]
\begin{lstlisting}[language={C++}]
/* use software emulated X86 pgfault */
static void handle_tlbmiss(struct trapframe* tf, int write)
{
int in_kernel = trap_in_kernel(tf);
assert(current_pgdir != NULL);
uint32_t badaddr = tf->tf_vaddr;
int ret = 0;
pte_t *pte = get_pte(current_pgdir, tf->tf_vaddr, 0);
if(pte==NULL || ptep_invalid(pte)){ //PTE miss, pgfault
//tlb will not be refill in do_pgfault,
//so a vmm pgfault will trigger 2 exception
//permission check in tlb miss
ret = pgfault_handler(tf, badaddr, get_error_code(write, pte));
}else{ //tlb miss only, reload it
/* refill two slot */
/* check permission */
if(in_kernel){
tlb_refill(badaddr, pte);
return;
}else{
if(!ptep_u_read(pte)){
ret = -1;
goto exit;
}
if(write && !ptep_u_write(pte)){
ret = -2;
goto exit;
}
tlb_refill(badaddr, pte);
return ;
}
}
exit:
if(ret){
print_trapframe(tf);
if(in_kernel){
panic("unhandled pgfault");
}else{
do_exit(-E_KILLED);
}
}
return ;
}
\end{lstlisting}
\caption{TLB Miss Handler}
\label{alg:tlbmiss}
\end{algorithm}
\subsection{Context Switch}
According to MIPS32 O32 ABI, we must save the following registers
during the context switching:
\begin{algorithm}[H]
\begin{lstlisting}[language={C++}]
struct context {
uint32_t sf_s0;
uint32_t sf_s1;
uint32_t sf_s2;
uint32_t sf_s3;
uint32_t sf_s4;
uint32_t sf_s5;
uint32_t sf_s6;
uint32_t sf_s7;
uint32_t sf_s8;
uint32_t sf_gp;
uint32_t sf_ra;
uint32_t sf_sp;
};
\end{lstlisting}
\caption{Context}
\label{alg:context}
\end{algorithm}
According to the O32 ABI, \emph{s0-s8} must be preserved by the callee,
\emph{gp} is the global pointer, and \emph{ra} holds the return address.
In addition, we must switch the kernel stack, which is saved in \emph{sf\_sp}.
\subsection{System Call}
The syscall mechanism is borrowed from Linux2.6. In MIPS, a special
instruction, \emph{syscall}, handles the user-mode to supervisor-mode transition.
In ucore, we employ the following system calling convention:
\begin{enumerate}
\item \emph{a0 - a3}, arguments(from left to right)
\item \emph{v0}, syscall number
\item \emph{syscall}
\item return value in \emph{v0}
\end{enumerate}
Syscalls are wrapped in the user-mode library and can be called as normal
C functions.
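For illustration, a kernel-side dispatcher consistent with this convention
might look like the sketch below (\emph{do\_syscall} is a hypothetical
handler-table lookup; \emph{reg\_r[0]} corresponds to \$1 in the trapframe
shown earlier):
\begin{lstlisting}[language={C++}]
/* Sketch of a dispatcher for the convention above: number in v0 ($2),
   arguments in a0-a3 ($4-$7), return value placed back in v0. */
static void syscall_dispatch(struct trapframe *tf)
{
    uint32_t num = tf->tf_regs.reg_r[1];       /* v0 = $2 */
    uint32_t arg[4];
    int i;
    for (i = 0; i < 4; i++)
        arg[i] = tf->tf_regs.reg_r[3 + i];     /* a0-a3 = $4-$7 */
    tf->tf_regs.reg_r[1] = do_syscall(num, arg);  /* hypothetical handler table */
    tf->tf_epc += 4;    /* resume after the syscall instruction */
}
\end{lstlisting}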
\subsection{User Library and Application}
The code in \emph{user}
should work without modification. However, since \emph{libs-user-ucore/syscall.c} is not compatible with MIPS's
system calling convention, we use some macros from Linux
to create our own system call entries. See Alg. \ref{alg:syscall}.
\begin{algorithm}[h]
\begin{lstlisting}[language={C++}]
static inline int
syscall(int num, ...) {
va_list ap;
va_start(ap, num);
uint32_t arg[MAX_ARGS];
int i, ret;
for (i = 0; i < MAX_ARGS; i ++) {
arg[i] = va_arg(ap, uint32_t);
}
va_end(ap);
num += SYSCALL_BASE;
asm volatile(
".set noreorder;\n"
"move $v0, %1;\n" /* syscall no. */
"move $a0, %2;\n"
"move $a1, %3;\n"
"move $a2, %4;\n"
"move $a3, %5;\n"
"syscall;\n"
"nop;\n"
"move %0, $v0;\n"
: "=r"(ret)
: "r"(num), "r"(arg[0]), "r"(arg[1]), "r"(arg[2]), "r"(arg[3])
: "a0", "a1", "a2", "a3", "v0"
);
return ret;
}
\end{lstlisting}
\caption{User Mode System Calling Convention}
\label{alg:syscall}
\end{algorithm}
Another modification is \emph{user/libs/user.ld}.
The .text section of user application is located at 0x10000000.
\section{Conclusion}
In our project, we worked out a basically functional version of ucore for MIPS32S.
However, as the items on the TODO list imply,
much work remains to make ucore for MIPS32S a practically usable
operating system.
\bibliographystyle{plain}
\bibliography{book}
\end{document}
| {
"alphanum_fraction": 0.6987119904,
"avg_line_length": 31.3139293139,
"ext": "tex",
"hexsha": "34edf1704fdf822a334e010e292a7396807d420e",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "2d6888e679267d083c204afccb33a31c2928fdf1",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ssryps/verilog-parser",
"max_forks_repo_path": "tests/cod19grp7/Uncommented/ucore/doc/ucore_thumips.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "2d6888e679267d083c204afccb33a31c2928fdf1",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ssryps/verilog-parser",
"max_issues_repo_path": "tests/cod19grp7/Uncommented/ucore/doc/ucore_thumips.tex",
"max_line_length": 151,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "2d6888e679267d083c204afccb33a31c2928fdf1",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ssryps/verilog-parser",
"max_stars_repo_path": "tests/cod19grp7/Uncommented/ucore/doc/ucore_thumips.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 4263,
"size": 15062
} |
\chapter{Author Biography}
As of 2012, UMass Lowell calls for a ``Biographical Sketch of Author.''
The required brief biographical sketch should include the names of schools attended, the exact designation of diplomas and degrees awarded, the title and nature of any post-collegiate employment, together with the name and location of the employing organization, and a description of any previous graduate study or related research, publications, or special professional interests.
| {
"alphanum_fraction": 0.8198757764,
"avg_line_length": 80.5,
"ext": "tex",
"hexsha": "e271aa269fa11386b8d7bf4dba62710d35de8319",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "9307423db695a5e05acdf2e4bbadd8a1c9cbb480",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "TienanLi/umlthesis",
"max_forks_repo_path": "biography.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "9307423db695a5e05acdf2e4bbadd8a1c9cbb480",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "TienanLi/umlthesis",
"max_issues_repo_path": "biography.tex",
"max_line_length": 381,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "9307423db695a5e05acdf2e4bbadd8a1c9cbb480",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "TienanLi/umlthesis",
"max_stars_repo_path": "biography.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 91,
"size": 483
} |
%-------------------------
% Resume in Latex
% Author : Drew Johnson
% License : MIT
%------------------------
\documentclass[letterpaper,11pt]{article}
\usepackage{latexsym}
\usepackage[empty]{fullpage}
\usepackage{titlesec}
\usepackage{marvosym}
\usepackage[usenames,dvipsnames]{color}
\usepackage{verbatim}
\usepackage{enumitem}
\usepackage[hidelinks]{hyperref}
\usepackage{fancyhdr}
\usepackage[english]{babel}
\usepackage{tabularx}
\pagestyle{fancy}
\fancyhf{} % clear all header and footer fields
\fancyfoot{}
\renewcommand{\headrulewidth}{0pt}
\renewcommand{\footrulewidth}{0pt}
% Adjust margins
\addtolength{\oddsidemargin}{-0.5in}
\addtolength{\evensidemargin}{-0.5in}
\addtolength{\textwidth}{1in}
\addtolength{\topmargin}{-.5in}
\addtolength{\textheight}{1.0in}
\urlstyle{same}
\raggedbottom
\raggedright
\setlength{\tabcolsep}{0in}
% Sections formatting
\titleformat{\section}{
\vspace{-4pt}\scshape\raggedright\large
}{}{0em}{}[\color{black}\titlerule \vspace{-5pt}]
%-------------------------
% Custom commands
\newcommand{\resumeItem}[2]{
\item\small{
\textbf{#1}{: #2 \vspace{-2pt}}
}
}
\newcommand{\resumeSubheading}[4]{
\vspace{-4pt}\item
\begin{tabular*}{0.97\textwidth}[t]{l@{\extracolsep{\fill}}r}
\textbf{#1} & #2 \\
\textit{\small#3} & \textit{\small #4} \\
\end{tabular*}\vspace{-5pt}
}
\newcommand{\resumeSubSubheading}[2]{
\begin{tabular*}{0.97\textwidth}{l@{\extracolsep{\fill}}r}
\textit{\small#1} & \textit{\small #2} \\
\end{tabular*}\vspace{-5pt}
}
\newcommand{\resumeSubItem}[2]{\resumeItem{#1}{#2}\vspace{-4pt}}
\renewcommand{\labelitemii}{$\circ$}
\newcommand{\resumeSubHeadingListStart}{\begin{itemize}[leftmargin=*]}
\newcommand{\resumeSubHeadingListEnd}{\end{itemize}}
\newcommand{\resumeItemListStart}{\begin{itemize}[leftmargin=0.15in]}
\newcommand{\resumeItemListEnd}{\end{itemize}\vspace{-5pt}}
%----------------------------
%--- Resume STARTS HERE ---
\begin{document}
%----------HEADING-----------
\begin{tabular*}{\textwidth}{l@{\extracolsep{\fill}}r}
\textbf{\href{https://www.drewj.dev/}{\Large Drew Johnson}} & \href{mailto:[email protected]}{[email protected]}\\
\href{https://drewj.dev/}{https://www.drewj.dev} & 404-825-2866 \\
\end{tabular*}
%-----------EDUCATION--------
\section{Education}
\resumeSubHeadingListStart
\resumeSubheading
{Georgia Institute of Technology}{Atlanta, GA}
{Bachelor of Science in Computer Science}{Aug 2013 -- Aug 2017}
\resumeSubHeadingListEnd
%-----------EXPERIENCE-------
\section{Experience}
\resumeSubHeadingListStart
\resumeSubheading
{The Home Depot}{Atlanta, GA}
{Senior Software Engineer}{May 2020 -- Present}
\resumeItemListStart
\resumeItem{Full Stack TypeScript}
{Developed suite of React frontends and Node/Express/TypeORM layered microservice APIs in TypeScript. Adhered to TypeScript best practices to maximize cross resource models and code sharing.}
\resumeItem{Test Driven Development}
{Focused heavily on TDD principles, leveraging various tools for E2E, integration, and unit testing. Integrated testing with CI/CD processes including augmented coverage reporting.}
\resumeItem{Cloud Architecture and SRE}
{Created and managed CI/CD pipelines and infrastructure for all applications both on-premise and cloud based. Engineered for high latency consumption by B2B/B2C consumers.}
\resumeItemListEnd
\resumeSubheading
{Delta Air Lines}{Atlanta, GA}
{Senior Software Engineer}{Sep 2019 -- May 2020}
\resumeItemListStart
\resumeItem{Cloud Architecture and Development}
{Guided development architecture of Delta's first AWS cloud migration project, implementing real-time data streaming from on-premise sources
following serverless and cloud native models.}
\resumeItem{Data Ingestion and Streaming}
{Developed on-premise and counterpart cloud applications/functions for real-time, multi-source customer data
ingestion, standardization, and consolidation around centralized IDs.}
\resumeItem{Data Engineering}
Produced new and updated existing data mappings and flows for data sources and implemented distributed
database solutions accounting for scalable fault/exception tolerance.}
\resumeItem{BDD and Functional Testing}
{Automated internal unit and integration testing practices, increasing test coverage by over 75\%,
saving hundreds of monthly hours of manual testing.}
\resumeItemListEnd
\resumeSubheading
{Meridian Business Services}{Overland Park, KS}
{Lead Software Engineer}{Oct 2017 -- Sep 2019}
\resumeItemListStart
\resumeItem{Development Lead}
Led development efforts supporting the company's 2-year 500\% growth,
establishing and maintaining core engineering best practices while delivering effective and efficient code.}
\resumeItem{Web Solutions}
{Architected and developed ERP/CRM customizations for over 50 clients including extensive
third-party integrations/API development, backend workflow automations, and full-stack web-app development.}
\resumeItem{CI/CD and Testing}
{Designed and developed custom CI/CD platform for custom ERP/CRM scripts, enabling development best practice
adherence and increasing effective code delivery throughput 200\%.}
\resumeItemListEnd
\resumeSubheading
{Schenck Process}{Kansas City, MO}
{Lead Software Engineer (Side Contract)}{Oct 2018 -- Apr 2019}
\resumeItemListStart
\resumeItem{Full-Stack App Design/Development}
{Architected, developed, and implemented engineering product specification/sizing tool, saving hundreds of
monthly engineer hours and preventing company liability issues.}
\resumeItem{Frontend Configurator}
{React/Redux configurator adhering to the NFPA 68 standard. Performing complex mathematical operations derived from Mathcad worksheets
and displaying solutions in real time.}
\resumeItemListEnd
\resumeSubheading
{Federal Aviation Administration}{Hampton, GA}
{Software Engineer (Side Contract)}{Mar 2018 -- Dec 2018}
\resumeItemListStart
\resumeItem{Serverless Application}
{Designed/developed scheduling PWA for use by over 100 Atlanta based FAA employees.
React/Redux frontend and Node Lambda backend with GraphQL API including role based access control.}
\resumeItem{Database Solution}
{Automated manual process of identifying facility landline emergency call location to aid in dispatch of 911 services.
Reduced telephone specialist operator effort by dozens of hours a month.}
\resumeItemListEnd
\resumeSubheading
{DSI Global}{Kansas City, MO}
{Software Developer (Side Contract)}{Dec 2017 -- Dec 2018}
\resumeItemListStart
\resumeItem{React/Redux Implementation}
Proposed and led implementation of React with Redux in new production applications
and refactoring of older products from plain HTML with monolithic CSS.}
\resumeItem{Backend REST API}
{Refactored RESTful API utilizing customized SQL queries resulting in 250\% reduction in client request time and requiring half the original client requests.}
\resumeItemListEnd
\resumeSubHeadingListEnd
%----------PROJECTS----------
%\section{Projects}
% \resumeSubHeadingListStart
% \resumeSubItem{NetSuite Login Chrome Extension}
% {Chrome extension to facilitate provisioning and use of shared NetSuite accounts with real-time login
% management and access control. Utilized by engineering team with around 20 active users.}
% \resumeSubItem{Project FORM}
% {React Native application utilizing pose estimation/detection models to provide automated exercise feedback designed for injury prevention.
% Both offline and cloud data processing.}
% \resumeSubHeadingListEnd
%----PROGRAMMING SKILLS------
\section{Skills}
\textbf{Languages}{: JavaScript, TypeScript, HTML/CSS, Python, Scala, Java, SQL, GraphQL}
\\
\textbf{Frameworks/Tools}{: React, Node, AWS, GCP, Gatsby/Next, Cypress, Spark, Hadoop, Kafka}
%----------------------------
\end{document} | {
"alphanum_fraction": 0.7072707864,
"avg_line_length": 41.1268292683,
"ext": "tex",
"hexsha": "83dbf343e1e70da92c5064d48aa997ca302dbcc4",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "df5cf8895a92acade431a9bcd4ced2db2c8a0d3b",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "DrewJohnsonGT/portfolio",
"max_forks_repo_path": "src/assets/Resume/Resume.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "df5cf8895a92acade431a9bcd4ced2db2c8a0d3b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "DrewJohnsonGT/portfolio",
"max_issues_repo_path": "src/assets/Resume/Resume.tex",
"max_line_length": 201,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "df5cf8895a92acade431a9bcd4ced2db2c8a0d3b",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "DrewJohnsonGT/portfolio",
"max_stars_repo_path": "src/assets/Resume/Resume.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2071,
"size": 8431
} |
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% The Legrand Orange Book
% LaTeX Template
% Version 2.1 (14/11/15)
%
% This template has been downloaded from:
% http://www.LaTeXTemplates.com
%
% Mathias Legrand ([email protected]) with modifications by:
% Vel ([email protected])
%
% License:
% CC BY-NC-SA 3.0 (http://creativecommons.org/licenses/by-nc-sa/3.0/)
%
% Compiling this template:
% This template uses biber for its bibliography and makeindex for its index.
% When you first open the template, compile it from the command line with the
% commands below to make sure your LaTeX distribution is configured correctly:
%
% 1) pdflatex main
% 2) makeindex main.idx -s StyleInd.ist
% 3) biber main
% 4) pdflatex main x 2
%
% After this, when you wish to update the bibliography/index use the appropriate
% command above and make sure to compile with pdflatex several times
% afterwards to propagate your changes to the document.
%
% This template also uses a number of packages which may need to be
% updated to the newest versions for the template to compile. It is strongly
% recommended you update your LaTeX distribution if you have any
% compilation errors.
%
% Important note:
% Chapter heading images should have a 2:1 width:height ratio,
% e.g. 920px width and 460px height.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%----------------------------------------------------------------------------------------
% PACKAGES AND OTHER DOCUMENT CONFIGURATIONS
%----------------------------------------------------------------------------------------
\documentclass[11pt,fleqn]{book} % Default font size and left-justified equations
\usepackage[dvipsnames]{xcolor}
\usepackage{wrapfig}
\usepackage{listings}
\usepackage{textcomp}
\usepackage{smartdiagram}
\usepackage{texshade}
%----------------------------------------------------------------------------------------
\lstset{frame=tb,
language=Bash,
aboveskip=3mm,
belowskip=3mm,
showstringspaces=false,
columns=flexible,
basicstyle={\small\ttfamily},
numbers=none,
numberstyle=\tiny\color{black},
keywordstyle=\color{black},
commentstyle=\color{black},
stringstyle=\color{black},
breaklines=true,
breakatwhitespace=true,
tabsize=3
}
\input{structure} % Insert the commands.tex file which contains the majority of the structure behind the template
\newcommand{\GeneCount}{{\color{Red} Gene Count }}
\newcommand{\JunctionMake}{{\color{Blue} Junction Make }}
\newcommand{\BlastQuery}{{\color{ForestGreen} Blast Query }}
\newcommand{\ReadDepth}{{\color{Bittersweet} Read Depth }}
\newcommand{\DEEPN}{\textbf{DEEPN }}
\newcommand{\StatMaker}{{\color{Dandelion} StatMaker }}
\begin{document}
%----------------------------------------------------------------------------------------
% TITLE PAGE
%----------------------------------------------------------------------------------------
\begingroup
\thispagestyle{empty}
\begin{tikzpicture}[remember picture,overlay]
\coordinate [below=10.5cm] (midpoint) at (current page.north);
\node at (current page.north west)
{\begin{tikzpicture}[remember picture,overlay]
\node[anchor=north west,inner sep=0pt] at (0,0) {\includegraphics[width=\paperwidth]{background}}; % Background image
\draw[anchor=north] (midpoint) node [fill=ocre!30!white,fill opacity=0.6,text opacity=1,inner sep=1cm]{\Huge\centering\bfseries\sffamily\parbox[c][][t]{0.9\paperwidth}{\centering DEEPN Methods v1.1\\ % Book title
\textnormal{\sffamily\Large \textbf{D}ynamic \textbf{E}nrichment for \textbf{E}valuation of \textbf{P}rotein \textbf{N}etworks}\\[20pt] % Subtitle
{\huge Working With Yeast \hfill{Illumina Data Processing }}\\[10pt]
{\Large Tabitha Peterson \hfill{Venkatramanan Krishnamani}}\\[-1pt]
{\Large Natasha Pashkova \hfill{Mark Stamnes}}\\[-1pt]
{\Large \hfill{Robert Piper}}\\
}}; % Author name
\end{tikzpicture}};
\end{tikzpicture}
\vfill
\endgroup
%----------------------------------------------------------------------------------------
% COPYRIGHT PAGE
%----------------------------------------------------------------------------------------
\newpage
~\vfill
\thispagestyle{empty}
%\noindent Copyright \copyright\ 2014 Andrea Hidalgo\\ % Copyright notice
\noindent \textsc{This software is provided under ``The MIT License'' (MIT). \emph{See Section \ref{license}}}\\
\noindent \textsc{Department of Molecular Physiology and Biophysics, University of Iowa}\\
\noindent \textsc{https://github.com/emptyewer/DEEPN/releases}\\ % URL
\noindent This research was performed with the support of a grant awarded to Dr. Robert C. Piper by National Institute of Health (5R01GM058202).\\ % License information
\noindent \textit{First release, January 2016} % Printing/edition date
%----------------------------------------------------------------------------------------
% TABLE OF CONTENTS
%----------------------------------------------------------------------------------------
\chapterimage{chapter_head_1.pdf} % Table of contents heading image
\pagestyle{empty} % No headers
\tableofcontents % Print the table of contents itself
\cleardoublepage % Forces the first chapter to start on an odd page so it's on the right
\pagestyle{fancy} % Print headers again
%----------------------------------------------------------------------------------------
% PART
%----------------------------------------------------------------------------------------
\part{Experimental Protocols}
%----------------------------------------------------------------------------------------
% CHAPTER 1
%----------------------------------------------------------------------------------------
\chapterimage{chapter_head_1.pdf} % Chapter heading image
\chapter{DEEPN-Y2H protocol}
\section{Cloning}\index{Cloning}
\subsection{Construction of Gal4-DNA-binding domain Plasmids}\index{Construction of Gal4-DNA-binding domain Plasmids}
Any \emph{TRP1}-containing plasmid accommodating expression fusions to a myc epitope-tagged Gal4 DNA binding domain is suitable for the workflow described here. The current study uses pGBKT7 (Clontech, Mountain View, CA), a plasmid carrying the Kanamycin-resistance gene for selection in bacteria, the \emph{TRP1} gene for selection in \emph{trp1} mutant yeast such as Y187 and PJ69-4A, and the Gal4 DNA binding domain encoded by bp 1-147 of \emph{S. cerevisiae GAL4}. DNA fragments encoding proteins of interest to be cloned downstream of the Gal4 DNA binding domain region can be made by gene synthesis using the codon bias of \emph{S. cerevisiae} as an aid to ensure high level production and as an aid for cloning (\cite{ang2016multi}). Synthetic gene fragments (gBlocks, Integrated DNA Technologies, Coralville, IA; or Strings, Thermofisher, Waltham, MA) were PCR amplified and cloned into pGBKT7 cut with EcoRI and BamHI using the method of Gibson et al. and the Gibson Assembly Master Mix kit available from New England Biolabs (Gibson et al., 2009). Resulting Kanamycin-resistant bacterial colonies were screened for the insert of interest using colony PCR and primers for the insert of interest.
\subsection{Expression of Gal4-DNA-binding domain fusion proteins}
The \emph{TRP1}-carrying bait plasmids are then transformed into PJ69-4A, a MATA strain suitable for selection of Yeast 2-hybrid (Y2H) interactions. The resulting strain can be mated to the Y187 strain that contains the yeast 2-hybrid library. Other MATA strains, such as the Y2HGold yeast strain (Clontech, Mountain View, CA) can be used to hold the \emph{TRP1}-containing bait plasmid, however, PJ69-4A showed a 20 fold better mating efficiency than other Y2H strains including the Y2HGold strain. So this protocol will not work with Y2HGold or other yeast that do not have efficient mating. Yeast transformation was performed as previously described using a Lithium Sorbitol buffer \emph{(Kawai et al., 2010)}.
To check for expression of the Gal4-DNA-binding-domain (DBD) fusion protein, grow transformed PJ69-4A cells in 1 ml of synthetic defined media lacking tryptophan (SD-Trp) overnight. Dilute with 2 volumes of YPD and grow for 1 hr at 30$^\circ$C. Pellet the cells and resuspend in 1 ml 0.2 N NaOH. After a 5 min incubation at 25$^\circ$C, re-pellet the cells, remove the NaOH, and resuspend the pellet in 100 $\mu$l TWIRL/0.8 M BME sample buffer \emph{(von der Haar, 2007)}. Incubate the lysate at 70$^\circ$C for 5 min and analyze by SDS-PAGE and immunoblotting using anti-myc antibodies.
\section{Self-Activation Test}
\subsection{Test selection conditions for yeast 2-hybrid (Y2H) interaction}
The Gal4-DBD-fusion proteins need to be tested for conditions that will select for possible Y2H interactions. This needs to be in the context of the same diploid background that will house the bait and library prey plasmids after mating.
\begin{itemize}
\item Transform the Y187 strain with the empty vector-only ``prey'' plasmid. This workflow uses a plasmid (pGADT7) that expresses the Gal4 transcriptional activation domain and carries the ampicillin-resistance gene for selection in bacteria and the \emph{LEU2} gene for selection in yeast.
\item Mate the Leu+ Y187 transformant with the Trp+ PJ69-4A transformant carrying the bait plasmid of interest by patching them together on a YPD plate. Allow the plate to grow overnight at 30$^\circ$C, before streaking yeast from the patch onto SD-Leu-Trp plate to select and isolate single colonies of PJ69-4A/Y187 MatA/$\alpha$ diploids.
\item Create a set of tester plates: SD-Leu-Trp, SD-Leu-Trp-His, and SD-Leu-Trp-His with the addition of 0.1-10~mM 3AT (3-amino-triazole). Grow diploids overnight in SD-Leu-Trp, pellet and wash the cells twice in sterile water, then resuspend to OD 0.5. Serially dilute the cells 1:10 in tubes or a 96-well dish and spot 4 $\mu$l of each dilution onto each type of plate (see the sketch after this list). Grow for 2-3 days at 30$^\circ$C.
\end{itemize}
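As a rough sanity check of the spotting series above, the following Python sketch (not part of the DEEPN software) estimates how many cells land in each 4 $\mu$l spot. The OD600-to-cells/ml conversion factor is an assumed rule of thumb, not a value taken from this protocol, and should be verified for the strain in hand.
\begin{lstlisting}
# Rough estimate (not part of DEEPN) of cells delivered per 4 ul spot in the
# 1:10 dilution series described above. The OD600-to-cells/ml conversion is an
# assumed rule of thumb (~1e7 cells/ml per OD600 unit); verify for your strain.

CELLS_PER_ML_PER_OD = 1e7      # assumed conversion factor
start_od = 0.5                 # cells resuspended to OD 0.5
spot_volume_ml = 0.004         # 4 ul spotted per dilution

cells_per_ml = start_od * CELLS_PER_ML_PER_OD
for step in range(5):          # undiluted spot plus four 1:10 dilutions
    dilution = 10 ** step
    cells_in_spot = cells_per_ml / dilution * spot_volume_ml
    print(f"1:{dilution:<6d}  ~{cells_in_spot:10.1f} cells per spot")
\end{lstlisting}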
\begin{figure}[!ht]
\centering
\includegraphics[width=\textwidth]{Exp1}
\caption{}
\label{fig:exp_fig1}
\end{figure}
\begin{remark}
The best result is growth in the presence but not the absence of histidine, regardless of whether 3AT is present. This allows the use of SD-Leu-Trp-His to select for yeast with a positive Y2H interaction. If there is growth on SD-Leu-Trp-His plates, a Y2H selection can still be obtained by using the lowest concentration of 3AT that prevents growth. Typically, the level of 3AT needed to establish a threshold of selection is between 0.1 and 1 mM, as can be seen in Figure \ref{fig:exp_fig1}. Using higher levels of 3AT or other, more stringent selections will undermine the ability to detect Y2H interactors. If growth is observed on SD-Leu-Trp-His with more than 1 mM 3AT, a different bait plasmid should be sought.
\end{remark}
\subsection{Mating and Selection}
\begin{figure}[!ht]
\centering
\includegraphics[width=\textwidth]{Exp2}
\caption{}
\label{fig:exp_fig2}
\end{figure}
The Y187 strain does not mate well. Thus, the following optimized conditions are required to maintain the complexity of the library. The overall scheme is pictured in Figure \ref{fig:exp_fig2}.
\vspace{0.1in}
\begin{enumerate}[leftmargin=0.8in]
\item[\textbf{Day 1}] Inoculate a fresh culture of the PJ69-4A strain transformed with the \emph{TRP1}-containing bait plasmid in 23 ml of SD-Trp media. Thaw a vial of the Y187 cells containing the \emph{LEU2}-carrying ``prey'' library plasmid and inoculate 125 ml of SD-Leu media. Grow all cultures overnight at 30$^\circ$C, 200 rpm.
\item[\textbf{Day 2}] The OD of the overnight cultures should range between 1.0 and 1.5. Pellet 21 OD equivalents of the PJ69-4A transformant cells and 15 OD equivalents of the Y187 strain carrying the library plasmids in separate 50 ml conical tubes. Resuspend the cells in 10 ml of water and re-pellet in new 50 ml conical tubes. Resuspend the PJ69-4A cells and the Y187 cells in 4 ml bYPDA pH 3.7 each. To set up 4 equivalent mating reactions, add 1 ml PJ69-4A cells, 1 ml Y187 cells, and 1 ml bYPDA pH 3.7 to a new 50 ml conical tube. Incubate at 30$^\circ$C with gentle orbital agitation (90-130 rpm) for 90 min so that the cells do not fully sediment. Pellet the cells and resuspend in 2 ml of bYPDA (see Appendix A). Plate the entire 2 ml onto a 150 mm YPD plate.
\item[\textbf{Day 3}] Harvest cells from the YPD plates using 12 ml of SD-Leu-Trp media and place into a 50 ml conical tube. Pellet the cells and resuspend them in 40 ml SD-Leu-Trp media. To calculate the number of diploid cells, dilute 4 $\mu$l of the diploid mixture into 200 $\mu$l SD-Leu-Trp media and plate onto an SD-Leu-Trp plate. This plate represents a 1:10,000 dilution of the stock of diploids harvested and should yield 100-1000 colonies after the plate is incubated at 30$^\circ$C for 2 days (a worked example of this calculation follows this list).
Take the remainder of each 40 ml cell resuspension and inoculate a flask containing 500 ml of SD-Leu-Trp media. Shake-incubate these flasks at 30$^\circ$C, 200 rpm until they reach saturation ($\sim$2.0 OD/ml). This typically takes about 36 hours.
\end{enumerate}
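The Day 3 titer arithmetic can be written out explicitly. The short Python sketch below (illustrative only, not part of the DEEPN software) converts a hypothetical colony count on the 1:10,000 titer plate into an estimate of total diploids and compares it against the 1 million diploid minimum referenced under Day 5.
\begin{lstlisting}
# Minimal sketch (not part of DEEPN) of the Day 3 titer arithmetic: the titer
# plate receives 4 ul of the 40 ml diploid stock, i.e. a 1:10,000 sampling,
# so total diploids = colonies x 10,000. The colony count below is made up.

def total_diploids(colonies, dilution_factor=10_000):
    return colonies * dilution_factor

colonies = 250                                   # hypothetical count after 2 days
total = total_diploids(colonies)
print(f"~{total:,} total diploids harvested")    # ~2,500,000
print("complexity OK" if total >= 1_000_000 else "consider repeating the mating")
\end{lstlisting}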
\begin{remark}
To harvest cells efficiently from the YPD agar plates, scrape the cells off the YPD plates with a cell scraper and SD-Leu-Trp media into a 50 ml conical tube. This will take about 4 or 5 rinses of the YPD plate with 2-3 ml of SD-Leu-Trp media at a time.
\end{remark}
\begin{enumerate}[leftmargin=0.8in]
\item[\textbf{Day 4}] Monitor the growth of all flasks by checking the Optical Density.
\item[\textbf{Day 5}] At this point, the titer plates should be ready to analyze, allowing verification that mating efficiency was sufficient to maintain library complexity. A minimum of 1 million total diploids is recommended for this workflow. Take 20 ml from the saturated 500 ml culture and use it to inoculate a flask containing 750 ml SD-Leu-Trp media, and another 20 ml to inoculate a flask containing 750 ml SD-Leu-Trp-His with the lowest level of 3AT that eliminates background growth on media lacking histidine. Mix the new cultures (770 ml) well and take an initial OD600. Shake-incubate the cultures at 30$^\circ$C, 200 rpm until they reach saturation, which typically occurs in $\sim$24 hrs for the unselected SD-Leu-Trp culture and can take 40+ hrs for cultures under stringent selection for Y2H interactions.
\end{enumerate}
\begin{warning}
Make sure to mix the 500 ml flask well before taking 20 ml out for inoculation, because yeast sediment quickly and the cell suspension needs to be homogeneous.
\end{warning}
\begin{enumerate}[leftmargin=0.8in]
\item[\textbf{Day 6}] Once the unselected SD-Leu-Trp culture has reached saturation (OD 2.0), remove 10 ml, pellet the cells, and either freeze them at -20$^\circ$C or continue on to DNA extraction. For the unselected SD-Leu-Trp culture, this will serve as the sample for sequencing.
\item[\textbf{Day 7}] Once the selected SD-Leu-Trp-His culture that is selecting for positive Y2H interactions has reached saturation, remove 2 ml of the culture and inoculate it into 75 ml of fresh SD-Leu-Trp-His containing the appropriate level of 3AT. Allow this culture to grow at 30$^\circ$C with shaking at 200 rpm until it reaches saturation, which can be followed over the course of growth by taking OD measurements. Saturation is typically attained within 30-60 hrs of growth.
\item[\textbf{Day 8-9}] Once the selected SD-Leu-Trp-His cultures reach saturation (OD 2.0), remove 10 ml and pellet the cells; at this point the cells can be frozen at -20$^\circ$C or carried directly into the next step of DNA extraction.
\end{enumerate}
\subsection{Sample preparation for DEEPN Illumina Sequencing}
\begin{enumerate}[leftmargin=1.4in]
\item[\textbf{DNA Extraction}] Resuspend the cell pellets in 500 $\mu$l of 50 mM Tris/20 mM EDTA and transfer to a 1.5 ml eppendorf tube. Add 3 $\mu$l of BME and 10 $\mu$l of Zymolase stock. Mix well and incubate at 37$^\circ$C for 24-36 hours. Extract with phenol/chloroform/isoamyl alcohol and ethanol precipitate. Resuspend the pellet in 100 $\mu$l of 50 mM Tris/20 mM EDTA, add 1 $\mu$l of RNaseA stock, and incubate at 37$^\circ$C for 1 hr. Ethanol precipitate and resuspend in 100 $\mu$l 5 mM Tris/2 mM EDTA. Quantify the DNA by absorbance at 280 nm.
\item[\textbf{2 PCR cDNA inserts}] Perform two 50 $\mu$l PCR reactions per DNA sample. Each reaction contains 50 pmoles of F1-primer and R1-primer (Figure \ref{fig:exp_fig3}), 25 $\mu$l NEBNext High-Fidelity 2x PCR Master Mix, 5 $\mu$g of DNA sample, and water up to 50 $\mu$l. The reactions are amplified for 25 cycles with extension times of 3 min. Analyze the PCR products by agarose gel electrophoresis. Samples should show a smear of DNA around 1-3 kb; a distinct banding pattern may be seen for samples in which a yeast two-hybrid interaction was selected (Figure \ref{fig:exp_fig3}). Combine the duplicate PCR samples and purify using the QIAquick PCR purification kit.
\item[\textbf{Illumina sequencing}] 550 ng of PCR product was sheared using a Covaris E220 to give fragments with an average length of 300 bp. Indexed sequencing libraries were generated for Illumina sequencing using the KAPA Hyper Prep kit, Cat.\ No.\ KK8500 (KAPA Biosystems, Wilmington, MA), which adds linkers encoding barcodes, priming sites, and capture sequences asymmetrically to the ends of the DNA fragments. Indexed libraries were then pooled and sequenced using Illumina 2x100 nt SBS v3 chemistry run on an Illumina HiSeq2500 (Illumina, Inc., San Diego, CA). The number of reads targeted was between 10 and 30 million, with more reads desired for the unselected populations, which are typically more complex.
\end{enumerate}
\begin{figure}[!ht]
\centering
\includegraphics[width=\textwidth]{Exp3}
\caption{}
\label{fig:exp_fig3}
\end{figure}
%----------------------------------------------------------------------------------------
% PART
%----------------------------------------------------------------------------------------
\part{Software}
%----------------------------------------------------------------------------------------
% CHAPTER 1
%----------------------------------------------------------------------------------------
\chapterimage{chapter_head_1.pdf} % Chapter heading image
\chapter{DEEPN Software Overview}
\section{About DEEPN}\index{About DEEPN}
This user guide describes use of the standalone \DEEPN application and how to operate the modules within it.
The \DEEPN bioinformatics workflow is a collection of 4 programs:
% \begin{wrapfigure}{l}{0.25\textwidth}
% \includegraphics[width=0.9\linewidth]{Pictures/gene_count.png}
% \caption{Caption1}
% \label{fig:subim1}
% \end{wrapfigure}
\includegraphics[scale=0.3]{Pictures/gene_count.png} \GeneCount counts the number of sequence reads found for every gene.\\
\includegraphics[scale=0.3]{Pictures/junction_make.png} \JunctionMake finds and identifies all the sequences corresponding to the junction sequences that span the end of the bait plasmid with the cDNA insert in the library “prey” plasmid.\\
\includegraphics[scale=0.3]{Pictures/query_blast.png} \BlastQuery allows the junction sequences to be analyzed.\\
\includegraphics[scale=0.3]{Pictures/read_depth.png} \ReadDepth calculates the read depth for a particular cDNA, useful for predicting the 3’ end of a cDNA insert.
\vspace{15pt}
\DEEPN was developed to process and analyze sequence information from the Illumina platform that produces 110-140 bp reads. Both single- and paired-end sequences are appropriate; \DEEPN considers the two sides of a paired-end sequence as two separate sequences. \DEEPN requires sequence files in \texttt{.sam} format, in which sequences have been mapped to the genome. Processing of \texttt{.fastq} sequence files with \texttt{Tophat2} will work, producing unmapped and mapped \texttt{.sam} files for each fastq read file. \DEEPN requires BOTH mapped and unmapped \texttt{.sam} files to fully analyze a sequence dataset. See Section \ref{download link} for the download link.
Samtools (http://goo.gl/Uhr10S), Tophat2 (https://goo.gl/16BNHo), and Bowtie2 (http://goo.gl/NwVXZ) need to be used for mapping the reads.
\DEEPN considers each sequence read separately. Thus, if paired-end reads were collected, they need not be mapped as such by Tophat2 or Bowtie2; \DEEPN does not care. For paired-end reads that give two sample files (e.g.\ Sample\_R1.fastq and Sample\_R2.fastq), it is easiest to concatenate the *\_R1 and *\_R2 files together and then map the merged file as single-end reads, for example:
\begin{lstlisting}
# concatenate the two paired-end read files into one file
cat Sample_R1.fastq Sample_R2.fastq >> CombinedSample.fastq
# map the combined reads (the input .fastq is given after the index)
./tophat2 -o TophatOutput/ -p 11 ./indexes/mm10 CombinedSample.fastq
# convert the .bam file of unmapped reads to a .sam file
samtools view -h -o CombinedSample.unmapped ./TophatOutput/unmapped.bam
# convert the .bam file of mapped reads to a .sam file
samtools view -h -o CombinedSample.mapped ./TophatOutput/accepted_hits.bam
\end{lstlisting}
\begin{remark}
Later releases of DEEPN for Mac will also contain functions to automatically map \texttt{.fastq} files with Tophat2 to allow for seamless integration of processing sequence data.\\
\texttt{.fastq} $\,\to\,$ Tophat2/Bowtie $\,\to\,$ \GeneCount $\,\to\,$ {\color{Blue} Junction Make} $\,\to\,$ {\color{ForestGreen} Blast Query} $\,\to\,$ {\color{Bittersweet} Read Depth}.
\end{remark}
The \DEEPN application provides a graphic user interface to guide the launch and operation of \GeneCount, \JunctionMake, \BlastQuery, or \ReadDepth modules within it. \DEEPN comes in versions that run on Windows and Mac operating systems. See Section \ref{download link} for download link.
\clearpage
\section{Contents within DEEPN}\index{Contents within DEEPN}
\begin{enumerate}
\item Program modules
\begin{itemize}
\item {\color{Red} Gene Count}
\item {\color{Blue} Junction Make}
\item {\color{ForestGreen} Blast Query}
\item {\color{Bittersweet} Read Depth}
\end{itemize}
which are launched from within the main DEEPN.app or DEEPN.exe.
\item Databases for the Gene and mRNA coordinates
\begin{itemize}
\item mouse mm10 genome and mouse RefSeq data
\item Gene and ORF coordinates for the SacCer3 genome
\end{itemize}
These allow analysis of mouse cDNA Y2H libraries and yeast genomic Y2H libraries. The modified mouse and human RefSeq databases that \DEEPN uses contain just the known annotated mRNAs, essentially the entries that have an \texttt{NM\_*} nomenclature in GenBank. They do not contain microRNAs, long non-coding RNAs, or theoretical splice variants. \DEEPN contains a database of yeast genes with a hybrid nomenclature combining their systematic SGD name and their GenBank \texttt{NM\_*} nomenclature. For simplicity, a given yeast gene consists of the protein coding sequence flanked by 100 bp of untranslated region.
\item Database of ``junction tags'' for different libraries. Currently, analysis of mouse cDNA libraries defaults to the use of the Clontech Mate and Plate pGADT7 plasmid and the GRCm38/mm10 reference genome data; analysis of human cDNA libraries likewise uses the GRCh38/hg38 reference genome data. Analysis of the yeast libraries is tied to the Phil James libraries housed in pGAD-C1, C2, and C3 \emph{(James et al., 1996)}; the yeast Y2H library analysis uses the sacCer3 (April 2011) database. The default junction tags for these libraries are:
\begin{itemize}
\item cDNA insert (mouse and human)
\begin{lstlisting}
AATTCCACCCAAGCAGTGGTATCAACGCAGAGTGGCCATTACGGCCGGGG
\end{lstlisting}
\item genomic fragment insert (\emph{S. cerevisiae})
\begin{lstlisting}
ATGATGAAGATACCCCACCAAACCCAAAAAAAGAGATCGAATTCCCGGGG
\end{lstlisting}
\end{itemize}
\begin{remark}
Users can insert their own junction sequence into the \DEEPN dialog box if using a different library. For a more permanent solution, users can modify the SQL database that houses these data (see below).
\end{remark}
\item \DEEPN runs the \texttt{blastn} program while it is processing data; \texttt{blastn} is called upon by the \JunctionMake program. All of the relevant files required to blast search mouse mRNAs or yeast genes are included as internal resources, so a stand-alone \texttt{blastn} program and its associated databases perform \texttt{blastn} searches locally from within the \DEEPN application.
\end{enumerate}
\section{DEEPN WorkFlow Overview}\index{DEEPN Workflow Overview}
\begin{center}
\smartdiagram[descriptive diagram]{
{Select \textbf{Work Folder}, Locate a folder on your computer where you would like the analysis to be performed.},
{Place \texttt{.sam} files, Place .sam files within the \textbf{mapped\_sam\_files} and \textbf{unmapped\_sam\_files} subfolders within the selected \textbf{Work Folder}.},
{Process Data, Use \GeneCount and \JunctionMake to process data. This will create several subfolders containing the processed data},
{Analyze Data, {Using \BlastQuery and \ReadDepth}},
}
\end{center}
Step-by-step screen-shots and instructions are detailed in the following chapters.
%------------------------------------------------
\section{Installation}\index{Installation}
\subsection{Download Link}\label{download link}\index{Download Link}
Platform-specific compiled binaries (\emph{Mac OS X, Windows and Linux}) of \textbf{DEEPN} can be downloaded from the URL below. \\
\texttt{\href{https://github.com/emptyewer/DEEPN/releases}{https://github.com/emptyewer/DEEPN/releases}}
\subsection{Mac OS X Compatibility}\index{Mac OS X Compatibility}\label{mac_install}
\texttt{Mac OS X (10.10+) Yosemite and above}
\subsection{Windows Compatibility}\index{Windows Compatibility}\label{windows_install}
\texttt{64-bit or 32-bit Windows 7 and above. Note that \DEEPN itself is 32-bit software.}
\subsection{Linux Compatibility}\index{Linux Compatibility}\label{linux_install}
\texttt{Scheduled for release in Version 2.0 of DEEPN.}
\clearpage
\section{Open Source License}\index{Open Source License}\label{license}
\begin{lstlisting}
The MIT License (MIT)
Copyright (c) 2016 Venkatramanan Krishnamani, Robert C. Piper, Mark Stammnes
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
\end{lstlisting}
\chapter{Initial Setup}
\section{Preprocessing \texttt{.fastq} files}\index{Preprocessing \texttt{.fastq} files}
The current DEEPN application requires that \texttt{.sam} files have been generated from the \texttt{.fastq} Illumina sequence files. This is done using the mapping program \texttt{Tophat2}, which itself runs on top of \texttt{Bowtie}. It is imperative that downstream processing by DEEPN uses the same databases that \texttt{Tophat2} used to map the sequence files. These are:
\begin{enumerate}
\item Mouse: mm10/GRCm38 2011 \emph{Mus musculus} assembly (Genome Reference Consortium Mouse Build 38, \texttt{GCA\_000001635.2})\\
\texttt{\href{https://goo.gl/T6OT2F}{https://goo.gl/T6OT2F}}
\item Human: hg38/GRCh38 2013 \emph{Homo sapiens} assembly (Genome Reference Consortium Human Build 38, \texttt{GCA\_000001405.15})\\
\texttt{\href{https://goo.gl/xWgczW}{https://goo.gl/xWgczW}}
\item Yeast: sacCer3 2011 \emph{Saccharomyces cerevisiae} S288c assembly from Saccharomyces Genome Database (\texttt{GCA\_000146055.2})\\
\texttt{\href{https://goo.gl/wfPbvA}{https://goo.gl/wfPbvA}}
\end{enumerate}
\texttt{Tophat2} should produce a set of \texttt{.sam} files of mapped reads and unmapped reads for every input \texttt{.fastq} file. DEEPN will use both of these files.
\section{Initializing DEEPN}\index{Initializing DEEPN}
\subsection{Launching}\index{Launching DEEPN}
Open the \DEEPN application by double clicking. This opens a window (DEEPN) that can be used to run the other modules.
\begin{itemize}
\item[\textbf{Step 1.}] \textbf{Select Parameters from the list menu at the top (Figure \ref{fig:deepn_main_window})}
\begin{itemize}
\item Selecting the \emph{M. musculus} option selects the mm10 mouse databases
\item Selecting the Human option selects the hg38 Human databases
\item Selecting the \emph{S. cerevisiae} option selects the sacCer3 yeast databases
\item Once this is selected, the ``Select Work Folder'' button will be activated for use
\end{itemize}
\begin{figure}[!ht]
\centering
\includegraphics[width=0.8\textwidth]{figure1}
\caption{\DEEPN main interface.}
\label{fig:deepn_main_window}
\end{figure}
\item[\textbf{Step 2.}] \textbf{Create a work folder Figure \ref{fig:select_work_folder}}
\begin{itemize}
\item \DEEPN needs a folder to write its files into and to read sequence files from. This is done using the ``Select Work Folder'' button \includegraphics[width=80pt]{Pictures/work_folder_btn}. Here create a new folder or select an existing one.
\item Once your Work Folder is designated, \DEEPN will need to operate from two subfolders within it (See Figure \ref{fig:select_work_folder}). These folders are called...
\begin{itemize}
\item \texttt{mapped\_sam\_files}
\item \texttt{unmapped\_sam\_files}
\end{itemize}
\item If these folders already exist within the ``Work Folder'' because of previous processing, then \DEEPN will use them.
\item If the ``Work Folder'' is new and those folders do not exist, \DEEPN will create them.
\end{itemize}
\begin{figure}[!ht]
\centering
\includegraphics[width=0.8\textwidth]{work_folder}
\caption{A folder with two subfolders named \texttt{mapped\_sam\_files} and \texttt{unmapped\_sam\_files} is the starting state of the \DEEPN work folder before processing.}
\label{fig:select_work_folder}
\end{figure}
\item[\textbf{Step 3.}] To start things off, move your \texttt{.sam} files generated by \underline{Tophat2} into the \texttt{mapped\_sam\_files} and \texttt{unmapped\_sam\_files} folders.
\end{itemize}
\includegraphics[scale=0.3]{Pictures/gene_count.png} \GeneCount module will process the \texttt{.sam} files placed in the \texttt{mapped\_sam\_files} folder. These files should contain the mapped read files outputted from \underline{Tophat2}\\
\includegraphics[scale=0.3]{Pictures/junction_make.png} \JunctionMake module will process the \texttt{.sam} files placed in the \texttt{unmapped\_sam\_files} folder. These files should contain the \underline{UNmapped} read files outputted from \underline{Tophat2}. These are the reads that could not be mapped adequately to the \textbf{SacCer3} or \textbf{Mm10} genomes and that contain the bulk of the junction reads. With Illumina 110-120 bp reads, the stretch of cDNA or gene DNA in these ``Junction Sequences'' is too short to be mapped to a chromosome by \underline{Tophat2}. This workflow assumes these types of short reads. Were one to have longer reads, the Junction Sequences might be mappable, which would oblige the search for them to include the Mapped reads as well.\\
\begin{itemize}
\item Once \texttt{.sam} files are placed within \texttt{mapped\_sam\_files}, the \includegraphics[width=120pt]{Pictures/gene_count_btn} button is activated and the \GeneCount processing can begin by clicking the button.
\item Once \texttt{.sam} files are placed within \texttt{unmapped\_sam\_files}, the \includegraphics[width=120pt]{Pictures/junction_make_btn} button is activated and the \JunctionMake processing can begin by clicking the button.
\end{itemize}
\begin{warning}
A warning message may appear if \DEEPN detects folders created by previous processing runs. \DEEPN will add to these folders, but users run the risk that files with the same names will be overwritten by the new files. To avoid any problems, move the previously processed data folders to a new location.
\end{warning}
%----------------------------------------------------------------------------------------
% PART
%----------------------------------------------------------------------------------------
\part{Processing Data}
%----------------------------------------------------------------------------------------
% CHAPTER 3
%----------------------------------------------------------------------------------------
\chapterimage{chapter_head_1.pdf} % Chapter heading image
\chapter{\GeneCount}\index{Gene Count}
\GeneCount will process all the \texttt{.sam} files that are in the folder \texttt{mapped\_sam\_files}.
\vspace{15pt}
Once the \texttt{.sam} files are moved to this folder, click the \includegraphics[width=120pt]{Pictures/gene_count_btn} button.
\begin{remark}
Clicking the \includegraphics[width=120pt]{Pictures/gene_count_btn} button will only be possible if there are files in \texttt{mapped\_sam\_files} folder.
\end{remark}
After starting, \GeneCount will report to you the following:
\begin{lstlisting}
>>>GENEcountY2H
Gene Count will process the mapped .sam files present in the folder mapped_sam_files
Gene Count will generate two folders for its output data:
gene_count_summary contains a summary files of genes and their count frequency.
chromosome_files contains more granular data for each gene.
Be patient....This program is slow but will keep you posted.
>>>END
\end{lstlisting}
\GeneCount will populate the \texttt{gene\_count\_summary} and \texttt{chromosome\_files} folders with files that have names corresponding to the input files.
For an input file named \textbf{\texttt{Dataset1.sam}}
\begin{itemize}
\item The \texttt{gene\_count\_summary} folder will contain \textbf{\texttt{Dataset1\_summary.csv}}
\item The \texttt{chromosome\_files} folder will contain \textbf{\texttt{Dataset1\_ChrGene.csv}}
\end{itemize}
\vspace{15pt}
The \texttt{\_summary.csv} files generated by \GeneCount have the following format when opened in \texttt{Microsoft Excel}. See Figure \ref{fig:excel_screen_shot}.
\begin{itemize}
\item The name of the \texttt{.sam} file processed is found along the top.
\item \textbf{Column A} shows Chromosome on which each gene is located
\item \textbf{Column B} shows gene name
\item \textbf{Column C} shows the frequency of reads for that gene in parts per million (PPM)
\item \textbf{Columns D and greater} show corresponding NCBI genbank accession numbers that describe annotated mRNA sequences for that gene
\end{itemize}
Shown are the ``TotalReads'' that were found in the starting \texttt{.sam} file (total mapped reads), as well as the ``TotalReads(PPM)'' that were used as the denominator in the PPM calculation. TotalReads is often significantly larger than TotalReads(PPM) because many of the mapped reads cannot be assigned to a particular gene. This is the case with genomic libraries, which have many fragments that do not encode exons. It is also the case with cDNA libraries that have splice variants and other sequences that do not correspond to what is currently annotated as an exon for the corresponding gene.
\begin{remark}
TotalReads(PPM) is the number of reads that were found to have a position corresponding to a known exon as annotated in the database used. Since some exons have yet to be annotated, some of the reads may not be assignable to a particular gene, which accounts for the discrepancy between TotalReads(PPM) and TotalReads (see the sketch below).
\end{remark}
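For readers who prefer to see the arithmetic, the following Python sketch (not taken from the DEEPN source) shows how a PPM value is derived from read counts; the read numbers used are hypothetical.
\begin{lstlisting}
# Minimal sketch (not DEEPN's own code) of the PPM calculation: the reads
# assigned to a gene are divided by TotalReads(PPM), the number of reads that
# could be assigned to annotated exons. The counts below are made up.

def ppm(gene_reads, assigned_reads):
    """Frequency of a gene's reads in parts per million of assignable reads."""
    return gene_reads / assigned_reads * 1_000_000

assigned_reads = 8_500_000     # hypothetical TotalReads(PPM)
gene_reads = 1_275             # hypothetical reads assigned to one gene
print(f"{ppm(gene_reads, assigned_reads):.1f} PPM")   # 150.0 PPM
\end{lstlisting}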
\begin{figure}[!ht]
\centering
\includegraphics[width=0.8\textwidth]{excel}
\caption{Screen-shot of \texttt{\_summary.csv} file generated by \GeneCount.}
\label{fig:excel_screen_shot}
\end{figure}
\chapter{\JunctionMake}\index{Junction Make}
\JunctionMake will process all the \texttt{.sam} files that are in the folder \texttt{unmapped\_sam\_files}.
\vspace{15pt}
Once the \texttt{.sam} files are moved to this folder, click the \includegraphics[width=120pt]{Pictures/junction_make_btn} button.
\begin{remark}
Clicking the \includegraphics[width=120pt]{Pictures/junction_make_btn} button will only be possible if there are files in \texttt{unmapped\_sam\_files} folder.
\end{remark}
\JunctionMake will report to you the following:
\begin{lstlisting}
>>>Comment1
- Make sure all ".SAM" files from your UNmapped reads are in the folder:
unmapped_sam_files
- This program will scan for junction sequences that span the Gal4 activation domain and the prey.
- The junction tag sequence used will be the one entered in the Junction Sequence textbox
- Output files will be placed in the junction_files folder as .junctions.txt files.
- Blast identified reads will be placed in the blast_results folder as .blast.txt files
- A database of identified junctions will be placed in the blast_results_query folder as .p files
>>>END
\end{lstlisting}
\JunctionMake will make the \texttt{junction\_files}, \texttt{blast\_results}, and \texttt{blast\_results\_query} folders to accept the new files it will produce. If these folders already exist, a warning will be issued to alert the user that files might be overwritten if \JunctionMake is run again. To avoid this, either move the files out of the \texttt{junction\_files}, \texttt{blast\_results}, and \texttt{blast\_results\_query} folders to a new place, or abort, rename those three folders, and start \JunctionMake again.
\JunctionMake will look for a junction ``tag'' sequence. When different \texttt{Gal4AD-} libraries are used, some user input may be necessary here.
\vspace{15pt}
For the \textbf{clontech mate and plate library} the junction ``tag'' sequence looked for is:
\begin{lstlisting}
AATTCCACCCAAGCAGTGGTATCAACGCAGAGTGGCCATTACGGCCGGGG
\end{lstlisting}
So Junction Sequences look like this for the mouse cDNA Mate/Plate pGADT7 library (Figure \ref{fig:cdna-junction}):
%\begin{lstlisting}
%AATTCCACCCAAGCAGTGGTATCAACGCAGAGTGGCCATTACGGCCGGGG||tcg-gac-aac-gca
%\end{lstlisting}
\begin{figure}[!ht]
\centering
\begin{texshade}{cdna.aln}
\residuesperline*{40}
% \vblockspace{-0.1in}
\charstretch{1.5}
\linestretch{1.5}
% \hidel‘egend
%
\nameseq{1}{}
\nameseq{2}{}
\shadingmode{functional}
\hideconsensus
\hidenumbering
\shaderegion{1}{51..66}{Black}{LightYellow}
% \shaderegion{1}{72..74}{White}{Red}
% \shaderegion{2}{74..76}{White}{Red}
% \tintblock{1}{73..74}
%
% \shaderegion{2}{1..7}{Black}{LightYellow}
% \shaderegion{2}{66..72}{Black}{LightYellow}
% \shaderegion{2}{10..13}{Black}{LightProcessBlue}
% \shaderegion{2}{22..25}{Black}{LightProcessBlue}
% \shaderegion{2}{27..31}{Black}{LightLimeGreen}
% \shaderegion{2}{39..43}{Black}{LightLimeGreen}
% \shaderegion{2}{49..53}{Black}{LightLavender}
% \shaderegion{2}{61..65}{Black}{LightLavender}
%
% \shaderegion{1}{1..7}{Black}{LightYellow}
% \shaderegion{1}{64..71}{Black}{LightYellow}
% \shaderegion{1}{10..12}{Black}{LightProcessBlue}
% \shaderegion{1}{22..24}{Black}{LightProcessBlue}
% \shaderegion{1}{26..30}{Black}{LightLimeGreen}
% \shaderegion{1}{38..42}{Black}{LightLimeGreen}
% \shaderegion{1}{47..51}{Black}{LightLavender}
% \shaderegion{1}{59..63}{Black}{LightLavender}
%
\feature{ttop}{1}{51..66}{---[Red]}{Downstream Reading Frame}
% \feature{bbottom}{2}{74..76}{---[Red]}{}
\feature{ttop}{1}{1..50}{---[RoyalBlue]}{Junction Sequence}
% \feature{bbottom}{2}{34..36}{---[RoyalBlue]}{}
\end{texshade}
\caption{Junction Sequences look like this for the mouse cDNA Mate/Plate pGADT7 library}
\label{fig:cdna-junction}
\end{figure}
\vspace{15pt}
For the \textbf{yeast genomic Phil James (pGAD-C1,2,3) library} the junction ``tag'' sequence looked for is:
\begin{lstlisting}
ATGATGAAGATACCCCACCAAACCCAAAAAAAGAGATCGAATTCCCGGGG
\end{lstlisting}
So junction sequences look like this for the pGAD-C yeast genomic library (Figure \ref{fig:gene-junction}):
% \begin{lstlisting}
% ATACCCCACCAAACCCAAAAAAAGAGATCGAATTCCCCGGGGGATCCATC||ggc-gaa-aac-gaa
% \end{lstlisting}
\begin{figure}[!ht]
\centering
\begin{texshade}{gene.aln}
\residuesperline*{40}
% \vblockspace{-0.1in}
\charstretch{1.5}
\linestretch{1.5}
% \hidel‘egend
%
\nameseq{1}{}
\nameseq{2}{}
\shadingmode{functional}
\hideconsensus
\hidenumbering
\shaderegion{1}{51..66}{Black}{LightYellow}
% \shaderegion{1}{72..74}{White}{Red}
% \shaderegion{2}{74..76}{White}{Red}
% \tintblock{1}{73..74}
%
% \shaderegion{2}{1..7}{Black}{LightYellow}
% \shaderegion{2}{66..72}{Black}{LightYellow}
% \shaderegion{2}{10..13}{Black}{LightProcessBlue}
% \shaderegion{2}{22..25}{Black}{LightProcessBlue}
% \shaderegion{2}{27..31}{Black}{LightLimeGreen}
% \shaderegion{2}{39..43}{Black}{LightLimeGreen}
% \shaderegion{2}{49..53}{Black}{LightLavender}
% \shaderegion{2}{61..65}{Black}{LightLavender}
%
% \shaderegion{1}{1..7}{Black}{LightYellow}
% \shaderegion{1}{64..71}{Black}{LightYellow}
% \shaderegion{1}{10..12}{Black}{LightProcessBlue}
% \shaderegion{1}{22..24}{Black}{LightProcessBlue}
% \shaderegion{1}{26..30}{Black}{LightLimeGreen}
% \shaderegion{1}{38..42}{Black}{LightLimeGreen}
% \shaderegion{1}{47..51}{Black}{LightLavender}
% \shaderegion{1}{59..63}{Black}{LightLavender}
%
\feature{ttop}{1}{51..66}{---[Red]}{Downstream Reading Frame}
% \feature{bbottom}{2}{74..76}{---[Red]}{}
\feature{ttop}{1}{1..50}{---[RoyalBlue]}{Junction Sequence}
% \feature{bbottom}{2}{34..36}{---[RoyalBlue]}{}
\end{texshade}
\caption{Junction sequence for the pGAD-C yeast genomic library.}
\label{fig:gene-junction}
\end{figure}
If you need to use another \textbf{Junction Sequence}, you can do so by pasting it directly into the textbox labeled ``junction sequence''.
\begin{remark}
A new junction sequence should
\begin{itemize}
\item be UPPER case and 50~nt long;
\item lie immediately upstream of the cDNA/fragment fusion site;
\item have its last 3 nt define a complete codon for the preceding reading frame. In the examples above, the last 3 nucleotides define the operative frame (GGG $\,\to\,$ Glycine or ATC $\,\to\,$ Isoleucine).
\end{itemize}
\end{remark}
\JunctionMake takes the 50~nt sequence and creates 3 ``tag'' sequences from it. It will then make a new file of all the reads that contain one or more of these sequence tags. These lists can be found in the folder \texttt{junction\_files}. Consider the following sequence:
\begin{figure}[!ht]
\centering
\begin{texshade}{seq1.aln}
\residuesperline*{45}
% \vblockspace{-0.1in}
\charstretch{1.5}
\linestretch{1.5}
\nameseq{1}{}
\nameseq{2}{}
\shadingmode{functional}
\hideconsensus
\hidenumbering
\feature{ttop}{1}{9..23}{---[RoyalBlue]}{Junction Seq Tag}
\feature{ttop}{1}{24..50}{---[Red]}{Downstream Reading Frame}
\end{texshade}
\end{figure}
\JunctionMake will first search for the tag \texttt{AGACAACGGCCGGGG}.
Once found it will determine the Downstream Reading Frame (the fusion point of the cDNA), which would be: \texttt{AAACCCGGGAAACCCGGGA}.
\vspace{10pt}
Sometimes there is a cloning mismatch where a cDNA is inserted into the library, such as a base substitution or a missing nucleotide. To compensate, \JunctionMake will also look for a 15~bp sequence set back 4~nt upstream of the one it first looked for. It does this only if the read in question does not have a perfect match to the primary junction sequence query above.
This looks like the following:
\begin{figure}[!ht]
\centering
\begin{texshade}{seq2.aln}
\residuesperline*{45}
% \vblockspace{-0.1in}
\charstretch{1.5}
\linestretch{1.5}
\nameseq{1}{}
\nameseq{2}{}
\shadingmode{functional}
\hideconsensus
\hidenumbering
\feature{ttop}{1}{5..19}{---[RoyalBlue]}{Junction Seq Tag}
\feature{ttop}{1}{20..23}{---[Yellow]}{}
\feature{ttop}{1}{24..50}{---[Red]}{Downstream Reading Frame}
\end{texshade}
\end{figure}
The sequence to be searched for is: \texttt{TTCCAGACAACGGCC}
The Downstream Reading Frame returned remains: \texttt{AAACCCGGGAAACCCGGGA}
\vspace{10pt}
We have found that even this does not fully capture all of the junction sequences, so \JunctionMake does the same thing again, going back another 4~nt to yield the following.
\begin{figure}[!ht]
\centering
\begin{texshade}{seq3.aln}
\residuesperline*{45}
% \vblockspace{-0.1in}
\charstretch{1.5}
\linestretch{1.5}
\nameseq{1}{}
\nameseq{2}{}
\shadingmode{functional}
\hideconsensus
\hidenumbering
\feature{ttop}{1}{1..15}{---[RoyalBlue]}{Junction Seq Tag}
\feature{ttop}{1}{16..23}{---[Yellow]}{}
\feature{ttop}{1}{24..50}{---[Red]}{Downstream Reading Frame}
\end{texshade}
\end{figure}
The sequence to be searched for is: \texttt{AACGTTCCAGACAAC}
The Downstream Reading Frame returned remains: \texttt{AAACCCGGGAAACCCGGGA}
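The tag scheme described above can be summarized in a short Python sketch (illustrative only, not the DEEPN source). It derives the three 15-nt tags from a 50-nt junction sequence, each set back 4~nt from the previous one, scans a read with them in order, and recovers the same downstream reading frame in every case; the toy read is constructed for the example.
\begin{lstlisting}
# Minimal sketch (not DEEPN's source) of the junction-tag scheme described
# above: three 15-nt tags are taken from the 50-nt junction sequence, each set
# back 4 nt from the previous one, and a read is scanned with them in order.
# The set-back is added back after a match so that all three tags recover the
# same downstream reading frame.

def make_tags(junction_seq):
    """Return [(tag, set_back)] for the primary, secondary and tertiary tags."""
    tags = []
    for set_back in (0, 4, 8):
        end = len(junction_seq) - set_back
        tags.append((junction_seq[end - 15:end], set_back))
    return tags

def downstream_frame(read, junction_seq):
    """Return the sequence downstream of the junction, or None if no tag matches."""
    for tag, set_back in make_tags(junction_seq):
        hit = read.find(tag)
        if hit != -1:
            return read[hit + 15 + set_back:]
    return None

junction = "AATTCCACCCAAGCAGTGGTATCAACGCAGAGTGGCCATTACGGCCGGGG"  # pGADT7 tag
read = "NNNN" + junction[-23:] + "TCGGACAACGCA"                   # toy read
print(downstream_frame(read, junction))   # TCGGACAACGCA
\end{lstlisting}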
\vspace{15pt}
\JunctionMake will generate these 3 junction tags that it will look for and apprise you of its status by stating:
\begin{lstlisting}
>>>Comment2
The primary, secondary, and tertiary sequences that will be searched for are:
1st sequence
2nd sequence
3rd sequence
>>>END
\end{lstlisting}
\JunctionMake will then notify you that it has started to search the \texttt{.sam} files using
\begin{lstlisting}
>>>Comment3
Junction Make is searching .sam files for the junctions that span the GAL4-AD and library insert
The next step converts files to a FASTA file format used for blastn search
The FASTA files are temporary \_TEMP.fa files are located in the blastResults folder
\_TEMP.fa files are being converted into blast.txt files that contain the blastn results for each junction.
This is done by searching each sequence against the reference cDNA database using blastn.
This takes a while...
>>>END
\end{lstlisting}
During this time, \JunctionMake runs a \texttt{blastn} search of each of the junction reads against a database of annotated RefSeq mRNAs or yeast genes. Results from this blast search are found in the \texttt{blast\_results} folder. Files from this folder are then used to create a searchable format that can be used by the analysis program \BlastQuery. \JunctionMake creates a Python dictionary ``.p'' file for each dataset and stores it in the \texttt{blast\_results\_query} folder.
\begin{remark}
The yeast genomic Y2H library searches work for the Phil James pGAD-C1, C2, and C3 libraries. This is actually a set of 3 libraries that have in/dels near the genomic DNA insertion site, allowing sampling of all three frames. Thus, the junction sequence used for searching these libraries uses a set-back point before these libraries diverge.
\begin{itemize}
\item C1: {\ttfamily ATGAAGATACCCCACCAAACCCAAAAAAAGAGATCGAATTCCCCGGG{\bfseries GGATCC}}
\item C2: {\ttfamily ATGAAGATACCCCACCAAACCCAAAAAAAGAGATCGAATTCCC{\bfseries G}GGG{\bfseries ATCC}}
\item C3: {\ttfamily ATGAAGATACCCCACCAAACCCAAAAAAAGAGATCGAATTCCC{\bfseries G}GGG{\bfseries GATCC}}
\end{itemize}
By using the consensus junction sequence:
\begin{itemize}
\item ATGAAGATACCCCACCAAACCCAAAAAAAGAGATCGAATTCCCCGGG
\end{itemize}
\JunctionMake breaks this sequence up into smaller junction tags that it searches for. So for this sequence, \JunctionMake finds the following junction tags as it moves through the data:
\begin{itemize}
\item 1st Tag: {\ttfamily ATGAAGATACCCCACCAAACCCAAAAA{\color{RoyalBlue}{\bfseries AAGAGATCGAATTCCCGGGG}}}
\item 2nd Tag: {\ttfamily ATGAAGATACCCCACCAAACCCA{\color{RoyalBlue}{\bfseries AAAAAAGAGATCGAATTCCC}}GGGG}
\item 3rd Tag: {\ttfamily ATGAAGATACCCCACCAAA{\color{RoyalBlue}{\bfseries CCCAAAAAAAGAGATCGAAT}}TCCCGGGG}
\end{itemize}
The first Tag will match pGAD-C2 and -C3, whereas the second and third Tags will match all -C1, -C2 and -C3.
\end{remark}
\chapter{Running \GeneCount \& \JunctionMake}\index{Running Gene Count \& Junction Make}
The \DEEPN application also provides a \includegraphics[width=160pt]{Pictures/gcjm_btn} button. Clicking this sequentially runs \GeneCount and \JunctionMake on the contents of ``mapped\_sam\_files'' and ``unmapped\_sam\_files'' automatically. Thus, one can queue up the files and let processing complete overnight. To run this option, be sure that the ``Work Folder'' contains only:
\begin{itemize}
\item \texttt{mapped\_sam\_files} folder with mapped \texttt{.sam} files
\item \texttt{unmapped\_sam\_files} folder with corresponding unmapped \texttt{.sam} files
\end{itemize}
If there are no other folders, you can be sure to avoid any \textbf{WARNINGS} that might interrupt the processing workflow. Alternatively, you can uncheck the box on the \DEEPN window to skip such prompts.
%----------------------------------------------------------------------------------------
% PART
%----------------------------------------------------------------------------------------
\part{Analyzing Data}
\chapter{\BlastQuery}\index{Blast Query}
This program allows you to assess the fusion point between the Gal4AD and each gene/cDNA in question.
\begin{itemize}
\item[-] This program queries the blast searches done previously, stored in the \texttt{blast\_results\_query} folder
\item[-] Once loaded, you simply type in the NCBI reference number (\textbf{NM\_***})
\item[-] The fusion points and their frequency in \texttt{ppm} are displayed
\end{itemize}
Once \JunctionMake has loaded the \texttt{blast\_results\_query} folder with processed ``.p'' files, \BlastQuery can be used to analyze the junctions. Clicking the \includegraphics[width=120pt]{Pictures/blast_query} button launches the module and a new graphic user interface.
\begin{figure}[!ht]
\centering
\includegraphics[width=0.8\textwidth]{figure2}
\caption{Screen shot of Blast Query user interface.}
\label{fig:blast_query_screen_shot}
\end{figure}
Enter a Gene ID in the text box shown in Figure \ref{fig:blast_query_screen_shot}.
These are \textbf{NM\_*} identifiers that can be found alongside gene names in the \texttt{summary.csv} files generated by \GeneCount. An example is \textbf{NM\_146001}.
Then use the pull-down menu to select which datasets to compare. The list of datasets is generated by reading which ``.p'' files are in the \texttt{blast\_results\_query} folder within your ``Work Folder''. If the right .p file is not in that folder, you can simply move it in from somewhere else.
The data window can be set to display \textbf{Results}, \textbf{Filtered Results}, or \textbf{Plot}, as shown in Figure \ref{fig:blast_query_screen_shot2}.
\begin{figure}[!ht]
\centering
\includegraphics[width=0.8\textwidth]{figure3}
\caption{Screen shot of Blast Query user interface.}
\label{fig:blast_query_screen_shot2}
\end{figure}
\begin{remark}
\textbf{Nomenclature for Yeast Open-Reading Frames}.
For most yeast (sacCer3) genes, using the GenBank accession numbers is not helpful. Rather, the \emph{Saccharomyces} Genome Database (SGD) uses a different systematic name and also uses common names. For instance, the GenBank entry NM\_001180490.1 corresponds to the gene having the common name \emph{CDC1}, which has the SGD systematic name YDR182W. \DEEPN uses a custom naming scheme whereby the gene above is designated \textbf{YDR182W\_CDC1}. These names are in the \GeneCount summary .csv file and should be the search term when using \BlastQuery to search through yeast genomic Y2H libraries.
\end{remark}
\section{Results Tab}\index{Blast Query Results}
The ``Results Tab'' shows
\begin{itemize}
\item \textbf{Position} is the position of the first nucleotide of the given insert that is found for a particular junction read.
\item \textbf{\#Junctions} is a count, in PPM, for how abundant that particular junction is
\item \textbf{QueryStart} comes from the ``q.start'' value of the \texttt{blastn} search used to identify the downstream gene fused to the Gal4AD. It refers to how many nucleotides lie between the junction tag and the matched cDNA position. Often this number is 1, but sometimes cloning artifacts generate extra sequence between the end of the Gal4-AD and the match to the cDNA. These extra nucleotides have an impact on the translation reading frame. Also, to accommodate the pGAD-C1, -C2, -C3 libraries, the junction sequence is set back a bit from where the insert is positioned, so the q.start is larger. All this means that the \textbf{Position} and the \textbf{QueryStart} are used together to calculate whether a particular junction is within the coding region (CDS) and whether it is in-frame (a sketch of this calculation follows this list).
\item \textbf{CDS} shows whether the junction site in the mRNA of interest is upstream of the coding region, downstream of the coding region, or within (In ORF) of the coding region.
\item \textbf{Frame} reports whether the downstream library insert that encodes the candidate protein of interest is in the same frame as the upstream Gal4AD. Two other values can be returned in this column. ``Intron'' means that the DNA insert in question could be interrupted by an intron. This is only a problem with Y2H libraries made from genomic DNA, where such things exist, for instance the pGAD-C1,2,3 libraries made from yeast genomic DNA. In this case, \BlastQuery remains agnostic and does not calculate whether the candidate gene is actually in frame. ``Backwards'' means that the downstream reading frame is in the opposite orientation. This is a common finding with Y2H libraries made from genomic DNA, which can insert into the plasmid backbone in either orientation; it is less of an issue with many Y2H cDNA libraries that use a directional cloning strategy. This ``Backwards'' call is also flagged with a placeholder QueryStart value of ``1000''.
\end{itemize}
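To make the \textbf{CDS} and \textbf{Frame} logic concrete, the sketch below shows one plausible way to combine Position and QueryStart into these calls. It is not DEEPN's exact arithmetic; the CDS coordinates and example values are hypothetical, and the ``1000'' sentinel follows the convention described above.
\begin{lstlisting}
# Minimal sketch (not DEEPN's exact arithmetic) of one way to derive the CDS
# and Frame calls from Position and QueryStart. cds_start/cds_end are 1-based
# mRNA coordinates of the annotated coding region; all values are made up.

def classify_junction(position, query_start, cds_start, cds_end):
    if query_start == 1000:            # sentinel used for reverse-orientation inserts
        return "n/a", "Backwards"
    if position < cds_start:
        cds = "Upstream of CDS"
    elif position <= cds_end:
        cds = "In ORF"
    else:
        cds = "Downstream of CDS"
    # The Gal4AD frame ends on a complete codon, so the fusion frame is shifted
    # by the (query_start - 1) nucleotides that precede the matched position.
    in_frame = ((position - cds_start) - (query_start - 1)) % 3 == 0
    return cds, "In frame" if in_frame else "Out of frame"

print(classify_junction(position=301, query_start=1, cds_start=100, cds_end=1500))
# ('In ORF', 'In frame')  since (301 - 100) - 0 = 201 is divisible by 3
\end{lstlisting}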
\section{Filtered Results Tab}\index{Blast Query Filtered Results}
Sometimes, sequencing the same junction multiple times leads to sequence errors, which can produce reads that have the same \textbf{Position} but different \textbf{QueryStarts}, or that have \textbf{Positions} that are 1-2 bp different from the main \textbf{Position}. To simplify comparisons, one can use the ``Filtered Results'' tab, which collapses all similar \textbf{Positions} that may have different \textbf{QueryStarts} into a single value. Filtered Results also displays only the \textbf{Positions} that are found within the \textbf{Left}-most dataset. Thus, by placing, say, a total unselected library dataset on the left, the \textbf{Right}-hand datasets will only display position sites that are in common with the unselected library dataset.
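A minimal Python sketch of this collapsing idea is shown below (not DEEPN's own code; the positions and PPM values are invented).
\begin{lstlisting}
# Minimal sketch (not DEEPN's source) of the filtering idea described above:
# junction records whose Positions fall within a couple of base pairs of an
# existing site (e.g. because of sequencing errors or differing QueryStarts)
# are collapsed onto that site and their PPM values summed.

def collapse_positions(records, window=2):
    records = sorted(records)                 # sort by position
    collapsed = []
    for position, ppm in records:
        if collapsed and position - collapsed[-1][0] <= window:
            collapsed[-1][1] += ppm           # merge into the previous site
        else:
            collapsed.append([position, ppm])
    return collapsed

junctions = [(512, 40.0), (513, 3.0), (514, 1.5), (887, 12.0)]   # (position, ppm)
print(collapse_positions(junctions))          # [[512, 44.5], [887, 12.0]]
\end{lstlisting}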
\section{Plot Tab}\index{Blast Query Plot Tab}
The ``Plot'' tab creates a quick graphic displaying where each junction site lies along the given mRNA or gene. The abundance of each junction sequence within the dataset is plotted on the Y axis in PPM. If a junction site is downstream of the CDS or out of translational frame, the bar is grey. If it is within the CDS and in-frame, it is {\color{RoyalBlue}{dark blue}}, and if it is upstream of the CDS start but within the correct translational reading frame, it is {\color{cyan}{cyan}}. The start and stop sites for translation are shown with {\color{red}{red}} bars. The mRNA/gene sequence itself is given in the text box below, where the protein coding sequence is shown in black text and the upstream and downstream untranslated regions are in grey.
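For users who want a similar graphic outside of \DEEPN, a minimal Python/matplotlib sketch in the same spirit is shown below; the junction positions, PPM values, colour assignments, and CDS boundaries are all made-up example values.
\begin{lstlisting}
# Minimal sketch (not DEEPN's source): plot junction sites along an mRNA in the
# spirit of the Plot tab, with bar colour encoding the CDS/frame call.
# Positions, ppm values and CDS boundaries below are made up.
import matplotlib.pyplot as plt

sites = [(220, 5.0, "upstream_in_frame"), (512, 44.5, "in_orf_in_frame"),
         (1650, 2.0, "out_of_frame")]
colours = {"in_orf_in_frame": "darkblue", "upstream_in_frame": "cyan",
           "out_of_frame": "grey"}
cds_start, cds_end = 300, 1500

fig, ax = plt.subplots()
for position, ppm, status in sites:
    ax.bar(position, ppm, width=15, color=colours[status])
for x in (cds_start, cds_end):                # translation start/stop markers
    ax.axvline(x, color="red")
ax.set_xlabel("position along mRNA (nt)")
ax.set_ylabel("junction abundance (PPM)")
plt.show()
\end{lstlisting}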
\section{Export for Graphing}\index{Export for Graphing}
A \includegraphics[width=80pt]{Pictures/save_csv_btn} button is found at the top right hand corner.
This will save the current \textbf{Results} and \textbf{Filtered Results} Tables in a \texttt{.csv} file that can be opened in \texttt{Microsoft Excel}. Results from each selected dataset are saved in a different sheet.
This output can be pasted into another program, such as GraphPad Prism, for further analysis or graphing:
\vspace{10pt}
\href{http://www.graphpad.com/scientific-software/prism/}{http://www.graphpad.com/scientific-software/prism/}
\chapter{\ReadDepth}\index{Read Depth}
\ReadDepth is used as an aid to determine the 3’ end of the interacting cDNA insert. Most genes that enrich upon selection are represented by a single plasmid, which can be surmised from the presence of a dominant junction site. Thus, how far a given cDNA insert extends downstream can largely be determined from the read depth along the cDNA sequence. \ReadDepth determines this by measuring how many sequences within the dataset contain a 25 bp interval of the cDNA sequence in question. \ReadDepth first makes a subset of all the mapped reads that correspond to the gene of interest and then counts how many of these reads contain sequence matching along the length of the cDNA.
The up/down arrows can be used to adjust the interval distance in increments of 50 nt. Users can also directly enter a desired interval, with a minimum interval of 50~nt.
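The counting idea can be sketched in a few lines of Python (illustrative only, not the DEEPN source): 25-bp probes are taken from the cDNA at a fixed interval and, for each probe, the number of reads containing it is counted. The cDNA and reads below are toy sequences.
\begin{lstlisting}
# Minimal sketch (not DEEPN's source) of the read-depth idea described above.
import random

def read_depth(cdna, reads, interval=50, probe_len=25):
    depth = []
    for start in range(0, len(cdna) - probe_len + 1, interval):
        probe = cdna[start:start + probe_len]
        depth.append((start + 1, sum(probe in read for read in reads)))
    return depth          # list of (cDNA position, number of matching reads)

random.seed(0)
cdna = "".join(random.choice("ACGT") for _ in range(300))   # toy 300 nt cDNA
reads = [cdna[i:i + 110] for i in (0, 10, 40)]              # three toy 110 nt reads
for position, count in read_depth(cdna, reads):
    print(f"position {position:4d}: {count} read(s)")
\end{lstlisting}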
\begin{figure}[!ht]
\centering
\includegraphics[width=0.8\textwidth]{figure4}
\caption{Screen shot of Read Depth user interface.}
\label{fig:read_depth_screen_shot}
\end{figure}
\cleardoublepage
\chapterimage{chapter_head_1.pdf} % Chapter heading image
\chapter{StatMaker Overview}
\section{About StatMaker}\index{About StatMaker}
\StatMaker is a separate program that can be used to rank candidates for the specificity of interacting with one bait vs.\ another, using data generated by the \DEEPN procedures and computations. Currently, \StatMaker is an application for Mac only.
\StatMaker is a program that allows you to rank the likelihood that a given gene is specifically enriched for interacting with a particular bait. A high ranking (p=1) indicates the most likely, whereas p=0 indicates least likely.
\StatMaker will compare the .csv files that are generated by \GeneCount. These files are configured with particular headers and columns that should not be manipulated before StatMaker analysis since that could render them unreadable by StatMaker.
\StatMaker can make two types of comparisons:
\begin{itemize}
\item It can compare each gene for its enrichment on Vector(control) alone vs 1 bait of interest.
\item It can perform a 3 way comparison between Vector(control) vs Bait1 vs Bait2. This configuration is useful for when conformations of a given bait of interest are used. For instance, if one has a wildtype bait (Bait1) and a mutant loss-of-function bait (Bait2), one would use the 3-way comparison to rank Y2H interactors that are specific for bait1 thus distinguishing themselves by not interacting strongly with either Vector(control) alone or bait2.
\end{itemize}
\section{Obtaining StatMaker}\index{Obtaining StatMaker}
\StatMaker is launched on its own and is a separate program from the main \DEEPN software application.
\StatMaker can be downloaded from \texttt{\href{https://github.com/emptyewer/DEEPN/releases}{https://github.com/emptyewer/DEEPN/releases}} as \texttt{StatMaker\_v1.1\_OSX.tar.bz2}.
Once downloaded, double click the package to de-compress it; this will reveal StatMaker.app, which can be moved to any location.
\StatMaker needs several other programs in order to run. These include:
\begin{itemize}
\item R (https://www.r-project.org)
\item JAGS (https://sourceforge.net/projects/mcmc-jags/files/)
\item Bioconductor and biocLite (https://www.bioconductor.org)
\item DEEPN development tools (https://github.com/pbreheny/deepn)
\end{itemize}
\StatMaker will help you install these programs once you open the StatMaker application.
\section{Configuring StatMaker}\index{Configuring StatMaker}
\StatMaker will first ask you to designate your work folder. Navigate to the folder containing the relevant output files from \GeneCount. These are created within your project folder in a subfolder called \texttt{gene\_count\_summary}. Navigate to the contents of this folder and select ‘Choose’ from the dialog box. The top window of \StatMaker will now display all of the compatible .csv files within that folder that can be analyzed by \StatMaker. Now simply drag those into the indicated categories to configure the \StatMaker analysis.
\begin{figure}[!ht]
\centering
\includegraphics[width=0.8\textwidth]{statmaker1}
\caption{Screen shot of \StatMaker user interface.}
\label{fig:statmaker_ui}
\end{figure}
\StatMaker requires pairs of datasets (.csv files). Each pair consists of a summary.csv file for the non-selected Y2H population and one for the matched population put under selection for Y2H interactions; basically, the summary.csv files for a given bait/library population grown in the presence and absence of histidine, respectively.
\StatMaker requires 2 such paired datasets for Vector alone. It requires two so that it can estimate the variance of the \DEEPN procedure as it was performed. \StatMaker expects the variance between independent Vector-alone dataset pairs to give a good indication of how reproducible the other datasets might be.
% \begin{figure}[!ht]
% \centering
% \includegraphics[width=0.8\textwidth]{StatMakerGUIFig}
% \caption{Screen shot of \StatMaker user interface.}
% \label{fig:StatMaker screenshot}
% \end{figure}
\StatMaker requires only a single set of paired .csv datafiles for each bait, although it will also use 2 pairs of datasets per bait if they are available. For a 2-way comparison, populate the Vector1 (Selected and Non-Selected) and Vector2 (Selected and Non-Selected) fields and at least one of the Bait1 (Selected and Non-Selected) fields. For a 3-way comparison, do the above but also add datasets to the Bait2 (Selected and Non-Selected) fields.
Populate each field by simply dragging the relevant file from the top window into the desired configuration. If you make a mistake, click the file in the field to select and hit ‘delete/backspace’.
\section{Installing Additional Modules for StatMaker}\index{Installing Additional Modules for StatMaker}
You first need to verify the installation of the other software. Do this by clicking the
Verify Installation button. \StatMaker will check to see whether your computer has all the other software needed to run the analysis. If it does not, it will take you through a series of installations:
\begin{itemize}
\item The first will be to install JAGS, this uses a standard package installation interface that will be familiar to Mac Users and will require Administration privileges and authorization.
\item The second will be to install R; this too uses a standard package installation interface. Both the JAGS and R packages are contained within the StatMaker application, and these are the versions that will be installed. If R and/or JAGS were already present on your computer before using \StatMaker, \StatMaker will skip the corresponding installation and will not replace the previously installed R and/or JAGS.
\item The third set of installations is for Bioconductor as well as a set of \DEEPN-specific tools. These are not packaged with the \StatMaker software, but are obtained through the web. Installation of these additional modules is taken care of automatically by StatMaker. However, if problems arise at this step, these modules can be installed manually with the Applications/Utilities/Terminal program according to the step-by-step instructions at the end of this chapter.
\end{itemize}
\section{Running StatMaker}\index{Running StatMaker}
\begin{figure}[!ht]
\centering
\includegraphics[width=0.8\textwidth]{statmaker2}
\caption{Screen shot of \StatMaker user interface.}
\label{fig:StatMaker_Output_File}
\end{figure}
After configuring the files for analysis, the next step is to select a value for the Threshold (PPM). The default is 3 PPM; this value excludes from the analysis any gene whose abundance in the dataset is below 3 PPM. By eliminating these low-abundance genes, the confidence of ranking the remaining genes increases. One can raise this limit to make the ranking more confident, but this runs the risk of eliminating too many genes from the analysis.
Simply click the ‘Run’ button to compute.
The lower left-hand corner of the dialog box will report
‘Running DEEPN statistics... Please Wait...’ and the interface will lock out all further movement of files. Depending on the power of the computer, the analysis can take between 2-10 min and is affected by the Threshold PPM value.
Once ranking is complete, \StatMaker will report in the lower lefthand corner that it has:
‘Saved Results to File: >Place\_where\_file\_is\_stored<’
The name of the results file is ‘statmaker\_output.csv’ and it can be found in the folder you chose, which contained the original ‘data\_summary.csv’ files selected for analysis. This file can be opened in Excel, sorted, and analyzed.
\section{Manual Installation of DEEPN-modules}\index{Manual Installation of DEEPN-modules}
Normally, \StatMaker will guide you through the installation process for the additional programs it needs to operate. These are:
\begin{itemize}
\item R (https://www.r-project.org)
\item JAGS (https://sourceforge.net/projects/mcmc-jags/files/)
\item Bioconductor and biocLite (https://www.bioconductor.org)
\item DEEPN development tools (https://github.com/pbreheny/deepn)
\end{itemize}
Both R and JAGS can be easily downloaded as installation packages from the locations listed above.
Alternatively, package install files can be found within the \StatMaker application itself.
From the Finder, navigate to the \StatMaker application and right-click/control-click the icon to reveal the ‘Show Package Contents’ option. Navigate to Contents>Resources>Statistics to find the JAGS.pkg and R.pkg files. Open each to go through the corresponding installation procedure.
Once R is installed, you can then manually install the other modules \StatMaker needs to run.
Open the Terminal program and type:
\textbf{R}
This should open the R program and display a startup message similar to:
\begin{lstlisting}
R version 3.3.1 (2016-06-21) -- Bug in Your Hair
Copyright (C) 2016 The R Foundation for Statistical Computing
R is free software and comes with ABSOLUTELY NO WARRANTY.
You are welcome to redistribute it under certain conditions.
Type license() or licence() for distribution details.
Natural language support but running in an English locale
R is a collaborative project with many contributors.
Type contributors() for more information and
citation() on how to cite R or R packages in publications.
Type demo() for some demos, help() for on-line help, or
help.start() for an HTML browser interface to help.
Type q() to quit R.
>
\end{lstlisting}
Copy the following line, paste it after the ‘>’ prompt, and hit return:\\
\textbf{source("https://bioconductor.org/biocLite.R")}\\
Once the ‘>’ prompt appears again, copy the following line, paste it after the ‘>’ prompt, and hit return:\\
\textbf{biocLite()}\\
Once the ‘>’ prompt appears again, copy the following line, paste it after the ‘>’ prompt, and hit return:\\
\textbf{install.packages("devtools", repos='http://cran.us.r-project.org')}\\
Once the ‘>’ prompt appears again, copy the following line, paste it after the ‘>’ prompt, and hit return:\\
\textbf{devtools::install\_github("pbreheny/deepn")}
You should see the following output:
\begin{lstlisting}
Downloading GitHub repo pbreheny/deepn@master
from URL https://api.github.com/repos/pbreheny/deepn/zipball/master
Installing deepn
/Library/Frameworks/R.framework/Resources/bin/R' --no-site-file --no-environ
--no-save --no-restore --quiet CMD INSTALL
/private/var/folders/93/pzzlhvd96xz_cnpfwv5vqmrm0000gq/T/
RtmpsOR27h/devtoolsed262ae1213f/pbreheny-deepn-0b06d3d
--library=/Library/Frameworks/R.framework/Versions/3.3/Resources/library
--install-tests
* installing *source* package 'deepn' ...
** R
** inst
** preparing package for lazy loading
** help
No man pages found in package 'deepn'
*** installing help indices
** building package indices
** testing if installed package can be loaded
* DONE (deepn)
>
\end{lstlisting}
Now you are done with all the installations. You can quit Terminal and return to work with \StatMaker.
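For convenience, the four R commands above are collected below so that they can be pasted into the R console as a single block; they are identical to the individual steps just described.
\begin{lstlisting}
source("https://bioconductor.org/biocLite.R")
biocLite()
install.packages("devtools", repos='http://cran.us.r-project.org')
devtools::install_github("pbreheny/deepn")
\end{lstlisting}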
\cleardoublepage
\part{Appendix}
%----------------------------------------------------------------------------------------
% CHAPTER 3
%----------------------------------------------------------------------------------------
\chapterimage{chapter_head_1.pdf}
\chapter{Appendix}\index{Appendix}
\section{Experimental Materials}\index{Experimental Materials}
\subsection{Yeast Growth Media}\index{Yeast Growth Media}
\textbf{Yeast Synthetic Defined (SD) Media}
\begin{itemize}
\item Yeast Nitrogen Base (ammonium sulfate) w/o amino acids (Research Products International; Prospect, Illinois)
\item Dextrose (2\% final) (Research Products International; Prospect, Illinois)
\item Supplement
Adenine (200 mg/l) (Research Products International; Prospect, Illinois), Arginine (20 mg/l) (Research Products International; Prospect, Illinois), Aspartic acid (100mg/l) (Life Technologies; Grand Island, NY), Glutamate monosodium (100mg/l) (FisherScientific, Waltham, MA), Histidine (200mg/l) (Sigma-Aldrich; St. Louis, MO), Leucine (60mg/l) (Sigma-Aldrich; St. Louis, MO), Lysine mono-HCl (30mg/l) (Sigma-Aldrich; St. Louis, MO), Methionine (200mg/l) (Research Products International; Prospect, Illinois), Phenylalanine (50mg/l) (Sigma-Aldrich; St. Louis, MO), Serine (375mg/l) (Sigma-Aldrich; St. Louis, MO), Threonine (200mg/l) (Research Products International; Prospect, Illinois), Tryptophan (200mg/l) (Research Products International; Prospect, Illinois), Tyrosine (30mg/l) (Research Products International; Prospect, Illinois), Valine (150 mg/l) (Research Products International; Prospect, Illinois), Uracil (200 mg/l) (Sigma-Aldrich; St. Louis, MO).
\item For plates add 1.5\% Bacto Agar (BD; Franklin Lakes, NJ)
\item For plasmid or Y2H selection, omit Leucine, Tryptophan, or Histidine as needed
\end{itemize}
\textbf{Yeast Rich Media (YPD)}\index{Yeast Rich Media}
\begin{itemize}
\item Peptone (20g/l) (Research Products International; Prospect, Illinois)
\item Yeast Extract (10g/l) (Research Products International; Prospect, Illinois)
\item Glucose (20g/l)
\end{itemize}
\textbf{Buffered Yeast Rich Media (bYPDA)}
\begin{itemize}
\item Peptone (20g/l) (Research Products International; Prospect, Illinois)
\item Yeast Extract (10g/l) (Research Products International; Prospect, Illinois)
\item Glucose (20g/l)
\item Adenine (200mg/l)
\item \emph{adjust the pH of the above media to 3.7 with HCl, then filter sterilize}
\end{itemize}
\subsection{Reagents}\index{Reagents}
\textbf{TWIRL sample buffer}\index{TWIRL Buffer}
\begin{itemize}
\item 8M Urea (Research Products International; Prospect, Illinois)
\item 4\% SDS (Research Products International; Prospect, Illinois)
\item 10\% glycerol (ThermoFisher, Waltham, MA)
\item 50 mM Tris-HCl, pH 6.8 (Gibco)
\item 0.02\% bromophenol blue (Amresco)
\end{itemize}
\textbf{Yeast 2 hybrid library}\index{Yeast-2-Hybrid Library}
\begin{itemize}
\item Normalized Universal Mouse cDNA library, Mate\&Plate, Cat No: 630483 (Clontech, Mountain View, CA)
\end{itemize}
\textbf{PCR and Cloning}\index{PCR}
\begin{itemize}
\item Gibson Assembly Master Mix, Cat No: E2611L (New England Biolabs, Ipswich, MA)
\item NEBNext High-Fidelity 2x PCR Master Mix, Cat No: M0541S (New England Biolabs, Ipswich, MA)
\item Primers to amplify library inserts of mouse cDNA library in pGADT7:
\item F1-primer 5’-TCACGGCTAGTAAAATTGATGATGG-3’
\item R1-primer 5’-GTCCAAAGCTTCTGAATAAGCCCTCG-3’
\item QIAquick PCR purification kit, Cat No: 28104 (Qiagen)
\item EcoRI (New England Biolabs, Ipswich, MA)
\item BamHI (New England Biolabs, Ipswich, MA)
\end{itemize}
\textbf{Antibodies}\index{Antibodies}
\begin{itemize}
\item Monoclonal anti-HA antibodies were purchased from Biolegend; San Diego, CA (cat no: 901514). Polyclonal anti-myc antibodies were purchased from QED Biosciences Inc.; San Diego CA (cat no: 18826).
\end{itemize}
\textbf{Other Reagents}\index{Other Reagents}
\begin{itemize}
\item Zymolyase 100T (USBiological; Swampscott, MA; cat no: Z1004) 10mg/ml in Buffer (50mM K2PO4 pH 7.5/50\%Glycerol) stored at -20°C
\item RNAse A, DNAse protease-free stock (ThermoFisher; Waltham, MA; cat no: EN0531) stored at -20°C
\item Phenol/Chloroform/Isoamyl alcohol
\end{itemize}
\subsection{Yeast Strains and Plasmids}\index{Yeast Strains and Plasmids}
\begin{itemize}
\item Y187: MAT$\alpha$, ura3-52, his3-200, ade2-101, trp1-901, leu2-3, 112, gal4$\Delta$, met–, gal80$\Delta$, URA3::GAL1UAS-GAL1TATA-lacZ. (Clontech, Mountain View, CA)
\item PJ69-4A MATa leu2-3,112 ura3-52 trp1-901 his3-200 gal4$\Delta$, gal80$\Delta$, GAL-ADE2 lys2::GAL1-HIS3 met2::GAL7- LacZ http://depts.washington.edu/yeastrc/
\item pGBKT7. Gal4-DNA binding domain expression plasmid. (Clontech, Mountain View, CA)
\item pGADT7. Gal4-activation domain expression plasmid. (Clontech, Mountain View, CA)
\end{itemize}
% %----------------------------------------------------------------------------------------
% % BIBLIOGRAPHY
% %----------------------------------------------------------------------------------------
% \chapter*{Bibliography}
% \addcontentsline{toc}{chapter}{\textcolor{ocre}{Bibliography}}
% \section*{Books}
% \addcontentsline{toc}{section}{Books}
% \printbibliography[heading=bibempty,type=book]
% \section*{Articles}
% \addcontentsline{toc}{section}{Articles}
% \printbibliography[heading=bibempty,type=article]
%----------------------------------------------------------------------------------------
% INDEX
%----------------------------------------------------------------------------------------
\cleardoublepage
\phantomsection
\setlength{\columnsep}{0.75cm}
\addcontentsline{toc}{chapter}{\textcolor{ocre}{Index}}
\printindex
%----------------------------------------------------------------------------------------
\end{document}
"alphanum_fraction": 0.738698667,
"avg_line_length": 59.5328,
"ext": "tex",
"hexsha": "54dfe96a047b42692030faa2eec9602ed7dfab98",
"lang": "TeX",
"max_forks_count": 4,
"max_forks_repo_forks_event_max_datetime": "2019-08-29T15:33:57.000Z",
"max_forks_repo_forks_event_min_datetime": "2016-06-17T15:39:44.000Z",
"max_forks_repo_head_hexsha": "0c54c714af3e08ce3626e9fd5d795d7d5f28aeca",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "emptyewer/DEEPN",
"max_forks_repo_path": "user guide/User_Guide.tex",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "0c54c714af3e08ce3626e9fd5d795d7d5f28aeca",
"max_issues_repo_issues_event_max_datetime": "2016-01-31T03:35:48.000Z",
"max_issues_repo_issues_event_min_datetime": "2016-01-31T03:35:48.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "emptyewer/DEEPN",
"max_issues_repo_path": "user guide/User_Guide.tex",
"max_line_length": 1215,
"max_stars_count": 4,
"max_stars_repo_head_hexsha": "0c54c714af3e08ce3626e9fd5d795d7d5f28aeca",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "emptyewer/DEEPN",
"max_stars_repo_path": "user guide/User_Guide.tex",
"max_stars_repo_stars_event_max_datetime": "2021-02-03T06:15:14.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-02-13T05:37:05.000Z",
"num_tokens": 20061,
"size": 74416
} |
\documentclass{article}
\usepackage{fancyvrb}
\usepackage{graphicx}
\usepackage{fullpage}
\usepackage{relsize}
\usepackage{url}
\usepackage{hevea}
\usepackage[shortcuts]{extdash}
\usepackage{textcomp}
% \usepackage{verbdef}
\def\topfraction{.9}
\def\dbltopfraction{\topfraction}
\def\floatpagefraction{\topfraction} % default .5
\def\dblfloatpagefraction{\topfraction} % default .5
\def\textfraction{.1}
%HEVEA \footerfalse % Disable hevea advertisement in footer
\newcommand{\code}[1]{\ifmmode{\mbox{\relax\ttfamily{#1}}}\else{\relax\ttfamily #1}\fi}
%% Hevea version omits "\smaller"
%HEVEA \renewcommand{\code}[1]{\ifmmode{\mbox{\ttfamily{#1}}}\else{\ttfamily #1}\fi}
\newcommand{\includeimage}[2]{
\begin{center}
\ifhevea\imgsrc{#1.png}\else
\resizebox{!}{#2}{\includegraphics{figures/#1}}
\vspace{-1.5\baselineskip}
\fi
\end{center}}
% Add line between figure and text
\makeatletter
\def\topfigrule{\kern3\p@ \hrule \kern -3.4\p@} % the \hrule is .4pt high
\def\botfigrule{\kern-3\p@ \hrule \kern 2.6\p@} % the \hrule is .4pt high
\def\dblfigrule{\kern3\p@ \hrule \kern -3.4\p@} % the \hrule is .4pt high
\makeatother
\title{Annotation File Format Specification}
% Hevea ignores \date, so move the date into \author
\author{\url{https://checkerframework.org/annotation-file-utilities/} \\
\today}
\date{}
\begin{document}
\maketitle
%HEVEA \setcounter{tocdepth}{2}
\tableofcontents
\section{Purpose: External storage of annotations\label{purpose}}
Java annotations are meta-data about Java program elements, as in
``\code{@Deprecated class Date \{ \ldots\ \}}''.
Ordinarily, Java annotations are written in the source code of a
\code{.java} Java source file. When \code{javac} compiles the source code,
it inserts the annotations in the resulting \code{.class} file (as
``attributes'').
Sometimes, it is convenient to specify the annotations outside the source
code or the \code{.class} file.
\begin{itemize}
%BEGIN LATEX
\itemsep 0pt \parskip 0pt
%END LATEX
\item
When source code is not available, a textual file provides a format for
writing and storing annotations that is much easier to read and modify
than a \code{.class} file. Even if the eventual purpose is to insert the
annotations in the \code{.class} file, the annotations must be specified
in some textual format first.
\item
Even when source code is available, sometimes it should not be changed,
yet annotations must be stored somewhere for use by tools.
\item
A textual file for annotations can eliminate code clutter. A developer
performing some specialized task (such as code verification,
parallelization, etc.)\ can store annotations in an annotation file without
changing the main version of the source code. (The developer's private
version of the code could contain the annotations, but the developer
could move them to the separate file before committing changes.)
\item
Tool writers may find it more convenient to use a textual file, rather
than writing a Java or \code{.class} file parser.
\item
When debugging annotation-processing tools, a textual file format
(extracted from the Java or \code{.class} files) is easier to read and
is easier for use in testing.
\end{itemize}
All of these uses require an external, textual file format for Java annotations.
The external file format should be easy for people to create, read, and
modify.
%
An ``annotation file'' serves this purpose by specifying a set of
Java annotations.
The Annotation File Utilities
(\url{https://checkerframework.org/annotation-file-utilities/}) are a set
of tools that process annotation files.
The file format discussed in this document supports both the standard Java SE 5
declaration annotations and the type annotations introduced by Java SE 8.
The file format provides a simple syntax to represent the structure of a Java
program. For annotations in method bodies of \code{.class} files the annotation
file closely follows
section ``Class File Format Extensions'' of the JSR 308 design document~\cite{JSR308-webpage-201310},
which explains how the annotations are stored in the \code{.class}
file.
In that sense, the current design is extremely low-level, and users
probably would not want to write the files by hand (but they might fill in a
template that a tool generated automatically). As future work, we should
design a more
user-friendly format that permits Java signatures to be directly specified.
For \code{.java} source files, the file format provides a separate, higher-level
syntax for annotations in method bodies.
%% I don't like this, as it may force distributing logically connected
%% elements all over a file system. Users should be permitted, but not
%% forced, to adopt such a file structure. -MDE
% Each file corresponds to exactly one
% ``.class'' file, so (for instance) inner classes are written in
% separate annotation files, named in the same ``{\tt
% OuterClass\$InnerClass}'' pattern as the ``.class'' file.
By convention, an annotation file ends with ``\code{.jaif}'' (for ``Java
annotation index file''), but this is not required.
% \verbdef\lineend|"\n"|
%BEGIN LATEX
\DefineShortVerb{\|}
\SaveVerb{newline}|\n|
\UndefineShortVerb{\|}
\newcommand{\lineend}{\bnflit{\UseVerb{newline}}}
%END LATEX
%HEVEA \newcommand{\bs}{\char"5C}
%HEVEA \newcommand{\lineend}{\bnflit{\bs{}n}}
% literal
\newcommand{\bnflit}[1]{\textrm{``}\textbf{#1}\textrm{''}}
% non-terminal
\newcommand{\bnfnt}[1]{\textsf{\emph{#1}}}
% comment
\newcommand{\bnfcmt}{\rm \# }
% alternative
\newcommand{\bnfor}{\ensuremath{|}}
\section{Grammar\label{grammar}}
This section describes the annotation file format in detail by presenting it in
the form of a grammar. Section~\ref{grammar-conventions} details the conventions
of the grammar. Section~\ref{java-file-grammar} shows how to represent the
basic structure of a Java program (classes, methods, etc.) in an annotation
file. Section~\ref{annotations-grammar} shows how to add annotations to an
annotation file.
\subsection{Grammar conventions\label{grammar-conventions}}
Throughout this document, ``name'' is any valid Java simple name or
binary name, ``type'' is any valid type, ``value'' is any
valid Java constant, and quoted strings are literal values.
%
The Kleene qualifiers ``*'' (zero or more), ``?'' (zero or one), and ``+''
(one or more) denote plurality of a grammar element.
%
A vertical bar (``\bnfor'') separates alternatives.
Parentheses (``()'') denote grouping, and square brackets (``[]'')
denote optional syntax, which is equivalent to ``( \ldots\ )\ ?''\ but more concise.
We use the hash/pound/octothorpe symbol (``\#'') for comments within the grammar.
In the annotation file,
besides its use as token separator,
whitespace (excluding
newlines) is optional with one exception: no space is permitted
between an ``@'' character and a subsequent name. Indentation is
ignored, but is encouraged to maintain readability of the hierarchy of
program elements in the class (see the example in Section~\ref{example}).
Comments can be written throughout the annotation file using the double-slash
syntax employed by Java for single-line comments: anything following
two adjacent slashes (``//'') until the first newline is a comment.
This is omitted from the grammar for simplicity.
Block comments (``/* \ldots\ */'') are not allowed.
The line end symbol \lineend{} is used for all the different line end
conventions, that is, Windows- and Unix-style newlines are supported.
\subsection{Java file grammar\label{java-file-grammar}}
This section shows how to represent the basic structure of a Java program
(classes, methods, etc.) in an annotation file. For Java elements that can
contain annotations, this section will reference grammar productions contained
in Section~\ref{annotations-grammar}, which describes how annotations are used
in an annotation file.
An annotation file has the same basic structure as a Java program. That is,
there are packages, classes, fields and methods.
The annotation file may omit certain program elements --- for instance, it
may mention only some of the packages in a program, or only some of the
classes in a package, or only some of the fields or methods of a class.
Program elements that do not appear in the annotation file are treated as
unannotated.
\subsubsection{Package definitions\label{package-definitions}}
At the root of an annotation file is one or more package definitions.
A package definition describes a package containing a list of annotation
definitions and classes. A package definition also contains any
annotations on the package (such as those from a
\code{package-info.java} file).
\begin{tabbing}
\qquad \= \kill
\bnfnt{annotation-file} ::= \\
\qquad \bnfnt{package-definition}+ \\
\\
\bnfnt{package-definition} ::= \\
\qquad \bnflit{package} ( \bnflit{:} ) \bnfor{} ( \bnfnt{name} \bnflit{:} \bnfnt{decl-annotation}* ) \lineend \\
\qquad ( \bnfnt{annotation-definition} \bnfor{} \bnfnt{class-definition} ) *
\end{tabbing}
\noindent
Use a package line of \code{package:} for the default package. Note that
annotations on the default package are not allowed.
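For example, here is a minimal annotation file containing the default package and one named package, each with a single (unannotated) class; the class names are hypothetical:
\begin{verbatim}
package:
class Foo:
package p1:
class Bar:
\end{verbatim}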
\subsubsection{Class definitions\label{class-definitions}}
A class definition describes the annotations present on a class declaration,
as well as the fields and methods of the class. It is organized according to
the hierarchy of fields and methods in the class.
Note that we use \bnfnt{class-definition} also for interfaces, enums, and
annotation types (to specify annotations in an existing annotation type, not to
be confused with \bnfnt{annotation-definition}s described in
Section~\ref{annotation-definitions}, which defines annotations to be used
throughout an annotation file); for syntactic simplicity, we use \bnflit{class}
for
all such definitions.
% TODO: add test cases for this.
Inner classes are treated as ordinary classes whose names happen to contain
\code{\$} signs and must be defined at the top level of a class definition file.
(To change this, the grammar would have to be extended with a closing
delimiter for classes; otherwise, it would be ambiguous whether a
field or method appearing after an inner class definition belonged to the
inner class or the outer class.) The syntax for inner class names is the same as
is used by the \code{javac} compiler. A good way to get an idea of the inner
class names for a class is to compile the class and look at the filenames of the
\code{.class} files that are produced.
\begin{tabbing}
\qquad \= \kill
\bnfnt{class-definition} ::= \\
\qquad \bnflit{class} \bnfnt{name} \bnflit{:} \bnfnt{decl-annotation}* \lineend \\
% TODO: is the order really important? eg. can fields and methods not
% be mixed?
\qquad \bnfnt{typeparam-definition}* \\
\qquad \bnfnt{typeparam-bound}* \\
\qquad \bnfnt{extends}* \\
\qquad \bnfnt{implements}* \\
\qquad \bnfnt{field-definition}* \\
\qquad \bnfnt{staticinit}* \\
\qquad \bnfnt{instanceinit}* \\
\qquad \bnfnt{method-definition}*
\end{tabbing}
\noindent
Annotations on the \bnflit{class} line are annotations on the class declaration,
not the class name.
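As noted above, a nested class gets its own top-level \code{class} entry under its \code{\$}-separated binary name. A minimal sketch (names hypothetical):
\begin{verbatim}
package p1:
class Outer:
class Outer$Inner:
    field cache:
\end{verbatim}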
\paragraph{Type parameter definitions}
The \bnfnt{typeparam-definition} production defines annotations on the
declaration of a type parameter, such as on \code{K} and \code{T} in
\begin{verbatim}
public class Class<K> {
public <T> void m() {
...
}
}
\end{verbatim}
or on the type parameters on the left-hand side of a member reference,
as on \code{String} in \code{List<String>::size}.
\begin{tabbing}
\qquad \= \kill
\bnfnt{typeparam-definition} ::= \\
\qquad \bnfcmt The integer is the zero-based type parameter index. \\
\qquad \bnflit{typeparam} \bnfnt{integer} \bnflit{:} \bnfnt{type-annotation}* \lineend \\
\qquad \bnfnt{compound-type}*
\end{tabbing}
\paragraph{Type Parameter Bounds}
The \bnfnt{typeparam-bound} production defines annotations on a bound of a
type variable declaration, such as on \code{Number} and \code{Date} in
\begin{verbatim}
public class Class<K extends Number> {
public <T extends Date> void m() {
...
}
}
\end{verbatim}
\begin{tabbing}
\qquad \= \kill
\bnfnt{typeparam-bound} ::= \\
% The bound should really be a sub-element of the typeparam!
\qquad \bnfcmt The integers are respectively the parameter and bound indexes of \\
\qquad \bnfcmt the type parameter bound~\cite{JSR308-webpage-201310}. \\
\qquad \bnflit{bound} \bnfnt{integer} \bnflit{\&} \bnfnt{integer} \bnflit{:} \bnfnt{type-annotation}* \lineend \\
\qquad \bnfnt{compound-type}*
\end{tabbing}
\paragraph{Implements and extends}
The \bnfnt{extends} and \bnfnt{implements} productions
define annotations on the names of classes a class \code{extends} or
\code{implements}.
(Note: For interface declarations, \bnfnt{implements} rather than
\bnfnt{extends} defines annotations on the names of extended
interfaces.)
\begin{tabbing}
\qquad \= \kill
\bnfnt{extends} ::= \\
\qquad \bnflit{extends} \bnflit{:} \bnfnt{type-annotation}* \lineend \\
\qquad \bnfnt{compound-type}* \\
\\
\bnfnt{implements} ::= \\
\qquad \bnfcmt The integer is the zero-based index of the implemented interface. \\
\qquad \bnflit{implements} \bnfnt{integer} \bnflit{:} \bnfnt{type-annotation}* \lineend \\
\qquad \bnfnt{compound-type}*
\end{tabbing}
\paragraph{Static and instance initializers}
The \bnfnt{staticinit} and \bnfnt{instanceinit} productions
define annotations on code within static or instance initializer blocks.
\begin{tabbing}
\qquad \= \kill
\bnfnt{staticinit} ::= \\
\qquad \bnfcmt The integer is the zero-based index of the static initializer block. \\
\qquad \bnflit{staticinit} \bnflit{*} \bnfnt{integer} \bnflit{:} \lineend \\
\qquad \bnfnt{compound-type}*
\\
\bnfnt{instanceinit} ::= \\
\qquad \bnfcmt The integer is the zero-based index of the instance initializer block. \\
\qquad \bnflit{instanceinit} \bnflit{*} \bnfnt{integer} \bnflit{:} \lineend \\
\qquad \bnfnt{compound-type}*
\end{tabbing}
\subsubsection{Field definitions\label{field-definitons}}
A field definition can have annotations on the declaration, the type of the
field, or --- if in source code --- the field's initialization.
\begin{tabbing}
\qquad \= \kill
\bnfnt{field-definition} ::= \\
\qquad \bnflit{field} \bnfnt{name} \bnflit{:} \bnfnt{decl-annotation}* \lineend \\
\qquad \bnfnt{type-annotations}* \\
\qquad \bnfnt{expression-annotations}*
\end{tabbing}
\noindent
Annotations on the \bnflit{field} line are on the field declaration, not the
type of the field.
The \bnfnt{expression-annotations} production specifies annotations on the
initialization expression of a field. If a field is initialized at declaration
then in bytecode the initialization is moved to the constructor when the class
is compiled. Therefore for bytecode, annotations on the initialization
expression go in the constructor (see Section~\ref{method-definitions}), rather
than the field definition. Source code annotations for the field initialization
expression are valid on the field definition.
\subsubsection{Method definitions\label{method-definitions}}
A method definition can have annotations on the method declaration, in the
method header (return type, parameters, etc.), as well as the method body.
\begin{tabbing}
\qquad \= \kill
\bnfnt{method-definition} ::= \\
\qquad \bnflit{method} \bnfnt{method-key} \bnflit{:} \bnfnt{decl-annotation}* \lineend \\
\qquad \bnfnt{typeparam-definition}* \\
\qquad \bnfnt{typeparam-bound}* \\
\qquad \bnfnt{return-type}? \\
\qquad \bnfnt{receiver-definition}? \\
\qquad \bnfnt{parameter-definition}* \\
% TODO: method throws
\qquad \bnfnt{variable-definition}* \\
\qquad \bnfnt{expression-annotations}*
\end{tabbing}
\urldef\jvmsMethodDescriptors\url|https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.3.3|
\noindent
The annotations on the \bnflit{method} line are on the method declaration, not
on the return value. The \bnfnt{method-key} consists of the simple name followed
by a method descriptor, which is the signature in JVML format
(see JVMS \S4.3.3, \jvmsMethodDescriptors). For example, the following method
\begin{verbatim}
boolean foo(int[] i, String s) {
...
}
\end{verbatim}
\noindent
has the \bnfnt{method-key}:
\begin{verbatim}
foo([ILjava/lang/String;)Z
\end{verbatim}
Note that the
signature is the erased signature of the method and does not contain generic
type information, but does contain the return type. Using \code{javap -s} makes
it easy to find the signature. The method keys ``\code{<init>}'' and
``\code{<clinit>}'' are used to name instance (constructor) and class (static)
initialization methods. (The name of the constructor---that is, the final
element of the class name---can be used in place of ``\code{<init>}''.)
For both instance and class initializers, the ``return type'' part of the
signature should be \code{V} (for \code{void}).
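For instance, a constructor declared as \code{Foo(int x)} has the \bnfnt{method-key}
\begin{verbatim}
<init>(I)V
\end{verbatim}
\noindent
which may equivalently be written as \code{Foo(I)V}.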
% TODO: exception types in catch clause
% TODO: .class literals
% TODO: type arguments in constructor and method calls
\paragraph{Return type}
A return type defines the annotations on the return type of a method
declaration. It is also used for the result of a constructor.
\begin{tabbing}
\qquad \= \kill
\bnfnt{return-type} ::= \\
\qquad \bnflit{return:} \bnfnt{type-annotation}* \lineend \\
\qquad \bnfnt{compound-type}*
\end{tabbing}
\paragraph{Receiver definition}
A receiver definition defines the annotations on the type of the receiver
parameter in a method declaration. A method receiver is the implicit formal
parameter, \code{this}, used in non-static methods. For source code insertion,
the receiver parameter will be inserted if it does not already exist.
Only inner classes have a receiver. A top-level constructor does not have
a receiver, though it does have a result. The type of a constructor result
is represented as a return type.
\begin{tabbing}
\qquad \= \kill
\bnfnt{receiver-definition} ::= \\
\qquad \bnflit{receiver:} \bnfnt{type-annotation}* \lineend \\
\qquad \bnfnt{compound-type}*
\end{tabbing}
\paragraph{Parameter definition}
A formal parameter definition defines the annotations on a method formal
parameter declaration and the type of a method formal parameter, but
\emph{not} the receiver formal parameter.
\begin{tabbing}
\qquad \= \kill
\bnfnt{parameter-definition} ::= \\
\qquad \bnfcmt The integer is the zero-based index of the formal parameter in the method. \\
\qquad \bnflit{parameter} \bnfnt{integer} \bnflit{:} \bnfnt{decl-annotation}* \lineend \\
\qquad \bnfnt{type-annotations}*
\end{tabbing}
\noindent
The annotations on the \bnflit{parameter} line are on the formal parameter
declaration, not on the type of the parameter. A parameter index of 0 is the
first formal parameter. The receiver parameter is not index 0. Use the
\bnfnt{receiver-definition} production to annotate the receiver parameter.
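As a minimal sketch (annotation names hypothetical, and their \code{annotation} definitions omitted), a declaration annotation and a type annotation on the first formal parameter of a method \code{void m(String s)} would be written:
\begin{verbatim}
method m(Ljava/lang/String;)V:
    parameter 0: @P
        type: @NonNull
\end{verbatim}
\noindent
Here \code{@P} is a declaration annotation on the parameter itself, while \code{@NonNull} appears on the \bnflit{type:} line (Section~\ref{annotation-uses}) and therefore annotates the parameter's type.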
\subsection{Bytecode Locations\label{bytecode-locations}}
Certain elements in the body of a method or the initialization expression of a
field can be annotated. The \bnfnt{expression-annotations} rule describes the
annotations that can be added to a method body or a field initialization
expression:
\begin{tabbing}
\qquad \= \kill
\bnfnt{expression-annotations} ::= \\
\qquad \bnfnt{typecast}* \\
\qquad \bnfnt{instanceof}* \\
\qquad \bnfnt{new}* \\
\qquad \bnfnt{call}* \\
\qquad \bnfnt{reference}* \\
\qquad \bnfnt{lambda}* \\
\qquad \bnfnt{source-insert-typecast}* \\
\qquad \bnfnt{source-insert-annotation}*
\end{tabbing}
\noindent
Additionally, a variable declaration in a method body can be annotated with the
\bnfnt{variable-definition} rule, which appears below.
Because of the differences between Java source code and \code{.class} files,
the syntax for specifying code locations is different for \code{.class} files
and source code. For \code{.class} files we use a syntax called ``bytecode
offsets''. For source code we use a different syntax called ``source code
indexes''. These are both described below.
If you wish to be able to insert a given code annotation in both a \code{.class} file and a source
code file, the annotation file must redundantly specify the annotation's bytecode offset and source
code index. This can be done in a single \code{.jaif} file or two separate
\code{.jaif} files. It is not necessary to include
redundant information to insert annotations on signatures in both \code{.class}
files and source code.
Additionally, a new typecast with annotations (rather than an annotation added to an
existing typecast) can be inserted into source code. This uses a third
syntax that is described below under ``AST paths''.
A second way to insert a typecast is by specifying just an annotation, not
a full typecast (\code{insert-annotation} instead of
\code{insert-typecast}). In this case, the source annotation insertion
tool generates a full typecast if Java syntax requires one.
\subsubsection{Bytecode offsets\label{bytecode-offsets}}
For locations in bytecode, the
annotation file uses offsets into the bytecode array of the class file to
indicate the specific expression to which the annotation refers. Because
different compilation strategies yield different \code{.class} files, a
tool that maps such annotations from an annotation file into source code must
have access to the specific \code{.class} file that was used to generate
the annotation file. The
\code{javap -v} command is an effective technique to discover bytecode
offsets. Non-expression annotations such as those on methods,
fields, classes, etc., do not use a bytecode offset.
\subsubsection{Source code indexes\label{source-code-indexes}}
For locations in source code, the annotation file indicates the kind of
expression, plus a zero-based index to indicate which occurrence of that kind of
expression. For example,
\begin{verbatim}
public void method() {
Object o1 = new @A String();
String s = (@B String) o1;
Object o2 = new @C Integer(0);
Integer i = (@D Integer) o2;
}
\end{verbatim}
\noindent
\code{@A} is on new, index 0. \code{@B} is on typecast, index 0. \code{@C} is on
new, index 1. \code{@D} is on typecast, index 1.
Source code indexes only include occurrences in the class that exactly matches
the name of the enclosing \bnfnt{class-definition} rule. Specifically,
occurrences in nested classes are not included. Use a new
\bnfnt{class-definition} rule with the name of the nested class for source code
insertions in a nested class.
\subsubsection{Code locations grammar\label{code-grammar}}
For each kind of expression, the grammar contains a separate location rule.
This location rule contains the bytecode offset syntax followed by the
source code index syntax.
The grammar uses \bnflit{\#} for bytecode offsets and \bnflit{*} for source code indexes.
\begin{tabbing}
\qquad \= \kill
\bnfnt{variable-location} ::= \\
\qquad \bnfcmt Bytecode offset: the integers are respectively the index, start, and length \\
\qquad \bnfcmt fields of the annotations on this variable~\cite{JSR308-webpage-201310}. \\
\qquad (\bnfnt{integer} \bnflit{\#} \bnfnt{integer} \bnflit{+} \bnfnt{integer}) \\
\qquad \bnfcmt Source code index: the \bnfnt{name} is the identifier of the local variable. \\
\qquad \bnfcmt The \bnfnt{integer} is the optional zero-based index of the intended local \\
\qquad \bnfcmt variable within all local variables with the given \bnfnt{name}. \\
\qquad \bnfcmt The default value for the index is zero. \\
\qquad \bnfor{} (\bnfnt{name} [\bnflit{*} \bnfnt{integer}]) \\
\\
\bnfnt{variable-definition} ::= \\
\qquad \bnfcmt The annotations on the \bnflit{local} line are on the variable declaration, \\
\qquad \bnfcmt not the type of the variable. \\
\qquad \bnflit{local} \bnfnt{variable-location} \bnflit{:} \bnfnt{decl-annotation}* \lineend \\
\qquad \bnfnt{type-annotations}* \\
\\
\bnfnt{typecast-location} ::= \\
\qquad \bnfcmt Bytecode offset: the first integer is the offset field and the optional \\
\qquad \bnfcmt second integer is the type index of an intersection type~\cite{JSR308-webpage-201310}. \\
\qquad \bnfcmt The type index defaults to zero if not specified. \\
\qquad (\bnflit{\#} \bnfnt{integer} [ \bnflit{,} \bnfnt{integer} ]) \\
\qquad \bnfcmt Source code index: the first integer is the zero-based index of the typecast \\
\qquad \bnfcmt within the method and the optional second integer is the type index of an \\
\qquad \bnfcmt intersection type~\cite{JSR308-webpage-201310}. The type index defaults to zero if not specified. \\
\qquad \bnfor{} (\bnflit{*} \bnfnt{integer} [ \bnflit{,} \bnfnt{integer} ]) \\
\\
\bnfnt{typecast} ::= \\
\qquad \bnflit{typecast} \bnfnt{typecast-location} \bnflit{:} \bnfnt{type-annotation}* \lineend \\
\qquad \bnfnt{compound-type}* \\
\\
\bnfnt{instanceof-location} ::= \\
\qquad \bnfcmt Bytecode offset: the integer is the offset field of the annotation~\cite{JSR308-webpage-201310}. \\
\qquad (\bnflit{\#} \bnfnt{integer}) \\
\qquad \bnfcmt Source code index: the integer is the zero-based index of the \code{instanceof} \\
\qquad \bnfcmt within the method. \\
\qquad \bnfor{} (\bnflit{*} \bnfnt{integer}) \\
\\
\bnfnt{instanceof} ::= \\
\qquad \bnflit{instanceof} \bnfnt{instanceof-location} \bnflit{:} \bnfnt{type-annotation}* \lineend \\
\qquad \bnfnt{compound-type}* \\
\\
\bnfnt{new-location} ::= \\
\qquad \bnfcmt Bytecode offset: the integer is the offset field of the annotation~\cite{JSR308-webpage-201310}. \\
\qquad (\bnflit{\#} \bnfnt{integer}) \\
\qquad \bnfcmt Source code index: the integer is the zero-based index of the object or array \\
\qquad \bnfcmt creation within the method. \\
\qquad \bnfor{} (\bnflit{*} \bnfnt{integer}) \\
\\
\bnfnt{new} ::= \\
\qquad \bnflit{new} \bnfnt{new-location} \bnflit{:} \bnfnt{type-annotation}* \lineend \\
\qquad \bnfnt{compound-type}*
\\
\bnfnt{call-location} ::= \\
\qquad \bnfcmt Bytecode offset: the integer is the offset field of the annotation~\cite{JSR308-webpage-201310}. \\
\qquad (\bnflit{\#} \bnfnt{integer}) \\
\qquad \bnfcmt Source code index: the integer is the zero-based index of the method call \\
\qquad \bnfcmt within the field or method definition. \\
\qquad \bnfor{} (\bnflit{*} \bnfnt{integer}) \\
\\
\bnfnt{call} ::= \\
\qquad \bnflit{call} \bnfnt{call-location} \bnflit{:} \lineend \\
\qquad \bnfnt{typearg-definition}* \\
\\
\bnfnt{reference-location} ::= \\
\qquad \bnfcmt Bytecode offset: the integer is the offset field of the annotation~\cite{JSR308-webpage-201310}. \\
\qquad (\bnflit{\#} \bnfnt{integer}) \\
\qquad \bnfcmt Source code index: the integer is the zero-based index of the member \\
\qquad \bnfcmt reference~\cite{JSR308-webpage-201310}. \\
\qquad \bnfor{} (\bnflit{*} \bnfnt{integer}) \\
\\
\bnfnt{reference} ::= \\
\qquad \bnflit{reference} \bnfnt{reference-location} \bnflit{:} \bnfnt{type-annotation}* \lineend \\
\qquad \bnfnt{compound-type}* \\
\qquad \bnfnt{typearg-definition}* \\
\\
\bnfnt{lambda-location} ::= \\
\qquad \bnfcmt Bytecode offset: the integer is the offset field of the annotation~\cite{JSR308-webpage-201310}. \\
\qquad (\bnflit{\#} \bnfnt{integer}) \\
\qquad \bnfcmt Source code index: the integer is the zero-based index of the lambda \\
\qquad \bnfcmt expression~\cite{JSR308-webpage-201310}. \\
\qquad \bnfor{} (\bnflit{*} \bnfnt{integer}) \\
\\
\bnfnt{lambda} ::= \\
\qquad \bnflit{lambda} \bnfnt{lambda-location} \bnflit{:} \lineend \\
%\qquad \bnfnt{return-type}? \\
\qquad \bnfnt{parameter-definition}* \\
\qquad \bnfnt{variable-definition}* \\
\qquad \bnfnt{expression-annotations}*
\\
\qquad \= \kill
\bnfnt{typearg-definition} ::= \\
\qquad \bnfcmt The integer is the zero-based type argument index. \\
\qquad \bnflit{typearg} \bnfnt{integer} \bnflit{:} \bnfnt{type-annotation}* \lineend \\
\qquad \bnfnt{compound-type}*
\end{tabbing}
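As a small illustration of the two location syntaxes (annotation name hypothetical, offsets invented for the sketch), the same typecast could be annotated both by bytecode offset and by source code index; as discussed above, the two lines may appear in one file or in separate files:
\begin{verbatim}
method m()V:
    typecast #7: @A   // offset 7 in the bytecode of m
    typecast *0: @A   // first typecast in the source of m
\end{verbatim}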
\subsubsection{AST paths\label{ast-paths}}
A path through the AST (abstract
syntax tree) specifies an arbitrary expression in source code to modify.
AST paths can be used in the \code{.jaif} file to specify a location to
insert either a bare annotation (\bnflit{insert-annotation}) or a cast
(\bnflit{insert-typecast}).
For a cast insertion, the \code{.jaif} file specifies the type to cast to.
The annotations on the \bnflit{insert-typecast} line will be inserted on
the outermost type of the type to cast to. If the type to cast to is a compound
type then annotations on parts of the compound type are specified with the
\bnfnt{compound-type} rule. If there are no annotations on
the \bnflit{insert-typecast} line then a cast with no annotations will be
inserted or, if compound type annotations are specified, a cast with annotations
only on the compound types will be inserted.
Note that the type specified on the \bnflit{insert-typecast} line cannot contain
any qualified type names. For example, use \code{Entry<String, Object>} instead
of \code{Map.Entry<java.lang.String, java.lang.Object>}.
\begin{tabbing}
\bnfnt{source-insert-typecast} ::= \\
\qquad \bnfcmt \bnfnt{ast-path} is described below. \\
\qquad \bnfcmt \bnfnt{type} is the un-annotated type to cast to. \\
\qquad \bnflit{insert-typecast} \bnfnt{ast-path}\bnflit{:} \bnfnt{type-annotation}* \bnfnt{type} \lineend \\
\qquad \bnfnt{compound-type}*
\end{tabbing}
An AST path represents a traversal through the AST. AST paths can only be
used in \bnfnt{field-definition}s and \bnfnt{method-definition}s.
An AST path starts with the first element under the definition. For
methods this is \code{Block} and for fields this is \code{Variable}.
An AST path is composed of one or more AST entries, separated by commas. Each
AST entry is composed of a tree kind, a child selector, and an optional
argument. An example AST entry is:
\begin{verbatim}
Block.statement 1
\end{verbatim}
The tree kind is \code{Block}, the child selector is \code{statement} and the
argument is \code{1}.
The available tree kinds correspond to the Java AST tree nodes (from the package
\code{com.sun.source.tree}), but with ``Tree'' removed from the name. For
example, the class \code{com.sun.source.tree.BlockTree} is represented as
\code{Block}. The child selectors correspond to the method names of the given
Java AST tree node, with ``get'' removed from the beginning of the method name
and the first letter lowercased. In cases where the child selector method
returns a list, the method name is made singular and the AST entry also contains
an argument to select the index of the list to take. For example, the method
\code{com\-.sun\-.source\-.tree\-.Block\-Tree\-.get\-Statements()} is represented as
\code{Block.statement} and requires an argument to select the statement to take.
The following is an example of an entire AST path:
\begin{verbatim}
Block.statement 1, Switch.case 1, Case.statement 0, ExpressionStatement.expression,
MethodInvocation.argument 0
\end{verbatim}
Since the above example starts with a \code{Block} it belongs in a
\bnfnt{method-definition}. This AST path would select an expression that is in
statement 1 of the method, case 1 of the switch statement, statement 0 of the
case, and argument 0 of a method call (\code{ExpressionStatement} is just a
wrapper around an expression that can also be a statement).
The following is an example of an annotation file with AST paths used to specify
where to insert casts.
\begin{verbatim}
package p:
annotation @A:
class ASTPathExample:
field a:
insert-typecast Variable.initializer, Binary.rightOperand: @A Integer
method m()V:
insert-typecast Block.statement 0, Variable.initializer: @A Integer
insert-typecast Block.statement 1, Switch.case 1, Case.statement 0,
ExpressionStatement.expression, MethodInvocation.argument 0: @A Integer
\end{verbatim}
And the matching source code:
\begin{verbatim}
package p;
public class ASTPathExample {
private int a = 12 + 13;
public void m() {
int x = 1;
switch (x + 2) {
case 1:
System.out.println(1);
break;
case 2:
System.out.println(2 + x);
break;
default:
System.out.println(-1);
}
}
}
\end{verbatim}
The following is the output, with the casts inserted.
\begin{verbatim}
package p;
import p.A;
public class ASTPathExample {
private int a = 12 + ((@A Integer) (13));
public void m() {
int x = ((@A Integer) (1));
switch (x + 2) {
case 1:
System.out.println(1);
break;
case 2:
System.out.println(((@A Integer) (2 + x)));
break;
default:
System.out.println(-1);
}
}
}
\end{verbatim}
Using \code{insert-annotation} instead of \code{insert-typecast} yields
almost the same result --- it also inserts a cast. The sole difference
is the inability to specify the type in the cast expression. If you use
\code{insert-annotation}, then the annotation inserter infers the type,
which is \code{int} in this case.
Note that a cast can be inserted on any expression, not
just the deepest expression in the AST. For example, a cast could be inserted on
the expression \code{i + j}, the identifier \code{i}, and/or the identifier \code{j}.
To help create correct AST paths it may be useful to view the AST of a class.
The Checker Framework has a processor to do this. The following command will
output indented AST nodes for the entire input program.
\begin{verbatim}
javac -processor org.checkerframework.common.util.debug.TreeDebug ASTPathExample.java
\end{verbatim}
The following is the grammar for AST paths.
\begin{tabbing}
\qquad \= \kill
\bnfnt{ast-path} ::= \\
\qquad \bnfnt{ast-entry} [ \bnflit{,} \bnfnt{ast-entry} ]+ \\
\\
\bnfnt{ast-entry} ::= \\
\qquad \bnfnt{annotated-type} \\
\qquad \bnfor{} \bnfnt{annotation} \\
\qquad \bnfor{} \bnfnt{array-access} \\
\qquad \bnfor{} \bnfnt{array-type} \\
\qquad \bnfor{} \bnfnt{assert} \\
\qquad \bnfor{} \bnfnt{assignment} \\
\qquad \bnfor{} \bnfnt{binary} \\
\qquad \bnfor{} \bnfnt{block} \\
\qquad \bnfor{} \bnfnt{case} \\
\qquad \bnfor{} \bnfnt{catch} \\
\qquad \bnfor{} \bnfnt{compound-assignment} \\
\qquad \bnfor{} \bnfnt{conditional-expression} \\
\qquad \bnfor{} \bnfnt{do-while-loop} \\
\qquad \bnfor{} \bnfnt{enhanced-for-loop} \\
\qquad \bnfor{} \bnfnt{expression-statement} \\
\qquad \bnfor{} \bnfnt{for-loop} \\
\qquad \bnfor{} \bnfnt{if} \\
\qquad \bnfor{} \bnfnt{instance-of} \\
\qquad \bnfor{} \bnfnt{intersection-type} \\
\qquad \bnfor{} \bnfnt{labeled-statement} \\
\qquad \bnfor{} \bnfnt{lambda-expression} \\
\qquad \bnfor{} \bnfnt{member-reference} \\
\qquad \bnfor{} \bnfnt{member-select} \\
\qquad \bnfor{} \bnfnt{method-invocation} \\
\qquad \bnfor{} \bnfnt{new-array} \\
\qquad \bnfor{} \bnfnt{new-class} \\
\qquad \bnfor{} \bnfnt{parameterized-type} \\
\qquad \bnfor{} \bnfnt{parenthesized} \\
\qquad \bnfor{} \bnfnt{return} \\
\qquad \bnfor{} \bnfnt{switch} \\
\qquad \bnfor{} \bnfnt{synchronized} \\
\qquad \bnfor{} \bnfnt{throw} \\
\qquad \bnfor{} \bnfnt{try} \\
\qquad \bnfor{} \bnfnt{type-cast} \\
\qquad \bnfor{} \bnfnt{type-parameter} \\
\qquad \bnfor{} \bnfnt{unary} \\
\qquad \bnfor{} \bnfnt{union-type} \\
\qquad \bnfor{} \bnfnt{variable} \\
\qquad \bnfor{} \bnfnt{while-loop} \\
\qquad \bnfor{} \bnfnt{wildcard} \\
\\
\bnfnt{annotated-type} :: = \\
\qquad \bnflit{AnnotatedType} \bnflit{.} ( ( \bnflit{annotation} \bnfnt{integer} ) \bnfor{} \bnflit{underlyingType} ) \\
\\
\bnfnt{annotation} ::= \\
\qquad \bnflit{Annotation} \bnflit{.} ( \bnflit{type} \bnfor{} \bnflit{argument} \bnfnt{integer} ) \\
\\
\bnfnt{array-access} ::= \\
\qquad \bnflit{ArrayAccess} \bnflit{.} ( \bnflit{expression} \bnfor{} \bnflit{index} ) \\
\\
\bnfnt{array-type} ::= \\
\qquad \bnflit{ArrayType} \bnflit{.} \bnflit{type} \\
\\
\bnfnt{assert} ::= \\
\qquad \bnflit{Assert} \bnflit{.} ( \bnflit{condition} \bnfor{} \bnflit{detail} ) \\
\\
\bnfnt{assignment} ::= \\
\qquad \bnflit{Assignment} \bnflit{.} ( \bnflit{variable} \bnfor{} \bnflit{expression} ) \\
\\
\bnfnt{binary} ::= \\
\qquad \bnflit{Binary} \bnflit{.} ( \bnflit{leftOperand} \bnfor{} \bnflit{rightOperand} ) \\
\\
\bnfnt{block} ::= \\
\qquad \bnflit{Block} \bnflit{.} \bnflit{statement} \bnfnt{integer} \\
\\
\bnfnt{case} ::= \\
\qquad \bnflit{Case} \bnflit{.} ( \bnflit{expression} \bnfor{} ( \bnflit{statement} \bnfnt{integer} ) ) \\
\\
\bnfnt{catch} ::= \\
\qquad \bnflit{Catch} \bnflit{.} ( \bnflit{parameter} \bnfor{} \bnflit{block} ) \\
\\
\bnfnt{compound-assignment} ::= \\
\qquad \bnflit{CompoundAssignment} \bnflit{.} ( \bnflit{variable} \bnfor{} \bnflit{expression} ) \\
\\
\bnfnt{conditional-expression} ::= \\
\qquad \bnflit{ConditionalExpression} \bnflit{.} ( \bnflit{condition} \bnfor{} \bnflit{trueExpression} \bnfor{} \bnflit{falseExpression} ) \\
\\
\bnfnt{do-while-loop} ::= \\
\qquad \bnflit{DoWhileLoop} \bnflit{.} ( \bnflit{condition} \bnfor{} \bnflit{statement} ) \\
\\
\bnfnt{enhanced-for-loop} ::= \\
\qquad \bnflit{EnhancedForLoop} \bnflit{.} ( \bnflit{variable} \bnfor{} \bnflit{expression} \bnfor{} \bnflit{statement} ) \\
\\
\bnfnt{expression-statement} ::= \\
\qquad \bnflit{ExpressionStatement} \bnflit{.} \bnflit{expression} \\
\\
\bnfnt{for-loop} ::= \\
\qquad \bnflit{ForLoop} \bnflit{.} ( ( \bnflit{initializer} \bnfnt{integer} ) \bnfor{} \bnflit{condition} \bnfor{} ( \bnflit{update} \bnfnt{integer} ) \bnfor{} \bnflit{statement} ) \\
\\
\bnfnt{if} ::= \\
\qquad \bnflit{If} \bnflit{.} ( \bnflit{condition} \bnfor{} \bnflit{thenStatement} \bnfor{} \bnflit{elseStatement} ) \\
\\
\bnfnt{instance-of} ::= \\
\qquad \bnflit{InstanceOf} \bnflit{.} ( \bnflit{expression} \bnfor{} \bnflit{type} ) \\
\\
\bnfnt{intersection-type} ::= \\
\qquad \bnflit{IntersectionType} \bnflit{.} \bnflit{bound} \bnfnt{integer} \\
\\
\bnfnt{labeled-statement} ::= \\
\qquad \bnflit{LabeledStatement} \bnflit{.} \bnflit{statement} \\
\\
\bnfnt{lambda-expression} ::= \\
\qquad \bnflit{LambdaExpression} \bnflit{.} ( ( \bnflit{parameter} \bnfnt{integer} ) \bnfor{} \bnflit{body} ) \\
\\
\bnfnt{member-reference} ::= \\
\qquad \bnflit{MemberReference} \bnflit{.} ( \bnflit{qualifierExpression} \bnfor{} ( \bnflit{typeArgument} \bnfnt{integer} ) ) \\
\\
\bnfnt{member-select} ::= \\
\qquad \bnflit{MemberSelect} \bnflit{.} \bnflit{expression} \\
\\
\bnfnt{method-invocation} ::= \\
\qquad \bnflit{MethodInvocation} \bnflit{.} ( ( \bnflit{typeArgument} \bnfnt{integer} ) \bnfor{} \bnflit{methodSelect} \\
\qquad \bnfor{} ( \bnflit{argument} \bnfnt{integer} ) ) \\
\\
\bnfnt{new-array} ::= \\
\qquad \bnflit{NewArray} \bnflit{.} ( \bnflit{type} \bnfor{} ( \bnflit{dimension} \bnfor{} \bnflit{initializer} ) \bnfnt{integer} ) \\
\\
\bnfnt{new-class} ::= \\
\qquad \bnflit{NewClass} \bnflit{.} ( \bnflit{enclosingExpression} \bnfor{} ( \bnflit{typeArgument} \bnfnt{integer} ) \bnfor{} \bnflit{identifier} \\
\qquad \bnfor{} ( \bnflit{argument} \bnfnt{integer} ) \bnfor{} \bnflit{classBody} ) \\
\\
\bnfnt{parameterized-type} ::= \\
\qquad \bnflit{ParameterizedType} \bnflit{.} ( \bnflit{type} \bnfor{} ( \bnflit{typeArgument} \bnfnt{integer} ) ) \\
\\
\bnfnt{parenthesized} ::= \\
\qquad \bnflit{Parenthesized} \bnflit{.} \bnflit{expression} \\
\\
\bnfnt{return} ::= \\
\qquad \bnflit{Return} \bnflit{.} \bnflit{expression} \\
\\
\bnfnt{switch} ::= \\
\qquad \bnflit{Switch} \bnflit{.} ( \bnflit{expression} \bnfor{} ( \bnflit{case} \bnfnt{integer} ) ) \\
\\
\bnfnt{synchronized} ::= \\
\qquad \bnflit{Synchronized} \bnflit{.} ( \bnflit{expression} \bnfor{} \bnflit{block} ) \\
\\
\bnfnt{throw} ::= \\
\qquad \bnflit{Throw} \bnflit{.} \bnflit{expression} \\
\\
\bnfnt{try} ::= \\
\qquad \bnflit{Try} \bnflit{.} ( \bnflit{block} \bnfor{} ( \bnflit{catch} \bnfnt{integer} ) \bnfor{} \bnflit{finallyBlock} \bnfor{} ( \bnflit{resource} \bnfnt{integer} ) ) \\
\\
\bnfnt{type-cast} ::= \\
\qquad \bnflit{TypeCast} \bnflit{.} ( \bnflit{type} \bnfor{} \bnflit{expression} ) \\
\\
\bnfnt{type-parameter} ::= \\
\qquad \bnflit{TypeParameter} \bnflit{.} \bnflit{bound} \bnfnt{integer} \\
\\
\bnfnt{unary} ::= \\
\qquad \bnflit{Unary} \bnflit{.} \bnflit{expression} \\
\\
\bnfnt{union-type} ::= \\
\qquad \bnflit{UnionType} \bnflit{.} \bnflit{typeAlternative} \bnfnt{integer} \\
\\
\bnfnt{variable} ::= \\
\qquad \bnflit{Variable} \bnflit{.} ( \bnflit{type} \bnfor{} \bnflit{initializer} ) \\
\\
\bnfnt{while-loop} ::= \\
\qquad \bnflit{WhileLoop} \bnflit{.} ( \bnflit{condition} \bnfor{} \bnflit{statement} ) \\
\\
\bnfnt{wildcard} ::= \\
\qquad \bnflit{Wildcard} \bnflit{.} \bnflit{bound} \\
\\
\end{tabbing}
\subsection{Annotations\label{annotations-grammar}}
This section describes the details of how annotations are defined, how
annotations are used, and the different kinds of annotations in an annotation
file.
\subsubsection{Annotation definitions\label{annotation-definitions}}
An annotation definition describes the annotation's fields and their
types, so that they may be referenced in a compact way throughout the
annotation file. Any annotation that is used in an annotation file
% either on a program element or as a field of another annotation definition.
must be defined before use.
(This requirement makes it impossible to define, in an
annotation file, an annotation that is meta-annotated with itself.)
The two exceptions to this rule are the \code{@java.lang.annotation.Target} and
\code{@java.lang.annotation.Retention} meta-annotations. These meta-annotations
are often used in annotation definitions, so for ease of use they are not required to
be defined themselves.
In the annotation file, the annotation definition appears within the
package that defines the annotation. The annotation may be applied to
elements of any package.
Note that these annotation definitions should not be confused with the
\code{@interface} syntax used in a Java source file to declare an annotation. An
annotation definition in an annotation file is only used internally. An
annotation definition in an annotation file will often mirror an
\code{@interface} annotation declaration in a Java source file in order to use
that annotation in an annotation file.
% TODO, see https://github.com/typetools/annotation-tools/issues/25
% The Annotation File Utilities can read annotation definitions from the
% classpath, so it is optional to define them in the annotation file.
\begin{tabbing}
\qquad \= \kill
\bnfnt{annotation-definition} ::= \\
\qquad \bnfcmt The \bnfnt{decl-annotation}s are the meta-annotations on this annotation. \\
\qquad \bnflit{annotation} \bnflit{@}\bnfnt{name}
\bnflit{:} \bnfnt{decl-annotation}* \lineend \\
\qquad \bnfnt{annotation-field-definition}* \\
\\
\bnfnt{annotation-field-definition} ::= \\
\qquad \bnfnt{annotation-field-type} \bnfnt{name} \lineend \\
\\
\bnfnt{annotation-field-type} ::= \\
\qquad \bnfcmt \bnfnt{primitive-type} is any Java primitive type (\code{int}, \code{boolean}, etc.). \\
\qquad \bnfcmt These are described in detail in Section~\ref{types-and-values}. \\
\qquad (\bnfnt{primitive-type} \bnfor{} \bnflit{String} \bnfor{} \bnflit{Class}
\bnfor{} (\bnflit{enum} \bnfnt{name}) \bnfor{} (\bnflit{annotation-field} \bnfnt{name})) \bnflit{[]}? \\
\qquad \bnfor{} \bnflit{unknown[]} \lineend
\end{tabbing}
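A minimal sketch of a definition that uses several of these field types (all names hypothetical; the \code{enum} name must be the binary name of a real enumeration class):
\begin{verbatim}
package org.example:
annotation @Reviewed:
    String[] reviewers
    enum org.example.Status status
    int priority
\end{verbatim}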
\subsubsection{Annotation uses\label{annotation-uses}}
Java SE 8 has two kinds of annotations: ``declaration annotations'' and ``type
annotations''. Declaration annotations can be written only on method formal
parameters and the declarations of packages, classes, methods, fields, and local
variables. Type annotations can be written on any use of a type, and on type
parameter declarations. Type annotations must be meta-annotated
with \code{ElementType.TYPE\_USE} and/or \code{ElementType.TYPE\_PARAMETER}.
These meta-annotations are described in more detail in the JSR 308
specification~\cite{JSR308-webpage-201310}.
The previous rules have used two productions for annotation uses in an
annotation file: \bnfnt{decl-annotation} and \bnfnt{type-annotation}.
The \bnfnt{decl-annotation} and \bnfnt{type-annotation} productions use the same
syntax to specify an annotation. These two different rules exist only to show
which type of annotation is valid in a given location. A declaration annotation
must be used where the \bnfnt{decl-annotation} production is used and a type
annotation must be used where the \bnfnt{type-annotation} production is used.
The syntax for an annotation is the same as in a Java source file.
\begin{tabbing}
\qquad \= \kill
\bnfnt{decl-annotation} ::= \\
\qquad \bnfcmt \bnfnt{annotation} must be a declaration annotation. \\
\qquad \bnfnt{annotation} \\
\\
\bnfnt{type-annotation} ::= \\
\qquad \bnfcmt \bnfnt{annotation} must be a type annotation. \\
\qquad \bnfnt{annotation} \\
\\
\bnfnt{annotation} ::= \\
\qquad \bnfcmt The name may be the annotation's simple name, unless the file \\
\qquad \bnfcmt contains definitions for two annotations with the same simple name. \\
\qquad \bnfcmt In this case, the fully-qualified name of the annotation is required. \\
% TODO:
% Perhaps we could add that if a class is in the same package
% as an annotation it may always use the simple name (even if there's another
% annotation with the same simple name in another package)? - MP 06/28
\qquad \bnflit{@}\bnfnt{name} [ \bnflit{(} \bnfnt{annotation-field} [ \bnflit{,} \bnfnt{annotation-field} ]+ \bnflit{)} ] \\
\\
\bnfnt{annotation-field} ::= \\
\qquad \bnfcmt In Java, if a single-field annotation has a field named \\
\qquad \bnfcmt ``\code{value}'', then that field name may be elided in uses of the\\
\qquad \bnfcmt annotation: ``\code{@A(12)}'' rather than ``\code{@A(value=12)}''. \\
\qquad \bnfcmt The same convention holds in an annotation file. \\
\qquad \bnfnt{name} \bnflit{=} \bnfnt{value}
\end{tabbing}
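For example, assuming \code{@A} is defined with a single field named \code{value} of type \code{int}, the following two class lines are equivalent:
\begin{verbatim}
class Foo: @A(12)
class Foo: @A(value=12)
\end{verbatim}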
\noindent
Certain Java elements allow both declaration and type annotations (for example,
formal method parameters). For these elements, the \bnfnt{type-annotations}
rule is used to differentiate between the declaration annotations and the type
annotations.
\begin{tabbing}
\qquad \= \kill
\bnfnt{type-annotations} ::= \\
\qquad \bnfcmt holds the type annotations, as opposed to the declaration annotations. \\
\qquad \bnflit{type:} \bnfnt{type-annotation}* \lineend \\
\qquad \bnfnt{compound-type}*
\end{tabbing}
\paragraph{Compound type annotations}
A compound type is a parameterized, wildcard, array, or nested type. Annotations
may be on any type in a compound type. In order to specify the location of an
annotation within a compound type we use a ``type path''. A
type path is composed of one or more pairs of type kind and type argument index.
\begin{tabbing}
\qquad \= \kill
\bnfnt{type-kind} ::= \\
\qquad \bnflit{0} \bnfcmt annotation is deeper in this array type \\
\qquad \bnfor{} \bnflit{1} \bnfcmt annotation is deeper in this nested type \\
\qquad \bnfor{} \bnflit{2} \bnfcmt annotation is on the bound of this wildcard type argument \\
\qquad \bnfor{} \bnflit{3} \bnfcmt annotation is on the i'th type argument of this parameterized type \\
\\
\bnfnt{type-path} ::= \\
\qquad \bnfcmt The \bnfnt{integer} is the type argument index. \\
\qquad \bnfnt{type-kind} \bnflit{,} \bnfnt{integer} [ \bnflit{,} \bnfnt{type-kind} \bnflit{,} \bnfnt{integer} ]* \\
\\
\bnfnt{compound-type} ::= \\
\qquad \bnflit{inner-type} \bnfnt{type-path} \bnflit{:} \bnfnt{annotation}* \lineend
\end{tabbing}
\noindent
The type argument index used in the \bnfnt{type-path} rule must be \bnflit{0} unless the \bnfnt{type-kind} is
\bnflit{3}. In this case, the type argument index selects which type argument
of a parameterized type to use.
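As a sketch constructed from the grammar above (annotation name hypothetical): for a field whose type is \code{Map<String, List<Integer>>}, an annotation on \code{Integer} is reached by selecting type argument 1 of \code{Map} and then type argument 0 of \code{List}:
\begin{verbatim}
field m:
    type:
        inner-type 3, 1, 3, 0: @A
\end{verbatim}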
\urldef\cftp\url|https://checkerframework.org/jsr308/specification/java-annotation-design.html#class-file:ext:type_path|
Type paths are explained in more detail, with many examples to ease
understanding, in Section 3.4 of the JSR 308 Specification.\footnotemark
\footnotetext{\cftp}
\section{Example\label{example}}
Consider the code of Figure~\ref{fig:java-example}.
Figure~\ref{fig:annotation-file-examples} shows two legal annotation files,
each of which represents the annotations in that code.
\begin{figure}
\begin{verbatim}
package p1;
import p2.*; // for the annotations @A through @D
import java.util.*;
public @A(12) class Foo {
public int bar; // no annotation
private @B List<@C String> baz;
public Foo(@D("spam") Foo this, @B List<@C String> a) {
@B List<@C String> l = new LinkedList<@C String>();
l = (@B List<@C String>)l;
}
}
\end{verbatim}
\caption{Example Java code with annotations.}
\label{fig:java-example}
\end{figure}
\begin{figure}
\begin{tabular}{|c|c|}
\hline
\begin{minipage}[t]{.5\textwidth}
\begin{verbatim}
package p2:
annotation @A:
int value
annotation @B:
annotation @C:
annotation @D:
String value
package p1:
class Foo: @A(value=12)
field bar:
field baz: @B
inner-type 0: @C
method <init>(
Ljava/util/List;)V:
parameter 0: @B
inner-type 0: @C
receiver: @D(value="spam")
local 1 #3+5: @B
inner-type 0: @C
typecast #7: @B
inner-type 0: @C
new #0:
inner-type 0: @C
\end{verbatim}
\end{minipage}
&
\begin{minipage}[t]{.45\textwidth}
\begin{verbatim}
package p2:
annotation @A
int value
package p2:
annotation @B
package p2:
annotation @C
package p2:
annotation @D
String value
package p1:
class Foo: @A(value=12)
package p1:
class Foo:
field baz: @B
package p1:
class Foo:
field baz:
inner-type 0: @C
// ... definitions for p1.Foo.<init>
// omitted for brevity
\end{verbatim}
\end{minipage}
\\
\hline
\end{tabular}
\caption{Two distinct annotation files each corresponding to the code of
Figure~\ref{fig:java-example}.}
\label{fig:annotation-file-examples}
\end{figure}
\section{Types and values\label{types-and-values}}
The Java language permits several types for annotation fields: primitives,
\code{String}s, \code{java.lang.Class} tokens (possibly parameterized),
enumeration constants, annotations, and one-dimensional arrays of these.
These \textbf{types} are represented in an annotation file as follows:
\begin{itemize}
\item Primitive: the name of the primitive type, such as \code{boolean}.
\item String: \code{String}.
\item Class token: \code{Class}; the parameterization, if any, is not
represented in annotation files.
\item Enumeration constant: \code{enum} followed by the binary name of
the enumeration class, such as \code{enum java.lang.Thread\$State}.
\item Annotation: \code{@} followed by the binary name of the annotation type.
\item Array: The representation of the element type followed by \code{[]}, such
as \code{String[]}, with one exception: an annotation definition may specify
a field type as \code{unknown[]} if, in all occurrences of that annotation in
the annotation file, the field value is a zero-length array.\footnotemark
\footnotetext{There is a design flaw in the format of array field values in a
class file. An array does not itself specify an element type; instead, each
element specifies its type. If the annotation type \code{X} has an array field
\code{arr} but \code{arr} is zero-length in every \code{@X} annotation in the
class file, there is no way to determine the element type of \code{arr} from the
class file. This exception makes it possible to define \code{X} when the class
file is converted to an annotation file.}
\end{itemize}
Annotation field \textbf{values} are represented in an annotation file as follows:
\begin{itemize}
\item Numeric primitive value: literals as they would appear in Java source
code.
\item Boolean: \code{true} or \code{false}.
\item Character: A single character or escape sequence in single quotes, such
as \code{'A'} or \code{'\char`\\''}.
\item String: A string literal as it would appear in source code, such as
\code{"\char`\\"Yields falsehood when quined\char`\\" yields falsehood when quined."}.
\item Class token: The binary name of the class (using \code{\$} for
inner classes) or the name of the primitive type or \code{void}, possibly
followed by \code{[]}s representing array layers, followed by \code{.class}.
Examples: \code{java.lang.Integer[].class}, \code{java.util.Map\$Entry.class},
and \code{int.class}.
\item Enumeration constant: the name of the enumeration constant, such as
\code{RUNNABLE}.
\item Array: a sequence of elements inside \code{\char`\{\char`\}} with a comma
between each pair of adjacent elements; a comma following the last element is
optional as in Java. Also as in Java, the braces may be omitted if the
array has only one element.
Examples: \code{\char`\{1\char`\}}, \code{1},
\code{\char`\{true, false,\char`\}} and \code{\char`\{\char`\}}.
\end{itemize}
The following example annotation file shows how types and values are represented.
\begin{verbatim}
package p1:
annotation @ClassInfo:
String remark
Class favoriteClass
Class favoriteCollection // it's probably Class<? extends Collection>
// in source, but no parameterization here
char favoriteLetter
boolean isBuggy
enum p1.DebugCategory[] defaultDebugCategories
@p1.CommitInfo lastCommit
annotation @CommitInfo:
byte[] hashCode
int unixTime
String author
String message
class Foo: @p1.ClassInfo(
remark="Anything named \"Foo\" is bound to be good!",
favoriteClass=java.lang.reflect.Proxy.class,
favoriteCollection=java.util.LinkedHashSet.class,
favoriteLetter='F',
isBuggy=true,
defaultDebugCategories={DEBUG_TRAVERSAL, DEBUG_STORES, DEBUG_IO},
[email protected](
hashCode={31, 41, 59, 26, 53, 58, 97, 92, 32, 38, 46, 26, 43, 38, 32, 79},
unixTime=1152109350,
author="Joe Programmer",
message="First implementation of Foo"
)
)
\end{verbatim}
\section{Alternative formats\label{alternative-formats}}
We mention multiple alternatives to the format described in this document.
Each of them has its own merits.
In the future, the other formats could be implemented, along with tools for
converting among them.
% Then, we can see which of the formats programmers prefer in practice.
An alternative to the format described in this document would be XML\@.
% It would be easy to use an XML format to augment the one proposed here, but
XML does not seem to provide any compelling advantages. Programmers
interact with annotation files in two ways: textually (when reading, writing,
and editing annotation files) and programmatically (when writing
annotation-processing tools). Textually, XML can be
very hard to read; style sheets mitigate this
problem, but editing XML files remains tedious and error-prone.
Programmatically, a layer of abstraction (an API) is needed in any event, so it
makes little difference what the underlying textual representation is.
XML files are easier to parse, but the parsing code only needs to be
written once and is abstracted away by an API to the data structure.
Another alternative is a format like the \code{.spec}/\code{.jml} files
of JML~\cite{LeavensBR2006:JML}. The format is similar to Java code, but
all method bodies are empty, and users can annotate the public members of a
class. This is easy for Java programmers to read and understand. (It is a
bit more complex to implement, but that is not particularly germane.)
Because it does not permit complete specification of a class's annotations
(it does not permit annotation of method bodies), it is not appropriate for
certain tools, such as type inference tools. However, it might be desirable
to adopt such a format for public members, and to use the format
described in this document primarily for method bodies.
The Checker Framework~\cite{DietlDEMS2011,CF} uses two additional formats for
annotations. The first format is called ``stub files.'' A stub file is similar
to the \code{.spec}/\code{.jml} files described in the previous paragraph. It
uses Java syntax, only allows annotations on method headers, and does not require
method bodies. A stub file is used to add annotations to method headers of
existing Java classes. For example, the Checker Framework uses stub files to add
annotations to method headers of libraries (such as the JDK) without modifying
the source code or bytecode of the library. A single stub file can contain
multiple packages and classes. Unlike a \code{.jaif} file, this format only allows
annotations on method headers, not on class headers, fields, or in method bodies.
Further, stub files are only used by the Checker Framework at run time; they cannot
be used to insert annotations into a source file or classfile.
The Checker Framework also uses a format called an ``annotated JDK.'' The
annotated JDK is a \code{.jar} file containing the JDK with annotations. It is
created with the Annotation File Utilities, but the annotations are stored in a
format similar to a stub file, instead of in a \code{.jaif} file. The annotated
JDK starts with a source file for each file in the JDK to be annotated. Like a
stub file, each source file only contains method headers with annotations. The
annotated JDK also supports annotations in the class header. To build the
annotated JDK \code{.jar} file, the source files are compiled, then the
\code{extract-annotations} script is run on them to generate a \code{.jaif} file
for each source file. The \code{insert-annotations} script then inserts the
annotations contained in each \code{.jaif} file into the corresponding JDK class
file. These are then packaged up into a single \code{.jar} file. Like a stub
file, the annotated JDK is easier to read and write since it uses Java syntax.
However, the annotated JDK requires a different file for each original Java
source file. It does not allow annotations on fields or in method bodies. The
annotated JDK also only contains annotations in the JDK and not other Java
files.
Eclipse defines its own file format for external nullness annotations:
\url{https://wiki.eclipse.org/JDT_Core/Null_Analysis/External_Annotations#File_format}.
It works only for nullness annotations. It is more compact but less
readable than the Annotation File Format. It is intended for tool use, not
for editing by ordinary users, who are expected to interact with it
via the Eclipse GUI\@.
\bibliographystyle{alpha}
\bibliography{annotation-file-format,bibstring-unabbrev,types,ernst,invariants,generals,alias,crossrefs}
\end{document}
% LocalWords: java javac OuterClass InnerClass TODO Kleene MP subannotations
% LocalWords: enum arr quined int pt instanceof RUNTIME JVML ILjava boolean
% LocalWords: programmatically jml ernst jaif whitespace 0pt decl enums
% LocalWords: filenames typeparam javap init clinit ast un lowercased io
% LocalWords: ExpressionStatement AnnotatedType underlyingType ArrayType
% LocalWords: ArrayAccess leftOperand rightOperand CompoundAssignment
% LocalWords: ConditionalExpression trueExpression falseExpression i'th
% LocalWords: DoWhileLoop EnhancedForLoop ForLoop thenStatement NewArray
% LocalWords: elseStatement InstanceOf LabeledStatement LambdaExpression
% LocalWords: MemberReference qualifierExpression typeArgument NewClass
% LocalWords: MemberSelect MethodInvocation methodSelect classBody
% LocalWords: enclosingExpression ParameterizedType finallyBlock AScene
% LocalWords: TypeCast UnionType typeAlternative WhileLoop ElementType
% LocalWords: AClass AMethod AElement objectweb anno tations parseScene
% LocalWords: CriterionList isSatisifiedBy CriteriaList afu getPositions
% LocalWords: InPackageCriterion InClassCriterion InMethodCriterion
% LocalWords: ParamCriterion inserter RUNNABLE ASM src asm staticinit
%% LocalWords: instanceinit typearg IntersectionType TypeParameter
%% LocalWords: classfile crossrefs
% !TEX root = altosaar-2020-thesis.tex
\chapter{Background}
\label{ch:background}
\lettrine[image=true,lines=3]{design/T}{his} chapter describes probabilistic models and probabilistic inference, taking as examples models from statistical physics and recommender systems.
\input{fig/fig_graphical_model_ising}
\section{Probabilistic Models}
Probability models assign probability to configurations of random variables. The random variables in a probability model might correspond to observed variables in a physical system, or to latent properties representing patterns in data collected from the world, or a combination of both. To define a probability model, it is necessary to specify the density $p$ of a collection of random variables $\mbz$. We focus on probabilistic models $p(\mbz)$ where relationships between random variables can be encoded as edges in a graph, or probabilistic graphical models~\citep{jordan2004graphical}.
\subsection{Example: Ising Model}
\label{sec:ising}
For example, consider a model used in statistical physics: the Ising model. The Ising model can be used to model interactions between atoms in a material~\citep{henelius2016refrustration} to study how the material behaves in different conditions, paving the way toward material design. This probabilistic model has binary random variables $z_n$ with density
\begin{equation}
p(\mbz; \beta) = \frac{\exp(-\beta E(\mbz))}{\cZ}\, .
\label{eq:boltzmann}
\end{equation}
The semicolon in \Cref{eq:boltzmann} denotes that the model has a parameter $\beta$, representing the reciprocal temperature of the system of random variables (a physical quantity). The energy function $E(\mbz)$ encodes the relationships between random variables, and $\cZ$, the normalizing constant, ensures that this probability distribution sums to one over all configurations of random variables~\citep{chandler1987introduction}. The energy function of the Ising model is\footnote{Bold letters can denote collections of random variables $\mbz = \{z_1, z_2,...,z_N\}$, or vectors, depending on the context.}
\begin{equation}
\label{eq:ising-energy}
E(\mbz) = -\frac{1}{2}\sum_{i, j} J_{ij}z_i z_j - H\sum_i z_i\, .
\end{equation}
The interaction strength $J_{ij}$ defines the interactions between random variables. In a simple Ising model, only nearest neighbors interact, so $J_{ij}$ is nonzero if the random variables $z_i$ and $z_j$ are neighbors. The parameter $H$ increases or decreases the energy in proportion to the values of the random variables $z_i$; we give its physical interpretation later.
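As a minimal computational sketch of \Cref{eq:ising-energy} (the lattice size, coupling, and field below are arbitrary illustrative choices), the energy of a small two-dimensional spin configuration with nearest-neighbor interactions can be evaluated directly; summing each neighboring pair once is equivalent to the factor of $\frac{1}{2}$ in the double sum over ordered pairs.
\begin{verbatim}
import numpy as np

def ising_energy(z, J=1.0, H=0.0):
    """Energy of a 2D configuration z with entries +/-1, nearest-neighbor
    coupling J and external field H (open boundaries for simplicity)."""
    # Each unordered neighbor pair is counted once, which matches the
    # factor of 1/2 in the double sum over ordered pairs.
    pairs = (z[:, :-1] * z[:, 1:]).sum() + (z[:-1, :] * z[1:, :]).sum()
    return -J * pairs - H * z.sum()

rng = np.random.default_rng(0)
z = rng.choice([-1, +1], size=(4, 4))
print(ising_energy(z, J=1.0, H=0.5))
\end{verbatim}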
The Ising model can be represented as a probabilistic graphical model, shown in \Cref{fig:graphical-model-ising}. Two variables $z_i$ and $z_j$ interact (changing the value of one leads to a change in probability of the other) only if they share an edge in the graph. This representation works in conjunction with the density in \Cref{eq:boltzmann}, as the presence of an edge in the graph corresponds to two variables interacting in the energy function $E$. In this model, the energy function (and hence graph) is such that only neighboring random variables interact.
The Ising model can be used to study physical systems such as magnetic materials, where interactions between atoms can be encoded into the interaction strength $J_{ij}$. The interactions between random variables encoded in this manner contain the necessary information to model the properties of a material. In modeling a material, the random variables $\mbz$ can be referred to as spins. Spin is a type of angular momentum carried by particles comprising atoms, and such angular momentum causes a magnetic field. Although the random variables $\mbz$ are binary, taking on values of $-1$ and $+1$, they can be re-scaled to the magnetic strength of the atoms in a particular material of interest if comparison to experimental data is required. The parameter $H$ can be interpreted as the magnitude of an external magnetic field that interacts with the magnetic strength and orientation of every atom~\citep{chandler1987introduction}.
To see how well an Ising model mirrors a physical material, a property such as magnetization can be measured in the material, and calculated using the model. Magnetization is the average orientation of the magnetic strength of every atom or random variable in the material,
\begin{equation}
M(\mbz) = \frac{1}{N}\sum_{i=1}^N z_i\, .
\end{equation}
By measuring the magnetization $M$ and computing its value in the Ising model, a practitioner can deduce how accurately the model reproduces experimental data. For example, if an Ising model with nearest neighbors ($J_{ij} \neq 0$ if $i$ neighbors $j$) does not accurately reproduce the magnetization of a physical material, it may be necessary to include second-nearest neighbor effects ($J_{ij}\neq 0$ if $i$ and $j$ are connected by a path of length at most two).
Another example of a quantity that can be measured experimentally and computed in a probabilistic model is the thermodynamic free energy $F$,
\begin{equation}
\label{eq:free-energy}
F = -\frac{1}{\beta} \log \cZ\, .
\end{equation}
The free energy of a system relates to the amount of energy that can be extracted from a system by its surroundings. For example, the free energy of a protein is used to understand its stability, and can be measured by the amount of energy needed to destroy its structure by denaturing it~\citep{stone2013the-theory}. In modeling a magnetic material or biological material, the free energy can be derived from the normalizing constant $\cZ$~\citep{chandler1987introduction}.
\input{fig/tab_example_meals_regression}
\subsection{Example: Binary Classification}
\input{fig/fig_graphical_model_regression}
Another example of a probabilistic model is a binary classifier~\citep{bishop2006pattern}, represented as a graphical model in \Cref{fig:graphical-model-regression}. Consider $N$ datapoints of the form $(x_n, y_n)$ consisting of covariates $x_n$ and binary responses $y_n$. As illustrated in \Cref{tab:example-binary}, the covariates $x_n$ might represent information about items such as foods in a meal, and $y_n$ may indicate whether a single user ate a meal with those foods. A binary classifier would then classify whether the user would eat a new meal $\hat{x}_n$ based on its constituent foods.
A binary classifier is defined using a regression function $f$ with parameters $\mbtheta$. The logistic function $\sigma$ applied to the regression function defines the probability model for a binary classifier,
\begin{equation}
p(y_n \mid x_n; \mbtheta) = \frac{\exp\left( \sigma(f(x_n; \mbtheta)) \cdot y_n\right)}{\cZ}\, .
\label{eq:binary-classification}
\end{equation}
The logistic function constrains the output of $f$ to the unit interval, and $\cZ$ is again the normalizing constant. The regression function $f$ uses information about a datapoint to classify whether the response $y_n$ is positive. An example of a regression function is an inner product, defined by
\begin{equation}
f(x_n; \mbtheta) = \mbtheta^\top x_n\, ,
\end{equation}
which corresponds to logistic regression~\citep{bishop2006pattern}. Alternatively, a more flexible model can be built using a deep neural network~\citep{lecun2015deep}.
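As a small numerical sketch of \Cref{eq:binary-classification} with the inner-product regression function (the covariate and parameter values below are made up for illustration), the probabilities of the two responses sum to one by construction:
\begin{verbatim}
import numpy as np

def sigmoid(a):
    return 1.0 / (1.0 + np.exp(-a))

def classifier_prob(y, x, theta):
    """p(y | x; theta) with f(x; theta) = theta^T x, normalized over y in {0, 1}."""
    logit = sigmoid(theta @ x)
    Z = 1.0 + np.exp(logit)          # normalizing constant
    return np.exp(logit * y) / Z

x = np.array([1.0, 0.0, 2.0])        # illustrative covariates for one meal
theta = np.array([0.5, -1.0, 0.25])  # illustrative parameters
print(classifier_prob(1, x, theta) + classifier_prob(0, x, theta))  # sums to 1.0
\end{verbatim}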
%A binary classifier can form the basis of a recommender system as we study in \Cref{ch:rfs}.
\section{Inference}
In a probability model, computing---or inferring---properties of the probability distribution is a central task. One inference problem is to ascertain likely configurations of random variables. Another is to compute the sum of a probability distribution over a set of random variables, for example, to compute the normalizing constant~\citep{jordan2004graphical}.
\subsection{Computing Likely Configurations of Random Variables}
In the study of a probability model such as a binary classifier in \Cref{eq:binary-classification}, one question of interest is: for a set of observations $(x_n, y_n)$, what is a likely value of $\mbtheta$? Maximum likelihood estimation is one way to answer this question~\citep{bishop2006pattern}.
A probability distribution like $p(\mby \mid \mbx; \mbtheta)$ is also known as a likelihood function. It defines the likelihood of a random variable $\mby$ conditional on the value of data $\mbx$, with the current setting of the parameters $\mbtheta$. The maximum likelihood estimate of the parameters of this probability model for the data $(\mbx, \mby)$ is given by
\begin{equation}
\mbtheta^* = \argmax_{\mbtheta} p(\mby \mid \mbx; \mbtheta)\, .
\end{equation}
This maximum likelihood estimate of the parameters $\mbtheta^*$ can be computed using stochastic optimization if the data is large~\citep{robbins1951a-stochastic}. % what if the argmax is intractable, or an integral? #todo details
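As a minimal sketch of maximum likelihood estimation with stochastic optimization, the following uses the standard logistic regression likelihood $p(y_n = 1 \mid x_n; \mbtheta) = \sigma(\mbtheta^\top x_n)$ on synthetic data; the learning rate, number of steps, and true parameters are arbitrary illustrative choices.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)
theta_true = np.array([2.0, -1.0])
X = rng.normal(size=(1000, 2))
y = rng.binomial(1, 1.0 / (1.0 + np.exp(-X @ theta_true)))

theta = np.zeros(2)
learning_rate = 0.1
for step in range(5000):
    i = rng.integers(len(X))                    # one datapoint per (stochastic) step
    p = 1.0 / (1.0 + np.exp(-X[i] @ theta))
    theta += learning_rate * (y[i] - p) * X[i]  # gradient of log p(y_i | x_i; theta)
print(theta)   # approaches theta_true as the number of steps grows
\end{verbatim}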
\subsection{Computing the Normalizing Constant}
The second central inference task in probabilistic modeling is summing a probability model over a set of random variables. One example of this is computing the normalizing constant $\cZ$. This inference problem requires computing a sum: the normalizing constant ensures a probability distribution sums to $1$ over values the random variables can take.
Consider computing the normalizing constant for the binary classifier in \Cref{eq:binary-classification}. To compute the normalizing constant $\cZ$ for this probability model, we can sum over the binary values the random variable $y_n$ can take,
\begin{align}
1 &= \sum_{y_n \in \{0, 1\}} \frac{\exp\left( \sigma(f(x_n; \mbtheta)) \cdot y_n\right)}{\cZ} \\
\Rightarrow \cZ &= \sum_{y_n \in \{0, 1\}} \exp\left( \sigma(f(x_n; \mbtheta)) \cdot y_n\right)\\
\cZ &= 1 + \exp\left( \sigma(f(x_n; \mbtheta))\right)\, .
\end{align}
Inference of the normalizing constant $\cZ$ is straightforward in this probability model. The random variable $y_n$ is binary, so there are only two terms in the sum needed to compute the normalizing constant.
Next, consider computing the normalizing constant or partition function for the Ising model in \Cref{eq:boltzmann}. The random variables $z_n$ in this model also take on binary values. The partition function is computed by summing over all the values associated with all random variables in the system, $\mbz = \{z_1, \ldots, z_N\}$:
\begin{align}
1 &= \sum_{z_1 \in \{-1, +1\}} \ldots \sum_{z_N \in \{-1, +1\}} \frac{\exp(-\beta E(\mbz))}{\cZ}\\
\Rightarrow \cZ &= \sum_{z_1 \in \{-1, +1\}} \ldots \sum_{z_N \in \{-1, +1\}} \exp(-\beta E(\mbz))\, .
\label{eq:intractable-partition}
\end{align}
There are $N$ binary-valued random variables and $2^N$ terms in the sum required to compute the partition function, so inference in the Ising model is difficult. For Ising models used to study materials, the partition function is intractable to compute for most model sizes practitioners want to study and compare to physical realizations.
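The exponential cost is easy to see computationally. The sketch below evaluates the partition function of a small one-dimensional Ising chain by brute force (an illustrative simplification of \Cref{eq:intractable-partition}); it enumerates all $2^N$ configurations and quickly becomes infeasible as $N$ grows.
\begin{verbatim}
import itertools
import numpy as np

def chain_energy(z, J=1.0, H=0.0):
    """Nearest-neighbor Ising energy on a 1D chain with open boundaries."""
    z = np.asarray(z)
    return -J * (z[:-1] * z[1:]).sum() - H * z.sum()

def partition_function(N, beta=1.0, J=1.0, H=0.0):
    # Enumerates all 2**N spin configurations, so the cost grows exponentially in N.
    return sum(np.exp(-beta * chain_energy(z, J, H))
               for z in itertools.product([-1, +1], repeat=N))

print(partition_function(N=10))   # 1,024 terms; N = 50 would need ~10^15 terms
\end{verbatim}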
One way to address the issue of an intractable partition function is with sampling methods, such as Markov chain Monte Carlo~\citep{metropolis1953equation}. These algorithms enable inference by simulating likely configurations of random variables. These samples of likely configurations are used to approximate quantities of interest such as the partition function. But Markov chain Monte Carlo methods are difficult to scale to probabilistic models with large numbers of correlated random variables. In this thesis, we instead use variational inference, an approximate inference algorithm that relies on optimization instead of sampling.
% Calculating the partition function can be difficult, and there are many ways around computing the partition function. For example, sampling methods and variational methods can be used to approximate properties of distributions such as properties derived from the partition function. Markov chain Monte Carlo~\citep{metropolis1953equation} allows sampling system configurations from the Boltzmann distribution of a model; these samples can be used to approximate physical quantities. Variational inference relies on optimizing (varying) functionals to derive approximations of distributions of interest, these approximations can be used to compute properties of a model. Variational inference has roots in mean field methods in physics~\citep{saul1996mean,hoffman2013stochastic,blei2017variational} as described in \Cref{ch:background}.
\section{Variational Inference}
\input{fig/fig_vi_cartoon}
Instead of working with a probability model $p(\mbz)$ directly, \acrfull{vi} posits a family of distributions $q(\mbz; \mblambda)$ indexed by parameters $\mblambda$~\citep{blei2017variational}. The goal of \gls{vi} is to find the closest member of the variational family $q$ to the target distribution $p$. The algorithm consists of varying the parameters $\mblambda$ to improve the quality of the approximation, as illustrated in \Cref{fig:vi-cartoon}. One way to measure the distance between the variational approximation and the target distribution is with the \acrfull{kl} divergence, or relative entropy~\citep{mackay2003information,ranganath2018black}.
The intractable partition function in $p(\mbz)$ appears in the \gls{kl} divergence \gls{vi} uses to assess distance,
\begin{equation}
\label{eq:kl}
\KL{q(\mbz; \mblambda)}{p(\mbz)} = \E_q[\log q(\mbz ; \mblambda)] -\E_q[\log p(\mbz)]\, .
\end{equation}
But it is possible to derive an objective function that does not depend on the partition function, starting from the \gls{kl} divergence. Taking the Ising model in \Cref{eq:boltzmann} as an example,
\begin{align}
\KL{q(\mbz; \mblambda)}{p(\mbz)} &= \E_q[\log q(\mbz ; \mblambda)] -\E_q[\log p(\mbz)] \\
\KL{q(\mbz; \mblambda)}{p(\mbz)} &= \E_q[\log q(\mbz ; \mblambda)] -\E_q[-\beta E(\mbz) - \log \cZ] \\
\log \cZ &= \E_q[-\beta E(\mbz)] - \E_q[\log q(\mbz ; \mblambda)] + \KL{q(\mbz; \mblambda)}{p(\mbz)} \label{eq:second-last} \\
\Rightarrow \log \cZ \geq \cL(\mblambda) &\coloneq \E_q[-\beta E(\mbz)] - \E_q[\log q(\mbz ; \mblambda)]\, .
\label{eq:llbo}
\end{align}
This lower bound $\cL$ on the log normalizing constant is also called the \acrfull{elbo}, and serves as the objective function for \gls{vi}. In deriving this lower bound from \Cref{eq:second-last} to \Cref{eq:llbo}, we used the fact that the \gls{kl} is greater than or equal to zero. To show this fact, we start from Jensen's inequality for a convex function $f$, or
\begin{equation}
f(\E[\mbz]) \leq \E[f(\mbz)]\, .
\end{equation}
The logarithm in the \gls{kl} is concave, so its negative is convex. We apply Jensen's inequality to the negative \gls{kl} in \Cref{eq:kl}:
\begin{align}
-\KL{q(\mbz)}{p(\mbz)} &= \E_q\left[\log \frac{p(\mbz)}{q(\mbz )}\right] \\
&\leq \log \E_q\left[\frac{p(\mbz)}{q(\mbz )}\right]\\
&= \log \int q(\mbz) \frac{p(\mbz)}{q(\mbz )}d\mbz \\
&= \log \int p(\mbz)d\mbz \\
&= 0 \, .
\end{align}
This shows that the \gls{kl} is greater than or equal to zero~\citep{cover2012elements}.
The left-hand side in \Cref{eq:llbo} does not change as the variational parameters $\mblambda$ are varied in $\cL(\mblambda)$. Hence, maximizing the lower bound $\cL(\mblambda)$ is equivalent to minimizing the \gls{kl} divergence between the variational approximation and the target probability model.
\subsection{Example: Mean Field Variational Inference in the Ising model}
\label{sec:ising-mean-field}
To demonstrate \gls{vi}, we use the Ising model described in \Cref{sec:ising} with probability distribution $p(\mbz)$ defined in \Cref{eq:boltzmann} and energy function $E(\mbz)$ in \Cref{eq:ising-energy}. Inspecting the intractable partition function of the Ising model can help construct a variational family $q(\mbz; \mblambda)$ to approximate the Ising model.
The Ising model partition function in \Cref{eq:intractable-partition} is intractable because the sums do not decompose by random variables: every sum must be carried out in order, because the result of the $N$th sum over the random variable $z_N$ depends on the results of the sums over the previous $N-1$ random variables. This is because of interactions between dependent random variables. The first term in the energy function of the Ising model represents nearest neighbor interactions, $z_iz_j$, and is graphically equivalent to the links between nearest neighbors in \Cref{fig:graphical-model-ising}.
However, the second term in the Ising energy function in \Cref{eq:ising-energy}, $H\sum_i z_i$, does decompose by random variable. Physically, this corresponds to a magnetic field applied to the system as a whole, so every random variable is subject to the same force. Mathematically, there is an outer sum over every configuration of random variables, and in this term the results of the summation over a variable $z_i$ do not affect the summation over another variable $z_j$. So this magnetic field term can be evaluated for systems with many random variables.
\input{fig/fig-ising-markov-blanket}
The structure of the Ising model energy function and corresponding graphical model can be used to build a variational approximation $q(\mbz; \mblambda)$ as follows. If the second term of the Ising model energy function does not lead to an intractable partition function due to every random variable being subject to a magnetic field, one can construct a variational approximation by extending this physical intuition and developing the concept of a `mean field'. Consider the central random variable $z_i$ in \Cref{fig:graphical-model-ising}. Fixing the values of its nearest neighbors renders this random variable independent of the rest of the graph as shown in \Cref{fig:markov-blanket-ising}. The nearest neighbors of the central random variable can then be interpreted as giving rise to a magnetic field. The strength of this magnetic field is unknown, so we can define this unknown strength as a variational parameter $\delta H$ that we will infer using \gls{vi}. This mean field is additive to the external magnetic field $H$ applied to the system as a whole, so the energy function for the central random variable $z_i$ under this mean field assumption can be written
\begin{equation}
E_{\mf}(z_i; \delta H) = -(\delta H + H)\, z_i\, .
\end{equation}
Note that we have replaced the interaction term $J_{ij} z_iz_j$ in the Ising model energy function in \Cref{eq:ising-energy} by the mean field $\delta H$. The mean field assumption is that this term can approximate the effects of neighboring nodes~\citep{chandler1987introduction}. If we repeat this argument for every node in the graph, we arrive at the mean field energy function
\begin{equation}
E_{\mf}(\mbz; \delta H) = -(H + \delta H)\sum_{i = 1}^N z_i\, .
\label{eq:mean-field-energy}
\end{equation}
The above construction starting from the mean field assumption corresponds to the variational approximation with density
\begin{equation}
q(\mbz; \beta, \delta H) = \prod_{i=1}^N\frac{\exp(- \beta E_\mf(z_i; \delta H))}{\cZ_\mf} \, ,
\label{eq:mean-field-distribution}
\end{equation}
and we see that the variational parameter $\mblambda$ is simply the mean field strength $\delta H$. The mean field variational approximation corresponds to a fully factorized probability distribution where every random variable is independent~\citep{wainwright2008graphical}. This is a useful property, as the partition function is tractable in this mean field variational approximation: we can compute the partition function for every random variable by itself. The partition function for a single random variable $z_i$ under the mean field assumption is straightforward,
\begin{align}
\cZ_\textrm{\mf, i} &= \sum_{z_i \in \{-1, +1\}} \exp(\beta (H + \delta H) z_i) \\
&= 2 \cosh (\beta (H + \delta H)) \, ,
\label{eq:partition-function-i}
\end{align}
and the partition function for the variational approximation for all variables is simply $\cZ_\mf = \cZ_\textrm{\mf, i}^N$. Similarly, the average of a random variable under the variational distribution is readily computed as
\begin{align}
\begin{split}
\E_{q(z_i)}[z_i] &= \sum_{z_i \in \{-1, +1\}} \frac{z_i \exp(- \beta E_\mf(z_i; \delta H))}{\cZ_\textrm{\mf, i}} \\
&= \sum_{z_i \in \{-1, +1\}} \frac{z_i \exp(\beta (H + \delta H) z_i)}{2 \cosh(\beta (H + \delta H))} \\
&= \tanh(\beta (H + \delta H))\, .
\label{eq:mf-mean}
\end{split}
\end{align}
Now that we have constructed a variational family for the Ising model, we can proceed with the \gls{vi} algorithm. The next step is writing down and maximizing the lower bound on the log partition function to minimize the \gls{kl} between our approximating distribution and model.
The lower bound on the log partition function $\cL(\delta H)$ in \Cref{eq:llbo} becomes
\begin{align}
\cL(\delta H) &= \E_q[-\beta E(\mbz)] - \E_q[\log q(\mbz; \delta H)] \\
&= \E_q\left[\frac{1}{2}\beta\sum_{i, j} J_{ij}z_i z_j + \beta H\sum_i z_i\right] + \E_q\left[ -\beta (H + \delta H)\sum_i z_i\right] + \log \cZ_{\mf} \\
&= \E_q\left[\frac{1}{2}\beta\sum_{i, j} J_{ij}z_i z_j - \beta\delta H\sum_i z_i\right] + \log \cZ_{\mf}\, , \\
\intertext{and we can take the expectation inside the sum using the fact that the mean field variational distribution is fully factorized, so }
\cL(\delta H) &= \frac{1}{2}\beta \sum_{i, j}J_{ij}\E_{q(z_i)}[z_i]\E_{q(z_j)}[z_j] - \beta \delta H \sum_i \E_{q(z_i)} [z_i] + \log \cZ_{\mf}\, .
\end{align}
In the first term, recall that two random variables $z_i$ and $z_j$ have the same distribution under the mean field assumption, and that every variable interacts with its four nearest neighbors in the Ising model. The lower bound on the log partition function then becomes
\begin{align}
\cL(\delta H) &= \frac{1}{2} \beta 4JN \E_{q(z_i)}[z_i]^2 - \beta N \delta H \E_{q(z_i)} [z_i] + \log \cZ_{\mf}\, .
\end{align}
The next step in the \gls{vi} algorithm is maximizing this lower bound, to minimize the \gls{kl} divergence between the variational approximation and the model. Taking the derivative with respect to $\delta H$ and suppressing the subscript of the expectation operator, we get
\begin{align}
\frac{\partial\cL(\delta H)}{\partial \delta H} &= N\beta\left(4J\E[z_i]\partial_{\delta H}\E[z_i] - \E[z_i] - \delta H \partial_{\delta H} \E[z_i]\right) + N\beta \tanh(\beta (H + \delta H))\, .
\end{align}
Next, setting this derivative to zero and cancelling out terms (and using \Cref{eq:mf-mean}) leads to
\begin{align}
0 &= 4J\E[z_i]\partial_{\delta H}\E[z_i] - \delta H \partial_{\delta H} \E[z_i] \\
\Rightarrow \delta H \partial_{\delta H} \E[z_i] &= 4J\E[z_i]\partial_{\delta H}\E[z_i] \\
\Rightarrow \delta H^* &= 4J\E[z_i]\, .
\end{align}
This shows that under a mean field assumption, the variational parameter that maximizes the lower bound on the log partition function---and hence minimizes the \gls{kl} divergence between the approximation and model---is proportional to the mean field around any node in the system. The structure of the model informs our choice of variational approximation.
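Combining this optimality condition with \Cref{eq:mf-mean} gives the self-consistency equation $\delta H = 4J\tanh(\beta(H + \delta H))$, which can be solved by fixed-point iteration. The following minimal sketch (with illustrative values of $\beta$, $J$, and $H$) is one way to carry out this iteration:
\begin{verbatim}
import numpy as np

def mean_field_delta_H(beta, J=1.0, H=0.0, iters=500):
    """Fixed-point iteration for delta_H = 4 J E_q[z_i], with
    E_q[z_i] = tanh(beta (H + delta_H)) under the mean field approximation."""
    delta_H = 1.0  # nonzero initialization so the iteration can leave delta_H = 0
    for _ in range(iters):
        delta_H = 4.0 * J * np.tanh(beta * (H + delta_H))
    return delta_H

for beta in [0.1, 0.2, 0.3, 0.5]:
    dH = mean_field_delta_H(beta)
    print(beta, dH, np.tanh(beta * dH))  # E_q[z_i] at H = 0 for each temperature
\end{verbatim}
At low reciprocal temperatures the iteration settles at $\delta H = 0$, while for $4J\beta > 1$ it settles at a nonzero $\delta H$, the mean-field signature of spontaneous magnetization.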
The quality of the variational approximation $q(\mbz; \beta, \delta H^*)$ from \gls{vi} can be assessed in several ways. For example, the magnetization $M$ or the free energy $F$ can be calculated using the variational approximation, and these values can be compared to Markov Chain Monte Carlo simulations in small systems. This can be viewed as a type of predictive check for a \gls{vi} algorithm~\citep{blei2014build}. However, the development of theoretical guarantees to assess the quality of variational approximations found with \gls{vi} is an open area of research~\citep{wang2019frequentist}. Practitioners must currently empirically evaluate the quality of variational approximations according to the task at hand, as we do in \Cref{ch:hvm,ch:pvi}.
\subsection{Variational Inference Originated in Statistical Physics}
Previously, we derived a variational approximation to the Ising model by making a mean field assumption. That the language of physics is used in machine learning algorithms such as \gls{vi} is no coincidence. In fact, \citet{feynman1972statistical,feynman2018statistical} derives the \gls{gbf} inequality for use in a variational principle for approximating intractable partition functions using mean field assumptions. Consider a model with energy function $E$ and partition function $\cZ$, and a mean field variational approximation with energy function $E_\mf$ (and corresponding partition function $\cZ_\mf$). Then the \gls{gbf} inequality reads~\citep{feynman1972statistical,feynman2018statistical}
\begin{equation}
\cZ \geq \cZ_\mf\exp\left(-\beta \braket{E - E_\mf}_\mf\right) \, .
\label{eq:gbf-inequality}
\end{equation}
In physics, angle-bracket notation is used to denote expectations. For example, expectations with respect to \Cref{eq:mean-field-distribution} are written $\braket{\; \cdot \;}_\mf$. Rewriting the \gls{gbf} inequality with the statistics notation for expectations, $\E_q[\;\cdot\;]$, yields
\begin{align}
\cZ &\geq \cZ_\mf\exp\left(-\beta \E_q[E - E_\mf]\right) \, .
\end{align}
Taking the logarithm, we recover the lower bound on the log partition function
\begin{align}
\log \cZ &\geq \E_q[-\beta E] - \E_q[-\beta E_\mf] + \log \cZ_\mf\\
&= \E_q[-\beta E] - \E_q[\log q_\mf(\mbz; \mblambda)] \\
&= \cL(\mblambda)\, .
\end{align}
This is identical to the log partition function lower bound in \Cref{eq:llbo}. \citet{hoffman2013stochastic} review the historical roots of the variational principle in its machine learning incarnation.
To complete the connection to machine learning, we relate this log partition function lower bound to the evidence lower bound studied in the \gls{vi} literature~\citep{blei2017variational}. A probabilistic model of data might have the following process for generating data $\mbx$ using prior information in latent variables $\mbz$:
\begin{align*}
\mbz &\sim p(\mbz)\\
\mbx &\sim p(\mbx \mid \mbz)
\end{align*}
The posterior distribution of this model is computed using Bayes' rule,
\begin{equation*}
p(\mbz \mid \mbx) = \frac{p(\mbx \mid \mbz) p(\mbz)}{p(\mbx)} \, .
\end{equation*}
The model evidence $p(\mbx)$ is the partition function of the posterior. Calculating the partition function is what makes posterior inference difficult, as it requires integration over the latent variables $\mbz$,
\begin{equation*}
p(\mbx) = \int p(\mbx, \mbz) d\mbz \, ,
\end{equation*}
and the latent variables $\mbz$ are typically high-dimensional, such as the number of random variables in an Ising model. But \gls{vi} can be used to approximate this intractable integral. The lower bound on the log partition function becomes the \gls{elbo}:
\begin{align}
\log p(\mbx) &\geq \cL(\mblambda) \\
\cL(\mblambda) &= \E_q[\log p(\mbx, \mbz)] - \E_q[\log q(\mbz ; \mblambda)] \, .
\end{align}
An example of a latent variable model without data is the Ising model, for which the data is the empty set, $\mbx = \{\}$. In this case, $\cL(\mblambda)$ is a lower bound on the log partition function, as we derived in \Cref{eq:llbo}, and is identical to the \gls{gbf} inequality.
% \gls{vi} is an algorithm to find a good approximation to a target probability distribution that has an intractable integral, such as the sum needed to compute a partition function. We now turn to the second inference problem of computing likely configurations of variables in a probability model.
\section{Conclusion}
We reviewed probability models and gave examples of their use in statistical physics and recommender systems. The task of inference is central to working with probability models; we described variational inference and maximum likelihood estimation. The following chapters address the issue of building the structure of a problem into a performant probability model, whether that structure concerns the connectivity in a statistical physics model, the structure of datapoints in a recommender system, or information about a variational approximation useful in an optimization algorithm for this approximation.
"alphanum_fraction": 0.7613261649,
"avg_line_length": 118.7234042553,
"ext": "tex",
"hexsha": "3690e0f61167b60ce693dcda205a1b464e5bd106",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "287484c87db0eca46f4cdae70ff8582bd66ce5a3",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "altosaar/thesis",
"max_forks_repo_path": "ch_background.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "287484c87db0eca46f4cdae70ff8582bd66ce5a3",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "altosaar/thesis",
"max_issues_repo_path": "ch_background.tex",
"max_line_length": 1175,
"max_stars_count": 6,
"max_stars_repo_head_hexsha": "287484c87db0eca46f4cdae70ff8582bd66ce5a3",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "altosaar/thesis",
"max_stars_repo_path": "ch_background.tex",
"max_stars_repo_stars_event_max_datetime": "2021-06-26T12:18:53.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-05-21T18:56:27.000Z",
"num_tokens": 7507,
"size": 27900
} |
\section{Method}
\label{sec:method}
% \nothing{In this section, we present our perception-guided neural scene synthesis for virtual reality. \zh{As a real-time overview: we first predict the the color as well as corresponding density for each pixel in the fovea, mid- and far- periphery (or designed 3D point?) given the camera position. Next we apply ray marching to generate the elemental image. Afterwards, we blend the results from each part and tune the contrast for each eye.}\qisun{merged with the following sentences. see if happy}}
Based on a concentric spherical representation and a trained network predicting RGBDs on the spheres (\Cref{sec:method:representation}), our system comprises two main rendering steps at run time: synthesizing visual-/stereoscopic-acuity-adaptive elemental images with ray marching for the fovea, mid-, and far periphery, followed by image-based rendering to composite the displayed frames (\Cref{sec:synthesis}).
To balance precision and performance, we further craft an analytical spatial-temporal model that optimally determines our intra-system variables, including the representation sparsity and neural-network complexity (\Cref{sec:method:optimization}).
\input{TOG/fig_notations}
\subsection{Egocentric Neural Representation and Training}
\label{sec:method:representation}
The recent single-object-oriented ``outside-in'' view synthesis methods \cite{sitzmann2019deepvoxels,mildenhall2020nerf} typically represent the training targets using uniform voxelization. However, immersive VR environments introduce unique and open challenges for such parameterization due to the commonly egocentric (first-person) and ``inside-out'' viewing perspective (e.g., \Cref{fig:teaser:scene}).
As a consequence, neural representations of large virtual environments typically suffer from ghosting artifacts, low resolution, or slow rendering speed (\Cref{fig:teaser:quality}).
%We tailor our method specifically for egocentric (first-person) views in immersive VR environments.
%rendering of immersive viewer-centered ``inside-out'' scenes is challenging owing to the rapid variation of scene content.
\paragraph{Egocentric coordinate}
To tackle this problem, we are inspired by the recent panoramic imagery dis-occlusion methods \cite{Lin:DeepPanorama,Benjamin:2020:RTV,Broxton:immersiveLF}: we depict the rapidly varying first-person views with concentric spherical coordinates. This representation has been shown to allow for robust rendering at real-time rates and 6DoF interaction to navigate inside complex immersive environments.
As visualized in \Cref{fig:notations}, our representation is parameterized with the number of concentric spheres per neural network ($\sphereNum$) and their respective radii ($\mathbf{\sphereRadius}=\{\sphereRadius_i\},i\in [1,\sphereNum]$). \new{Under this spherical system, a given 3D spatial position can be represented as $\SpatialPt=\SpatialPt(\sphereRadius,\theta,\phi)$, where $\theta$ and $\phi$ are the two angular coordinates.
Similar to \cite{mildenhall2020nerf}, the run-time rendering goal is predicting a 4D vector $(r,g,b,d)$ for each $\SpatialPt$, followed by a view-dependent ray marching through this intermediate function to synthesize individual pixels.
}
Here, $(r,g,b)$ and $d$ are the color and density, respectively.
% \dnc{
% \paragraph{Network structure \textbf{*May need a name for our network?*}}
% Given a camera ray $(\rayo, \rayd)$, $\sphereNum$ points $\{\mathbf{\hat{\SpatialPt}}_i\}$ are sampled along it by calculating its intersections with $\sphereNum$ concentric spheres. Similar to NeRF\cite{mildenhall2020nerf}, we apply a trigonometric input encoding to every $\mathbf{\hat{\SpatialPt}_i}$ before feeding to a multi-layer perception (MLP). MLP is a network composed of $\mlpLayerNum$ fully-connected layers with $\mlpChannelNum$ channels in each layer. Then we integrate the predicted colors and densities through volume rendering method. The main differences between NeRF and our network are the input and output of the MLP component. NeRF takes every encoded coordinates as the input and outputs the corresponding color and density. This leads to extremely slow performance for view synthesis because network inference needs to be performed $\sphereNum$ times for each ray. To tackle this issue, we concatenate all $\sphereNum$ encoded coordinates to a vector and feed the coordinates to the MLP component so we only need to infer once for each ray. The output is the concatenated vector of colors and densities corresponding to input coordinates. As shown in \autoref{sec:result}, it is possible to achieve high-quality real-time egocentric and stereoscopic view synthesis through our network. \textbf{*May need some reason: why such a network structure works in our scenario?*}
% }
%\zh{ try rewriting a little bit.
\qisun{(May 15) double check and revise this paragraph, esp the beginning claims. I may want to add a small figure inset to explain the reasonale.}
\new{
\paragraph{Neural representation}
Existing neural rendering methods for ``outside-in'' viewing independently train neural networks to predict the 4D vector for each individual $\SpatialPt$. This is due to the high variation in viewpoints rather than in the viewing targets.
However, egocentric viewing is the opposite: despite the translation for 6DoF viewing, the observer may change viewing targets frequently by rotating the head and gaze (see \Cref{fig:notations}).
That is, given a neural network's capability, a local egocentric neural representation may encode fewer viewing changes but more spatial variance.
To achieve this aim, we design the network to infer an array of vectors per viewing ray:
\begin{equation}
\mlpFunc(\rayo, \rayd) \triangleq (\mathcal{R},\mathcal{G},\mathcal{B},\mathcal{D}).
\label{eq:vector_rep}
\end{equation}
Here, $\rayo/\rayd$ defines a ray's origin/direction. $\mathcal{R},\mathcal{G},\mathcal{B},\mathcal{D}$ are $\sphereNum$ dimensional vectors representing the R/G/B/intensities of the $\sphereNum$ intersecting points ($\SpatialPt({\sphereRadius_i,\theta_i,\phi_i}), i\in[1,\sphereNum]$) between the ray and the concentric spheres. The system can then render individual color channels via integrating over $\mathcal{R}/\mathcal{G}/\mathcal{B}$ with $\mathcal{D}$ as weights.
%$\mlpFunc(\hat{\SpatialPt})\triangleq(r,g,b,d)$ of $\sphereNum$ intersection points $\mathbf{\hat{\SpatialPt}}({\sphereRadius_{i,i\in[1,\sphereNum]},\theta,\phi})$ between the ray and $\sphereNum$ spherical surfaces.
%Given a camera ray $(\rayo, \rayd)$, we calculated the intersection points between the ray $(\rayo, \rayd)$ and $\sphereNum$ concentric spheres. Thus, we have $\sphereNum$ points $\{\mathbf{\hat{\SpatialPt}}_i\}$ for each ray.
Denser spherical sampling (i.e., higher $\sphereNum$) leads to more precise quality but slower run-time performance.
Inspired by NeRF \cite{mildenhall2020nerf}, we devise a machine learning approach that encodes $\mlpFunc$ as a multi-layer perceptron (MLP) neural network.
NeRF predicts each individual intersection point $\{\mathbf{\hat{\SpatialPt}}_i\}$ before calculating the final color, causing slow performance due to the $\sphereNum$ network inference operations per ray at run time.
Our egocentric-viewing-tailored representation (\Cref{eq:vector_rep}) concatenates all $\sphereNum$ encoded coordinates into a single vector and feeds it to the MLP module, requiring only one inference per ray.
During training, we represent the input ray $\{\rayo, \rayd\}$ by its $\sphereNum$ intersection points $\mathbf{\hat{\SpatialPt}_i}$. Our MLP module contains $\mlpLayerNum$ fully-connected layers with $\mlpChannelNum$ channels in each layer. %After, we employ a ray marching to calculate the color based on the predicted colors and densities of $\sphereNum$ points.
%As shown in \autoref{sec:result}, it is possible to achieve high-quality real-time egocentric and stereoscopic view synthesis through our network.
}
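To make the input and output shapes of \Cref{eq:vector_rep} concrete, the following is a minimal PyTorch sketch of this per-ray structure; the layer count, channel width, encoding frequencies, and the simplistic softmax compositing are illustrative assumptions rather than our exact implementation.
\begin{verbatim}
import torch
import torch.nn as nn

N_SPHERES, N_FREQ, LAYERS, CHANNELS = 32, 4, 4, 256   # illustrative hyperparameters

def encode(points):                        # points: (rays, N_SPHERES, 3)
    """Trigonometric positional encoding, NeRF-style; one flat vector per ray."""
    feats = [points]
    for k in range(N_FREQ):
        feats += [torch.sin(2**k * points), torch.cos(2**k * points)]
    return torch.cat(feats, dim=-1).flatten(1)

in_dim = N_SPHERES * 3 * (1 + 2 * N_FREQ)
layers, dim = [], in_dim
for _ in range(LAYERS):
    layers += [nn.Linear(dim, CHANNELS), nn.ReLU()]
    dim = CHANNELS
layers += [nn.Linear(dim, N_SPHERES * 4)]              # (r, g, b, d) per sphere
mlp = nn.Sequential(*layers)

points = torch.rand(8, N_SPHERES, 3)                   # 8 rays of intersection points
out = mlp(encode(points)).view(8, N_SPHERES, 4)        # one inference per ray
rgb, density = torch.sigmoid(out[..., :3]), torch.relu(out[..., 3])
weights = torch.softmax(density, dim=-1)               # simplistic compositing weights
pixel = (weights.unsqueeze(-1) * rgb).sum(dim=1)       # one RGB value per ray
print(pixel.shape)                                     # torch.Size([8, 3])
\end{verbatim}
In practice, the final colors follow the back-to-front ray marching described above rather than the softmax weighting used in this sketch.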
%\paragraph{Training and synthesis}
%For each scene, we approximate the function $\mlpFunc$ by a multilayer perceptron (MLP) network, similar to \cite{mildenhall2020nerf}. The network is composed of $\mlpLayerNum$ fully-connected layers of $\mlpChannelNum$ channels, each using ReLu as the activation function. We further determine $\mlpLayerNum$, $\mlpChannelNum$ and $\sphereNum$ via a spatial-temporal optimization in \Cref{sec:method:optimization}. Given rays of pixels in an image, the network predicts the colors and densities along these rays on every sphere. The ray marching weighted integrates predictions in back-to-front order to obtain actual colors of corresponding pixels. Similar to \cite{mildenhall2020nerf}, we apply a trigonometric input encoding and compute the L2-distance between the volume-rendered and mesh-rendered images as the training loss function. We discuss the specifics of training data creation and implementations in \Cref{sec:impl}.\nothing{\zh{Considering removing the last sentence. I think it is clear that this paragraph is not about details, guess reviewers are not expected to see the details. Or we should lead them not to expect details here.}}
\subsection{Gaze-Contingent Synthesis during Run-time}
\label{sec:synthesis}
%The scene representation above is created to parameterize a given 3D space for neural synthesis.
%the foveated rendering manner of VR display through training two orthogonal neural networks that are tailored for the perceptual acuity variances.
%\zh{shall we explicitly name the representation with exactly `concentric spherical coordinate system' earlier?}
Our concentric spherical coordinate, as described in \Cref{sec:method:representation}, addresses the large view target variance problem in the training stage.
However, it may still suffer from significant rendering latency (about half a second for each stereo frame). This is another essential challenge that has kept neural representations unsuitable for immersive viewing so far.
In this research, we leverage the spatially adaptive human visual- and stereoscopic- sensitivities to unlock fast runtime inference.
Instead of the typical single image prediction, we synthesize multiple elemental images to enable real-time responsiveness. The elemental images are generated based on the viewer's head and gaze motions and are adapted to the retinal acuity in resolution and stereo. %We detail our network design and elemental image synthesis in the following section. %We enable real-time responsiveness via introducing gaze-adaptive synthesis mechanisms.
\input{fig_system}
\subsubsection{Adaptive Monoscopic Acuity}
% The human vision is foveated. With the high field-of-view VR displays, we exploit the spatially-adaptive visual acuity to significantly accelerate the computation without compromising {\it perceptual} quality.
The human retinal ganglion cells, which collect and transmit visual information, are not uniformly distributed. Instead, their density in the visual field close to the retinal center is much higher than in the periphery \cite{watson2014formula}. This is referred to as foveated vision, with recent applications in accelerating immersive rendering \cite{Patney:2016:TFR,Kaplanyan:2019:DNR}.
Inspired by this, we significantly accelerate the runtime inference by exploiting this spatially-adaptive visual acuity without compromising the {\it perceptual} quality.
\new{Specifically, given the device-tracked camera position ($\rayo$), direction ($\camDir$), and gaze position ($\gazeDir$), we synthesize three elemental images covering different eccentricity ranges of the field-of-view (FoV) for each eye: the fovea ($\imageFoveal(\rayo,\camDir,\gazeDir)$, $0-20$ deg), the mid-eccentricity ($\imageMid(\rayo,\camDir,\gazeDir)$, $0-45$ deg) and the entire visual field ($\imageFar(\rayo,\camDir)$, $0-110$ deg), as shown in \Cref{fig:system}. Note that $\imageFar$ is independent of the gaze direction $\gazeDir$.
% ZH: rewrite as a determined sentence.
Given the decreasing visual acuity from low to high eccentricities, we devise two orthogonal networks with different hyperparameters: $\mlpFunc_{foveal}$ for $\imageFoveal$ (the high-acuity fovea), and $\mlpFunc_{periph}$ for $\imageMid$ and $\imageFar$ (reduced acuity).}
%\zh{Considering users have different perceptual sensitivities when the sizes of focus areas are different. We further trained two orthogonal networks with different hyper parameters. Eventually we trained $\mlpFunc_{foveal}$ for foveal image $\imageFoveal$ and $\mlpFunc_{periph}$ for mid-eccentricity image $\imageMid$ and high-eccentricity image $\imageFar$.}
To incorporate the display capabilities and aspect ratios, we define the resolutions of $\imageFoveal$/$\imageMid$/$\imageFar$ as \new{$256^2$/$256^2$/$230\times256$}.
That is, the $\imageFoveal$ has the highest spatial resolution of \new{$12.8$} pixels per degree (PPD), higher than those of $\imageMid$ ($5.7$ PPD) and $\imageFar$ ($2.33$ PPD).
%In each frame, the two networks independently return three elemental images, as seen from the separation range in deg, gradually larger areas along eccentricity. An example is shown in \Cref{fig:system}.
% ZH: add conclusive or ending sentences here.
%In each frame, the two networks independently return three elemental images, as seen from the separation range in deg, gradually larger areas along eccentricity. An example is shown in \Cref{fig:system}.
% ZH: add conclusion or ending sentences here.
\subsubsection{Adaptive Stereoscopic Acuity}
\qisun{(May 15) I feel like this section is a bit thin. Better idea on extending?}
Head-mounted VR displays require stereo rendering to provide parallax depth cues.
So do the elemental images $\imageFoveal^{\{l,r\}}$, $\imageMid^{\{l,r\}}$, and $\imageFar^{\{l,r\}}$ for each (\underline{$l$}eft and \underline{$r$}ight) eye.
Stereoscopic rendering, however, doubles the inference computation, which is critical for the latency- and frame-rate-sensitive VR experience
\nothing{The separation of the three elemental images considers the varied visual acuity. However, in a head-mounted stereo VR displays, the rendered image are for two eyes, resulting in double computation for $\imageFoveal^{\{l,r\}}$, $\imageMid^{\{l,r\}}$, and $\imageFar^{\{l,r\}}$. Here $l$ and $r$ represent the projection images for left ($\rayo^l$) and right ($\rayo^r$) eyes respectively.
Whereas, because $\imageMid^{\{l,r\}}$ and $\imageFar^{\{l,r\}}$ demand high spatial resolution due to their large eccentricity coverage, }
(please refer to \Cref{sec:study:intra} for breakdown comparisons).
We adapt the inference process to the stereoscopic acuity of human perception.
In fact, besides the spatial visual acuity of monoscopic vision, psychophysical studies have also revealed that human stereopsis declines significantly with distance from the gaze point \cite{mochizuki2012magnitude}. Motivated by this characteristic, we perform the computation with $\imageFoveal^{\{l,r\}}$, $\imageMid^c$, and $\imageFar^c$ instead of inferring $6$ elemental images, where $c$ indicates the view at the central eye (the midpoint of the left and right eyes). \Cref{fig:mono} visualizes the stereopsis changes from the adaptation using an anaglyph.
\input{TOG/fig_mono_comparison}
\subsubsection{Real-time frame composition}%\dnc{May be moved to section 4 (merge with the paragraph 'Integration'). I think it's not closely related to our methodology and we have no key contributions about that. It's more like an engineering issue}\qisun{This whole section? I was concerned that would break the method completeness. Intentionally kept it short according to contribution level here.}
\input{TOG/fig_blending}
With the obtained elemental images as input, an image-based rendering pass is then executed in the fragment shader to generate the final frame for each eye (\Cref{fig:system:blending}).
The output frames are displayed on the stereo VR HMDs.
Two adjacent layers are blended using a smooth-step function across $40\%$ of the inner layer, which enhances visual consistency at the boundaries between layers \cite{Guenter:2012:F3G}.
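As a concrete illustration, the blend weight of the outer layer at a pixel can be computed from its eccentricity with a standard smooth-step ramp over the outer $40\%$ of the inner layer. The Python sketch below shows this weighting; the $10^\circ$ inner extent is a placeholder, not the value used by our renderer.
\begin{verbatim}
def smoothstep(e0, e1, x):
    # Standard smooth-step: 0 below e0, 1 above e1, smooth in between.
    t = min(max((x - e0) / (e1 - e0), 0.0), 1.0)
    return t * t * (3.0 - 2.0 * t)

def outer_layer_weight(ecc_deg, inner_extent_deg, overlap=0.4):
    # Ramp over the outer `overlap` fraction of the inner layer,
    # so adjacent layers cross-fade instead of showing a hard seam.
    start = inner_extent_deg * (1.0 - overlap)
    return smoothstep(start, inner_extent_deg, ecc_deg)

for ecc in (0.0, 6.0, 8.0, 9.0, 10.0, 12.0):
    print(ecc, round(outer_layer_weight(ecc, 10.0), 3))
\end{verbatim}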
To accommodate the mono-view $\imageMid^c$ and $\imageFar^c$, we shift them towards each eye (by $-/+\,\frac{\rayo^r-\rayo^l}{2}$ for the left/right eye, respectively) according to the approximated foveal depth range.
Lastly, we enhance the contrast following the mechanism of \cite{Patney:2016:TFR} to further preserve the visual fidelity of the peripheral elemental images despite their low PPD. The enhancement is applied to each elemental image before blending, with separate parameters for the foveal, mid-, and far-periphery layers. The details are visualized in \Cref{fig:method:blending}.
\subsection{Latency-Quality Joint Optimization}
\label{sec:method:optimization}
As a view synthesis system based on a sparse egocentric representation (the $\sphereNum$ spheres per network) and neural network synthesis (the $\mlpLayerNum, \mlpChannelNum$), neural rendering methods inevitably introduce approximation errors. \new{The errors can be reduced by introducing additional networks (thus a lowered $\sphereNum$, assuming a fixed number of spheres representing a scene) and by increasing each individual network's capability (i.e., higher $\mlpLayerNum, \mlpChannelNum$). However,}
these variables also significantly increase the online computation time, which is determined by inference of the function $\mlpFunc$ and by ray marching.
Since VR strictly demands both quality and performance, we present a spatial-temporal model that analytically depicts these correlations and optimizes the variables to seek the optimal latency-quality trade-off for human viewers.
\paragraph{Precision loss of a 3D scene}
As shown in \Cref{fig:notations}, under the egocentric representation, a 3D point $\SpatialPt$ is re-projected to the nearest sphere, along the line connecting it to the origin:
\begin{equation}\label{eq:closestPoint3D}
{\pt^\prime}(\sphereNum,\mathbf{\sphereRadius},\SpatialPt) \triangleq \sphereRadius_k \frac{\SpatialPt}{\norm{\SpatialPt}}, \ k=\argmin_{j\in[1,\sphereNum]}\left(\norm{\norm{\SpatialPt}-\sphereRadius_j}\right).
\end{equation}
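A minimal numerical sketch of this re-projection, assuming a plain Python/NumPy setting and arbitrary placeholder radii (not our optimized $\mathbf{\sphereRadius}$), is:
\begin{verbatim}
import numpy as np

def reproject_to_nearest_sphere(p, radii):
    # Snap a 3D point p onto the egocentric sphere whose radius is
    # closest to ||p||, along the line from the origin through p.
    r = np.linalg.norm(p)
    k = int(np.argmin(np.abs(r - radii)))   # index of the nearest sphere
    return radii[k] * p / r                 # p' in the equation above

radii = np.array([1.0, 2.0, 4.0, 8.0])      # placeholder radii
print(reproject_to_nearest_sphere(np.array([0.0, 2.6, 1.0]), radii))
\end{verbatim}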
Similar to volume-based representations, the multi-spherical system is defined in the discrete domain. The sparsity thus naturally introduces approximation errors that compromise the synthesis quality. To analytically model the precision loss, we investigate the geometric relationship among the camera, the scene, and the representation.
As illustrated in \Cref{fig:teaser:scene,fig:notations}, for a sphere centered at the origin with radius $\sphereRadius$, its intersection (if it exists) with a directional ray $\{\rayo,\rayd\}$ is
\begin{equation}
\intersectionFunc(\sphereRadius,\rayo,\rayd) = \rayo + \left({\left((\rayo\cdot\rayd)^2-\norm{\rayo}^2+\sphereRadius^2\right)}^{\frac{1}{2}}-\rayo\cdot\rayd\right)\rayd,
\label{eq:raySphereIntersection}
\end{equation}
where $\rayo$ and $\rayd$ are the ray's origin point and normalized direction, respectively.
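The closed form assumes the sphere is centered at the origin and $\rayd$ is normalized; the larger root is taken since the viewpoint lies inside the spheres of interest. A short Python/NumPy sketch of \Cref{eq:raySphereIntersection}, with illustrative inputs, is:
\begin{verbatim}
import numpy as np

def ray_sphere_intersection(radius, o, d):
    # Intersection of the ray o + t*d (||d|| = 1) with the
    # origin-centered sphere of the given radius.
    # Returns None when the discriminant is negative.
    od = float(np.dot(o, d))
    disc = od * od - float(np.dot(o, o)) + radius * radius
    if disc < 0.0:
        return None
    return o + (np.sqrt(disc) - od) * d

o = np.array([0.1, 0.0, 0.0])   # camera near the origin (illustrative)
d = np.array([0.0, 0.0, 1.0])   # unit viewing direction
print(ray_sphere_intersection(2.0, o, d))
\end{verbatim}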
Inversely, given a viewpoint $\rayo$ observing a spatial location $\SpatialPt$, the ray connecting them has the direction $\rayd(\SpatialPt,\rayo)=\frac{\SpatialPt-\rayo}{\norm{\SpatialPt-\rayo}}$. This ray may intersect more than one sphere. Among these intersections, the closest one to $\SpatialPt$ is:
\begin{equation}\label{eq:closestPointImg}
\begin{aligned}
\hat{\pt}&(\rayo,\sphereNum, \mathbf{\sphereRadius},\SpatialPt) \triangleq \\
&\intersectionFunc(\sphereRadius_k,\rayo,\rayd(\SpatialPt,\rayo))\ |\ k=\argmin_{j\in[1,\sphereNum]}\left(\norm{\norm{\SpatialPt}-\sphereRadius_j}\right).
\end{aligned}
\end{equation}
In the 3D space, the offset distance $\norm{\pt^\prime-\hat{\pt}}$ indicates the precision loss at $\SpatialPt$ introduced by the representation. By integrating over all views and scene points, we obtain:
\begin{equation}
\begin{aligned}
\sparseError(\sphereNum, \mathbf{\sphereRadius}) &= \iint \norm{\pt^\prime(\sphereNum,\mathbf{\sphereRadius},\SpatialPt)-\hat{\pt}(\rayo,\sphereNum, \mathbf{\sphereRadius},\SpatialPt)} \mathbf{d}\SpatialPt \mathbf{d}\rayo,\\
&\forall \{\rayo, \SpatialPt\} \text{ pair without occlusion in between}.
\label{eq:sparseError}
\end{aligned}
\end{equation}
By integrating over all 3D vertices $\SpatialPt$ and camera positions $\rayo$ in our dataset sampling, $\sparseError$ depicts the generic representation precision of a scene, given a coordinate system defined by $\sphereNum$ and $\mathbf{\sphereRadius}$.
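In practice, \Cref{eq:sparseError} can be approximated by Monte Carlo sampling over the recorded camera positions and visible scene points. The sketch below illustrates such an estimator; occlusion testing is omitted and the sampling sets are placeholders rather than our captured data.
\begin{verbatim}
import numpy as np

def nearest_sphere_point(p, radii):
    r = np.linalg.norm(p)
    k = int(np.argmin(np.abs(r - radii)))
    return radii[k] * p / r

def ray_sphere_point(radius, o, d):
    od = float(np.dot(o, d))
    disc = od * od - float(np.dot(o, o)) + radius * radius
    return o + (np.sqrt(disc) - od) * d if disc >= 0.0 else None

def sparse_error(points, cameras, radii):
    # Mean offset between the re-projected point p' and the closest
    # ray/sphere intersection p_hat, over all sampled pairs.
    total, count = 0.0, 0
    for p in points:
        p_prime = nearest_sphere_point(p, radii)
        k = int(np.argmin(np.abs(np.linalg.norm(p) - radii)))
        for o in cameras:
            d = (p - o) / np.linalg.norm(p - o)
            p_hat = ray_sphere_point(radii[k], o, d)
            if p_hat is not None:
                total += np.linalg.norm(p_prime - p_hat)
                count += 1
    return total / max(count, 1)

rng = np.random.default_rng(0)
points = rng.uniform(-5, 5, size=(64, 3))      # placeholder scene points
cameras = rng.uniform(-0.5, 0.5, size=(8, 3))  # placeholder viewpoints
print(sparse_error(points, cameras, np.array([1.0, 2.0, 4.0, 8.0])))
\end{verbatim}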
In comparison, a neural representation aims at predicting the projected image given a camera position $\rayo$ and orientation $\camDir$. Thus, we further extend \Cref{eq:sparseError} to image space to analyze the error under a camera projection matrix $\projectionMatrix(\rayo,\camDir)$ as
\begin{align}
\imgSpaceError(\sphereNum, \mathbf{\sphereRadius}, \rayo, \camDir) = \int \norm{\projectionMatrix(\rayo,\camDir)\cdot\left(\SpatialPt-
\hat{\pt}(\rayo,\sphereNum, \mathbf{\sphereRadius},\SpatialPt)\right)}\mathbf{d}\SpatialPt.
\label{eq:imageError}
\end{align}
From \Cref{fig:notations,eq:imageError}, we observe:
given a fixed min/max range of $\mathbf{\sphereRadius}$, $\sphereNum$ is negatively correlated with $\imgSpaceError$;
with a fixed $\sphereNum$, the match between the distribution of the $\sphereRadius_j$ and the scene content (i.e., the distribution of the $\SpatialPt$s) also determines $\imgSpaceError$.
For a neural scene representation, however, arbitrarily increasing the network capability significantly raises the challenges in training precision and inference performance.
Likewise, increasing the representation density (i.e., higher $\sphereNum$) and/or the network complexity (i.e., higher $\mlpLayerNum$/$\mlpChannelNum$) naturally improves the image output quality (lower error in \Cref{eq:imageError}). However, it also significantly increases the computation during ray marching, so the gain in spatial quality comes at a temporal (latency) cost. In the performance-sensitive VR scenario, this added latency breaks the continuity of the viewing experience and may cause simulator sickness. Thus, with a content-aware optimization, we further tune the system towards an ideal quality-speed balance.
\paragraph{Spatial-temporal modeling}
\input{TOG/fig_optimization}
Inspired by \cite{Li:2020:TSP,albert2017latency}, we perform spatial-temporal joint modeling to determine the optimal coordinate system ($\sphereNum$) for positional precision and the optimal network complexity ($\mlpLayerNum$, $\mlpChannelNum$) for color precision, adapting to the individual computational resources and scene content. This is achieved via latency-precision modeling in the spatial-temporal domain:
\begin{equation}
\begin{aligned}
\finalError(\sphereNum, &\mlpLayerNum, \mlpChannelNum) =
\sum_t\int \mlpFunc_{\mlpLayerNum, \mlpChannelNum}(\SpatialPt)\times\\ &\norm{\projectionMatrix(\rayo_t,\camDir_t)\cdot\SpatialPt-\projectionMatrix(\rayo_{t-\latency},\camDir_{t-\latency})\cdot\intersectionFunc(\sphereRadius_k,\rayo_{t-\latency},\rayd_{t-\latency})}\mathbf{d}\SpatialPt,
\label{eq:error:image}
\end{aligned}
\end{equation}
where $\latency\triangleq\latency(\sphereNum, \mlpLayerNum, \mlpChannelNum)$ is the system latency with a given coordinate and network setting.
$\mlpFunc_{\mlpLayerNum, \mlpChannelNum}(\SpatialPt)$ is the L1-distance of the four ($r,g,b,a$) output channels between a given network setting and the highest-capacity setting ($\sphereNum=8$, $\mlpLayerNum=4$, $\mlpChannelNum=1024$). For simplicity, we assume a uniformly distributed $\mathbf{\sphereRadius}$ with a fixed range of spherical coverage.
As suggested by Albert et al. \shortcite{albert2017latency}, the latency of a foveated system should remain below \textasciitilde$50$\,ms for artifacts to be undetectable. Given our test device's eye-tracking latency of \textasciitilde$12$\,ms and photon submission latency of \textasciitilde$14$\,ms \cite{albert2017latency}, the synthesis and rendering latency must be less than $L_0 = 24$\,ms.
Thus, we determine the optimal $\{\sphereNum, \mlpLayerNum, \mlpChannelNum\}$ to balance latency and precision as
\begin{equation}
\begin{aligned}
&\argmin_{\sphereNum, \mlpLayerNum, \mlpChannelNum} \finalError(\sphereNum, \mlpLayerNum, \mlpChannelNum), \\
& s.t.\ \latency(\sphereNum, \mlpLayerNum, \mlpChannelNum) < L_0.
\end{aligned}
\end{equation}
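Since only a handful of candidate settings are practical, the constrained minimization can be solved by exhaustive search over profiled candidates. The sketch below assumes per-candidate latency and error measurements are available; the numbers are placeholders, not our profiling results.
\begin{verbatim}
# Exhaustive search for argmin E subject to latency < L0.
# candidates maps (N, D, C) -> (latency_ms, error); values are placeholders.
L0 = 24.0
candidates = {
    (2, 2, 256):  (9.0,  0.180),
    (4, 2, 512):  (15.0, 0.090),
    (4, 4, 512):  (22.0, 0.060),
    (8, 4, 1024): (41.0, 0.030),  # best quality, but violates the budget
}
feasible = {k: v for k, v in candidates.items() if v[0] < L0}
best = min(feasible, key=lambda k: feasible[k][1])
print("optimal (N, D, C):", best, "->", feasible[best])
\end{verbatim}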
\Cref{fig:optimization} visualizes an example of the optimization mechanism for a foveal image $\imageFoveal$. The optimized results ($\sphereNum, \mlpLayerNum, \mlpChannelNum$) for the individual networks are used for training. The optimization outcomes are detailed in \Cref{sec:impl}.
The visual quality is validated by psychophysical study (\Cref{sec:study:user}) and objective analysis (\Cref{sec:study:quality}). The latency breakdown of our system is reported in \Cref{sec:study:intra}.
\subsection{Generative and discriminative models}
\subsubsection{Recap}
For parametric models without independent variables we have the form:
\(P(y| \theta )\)
And we have various ways of estimating \(\theta \).
We can write this as a likelihood function:
\(L(\theta ;y )=P(y|\theta)\)
\subsubsection{Discriminative models}
In discriminative models we learn:
\(P(y|X, \theta )\)
Which we can write as a likelihood function:
\(L(\theta ;y, X )=P(y| X, \theta)\)
\subsubsection{Generative models}
In generative models we learn:
\(P(y, X| \theta )\)
Which we can write as a likelihood function:
\(L(\theta ;y, X )=P(y, X|\theta)\)
We can use the generative model to calculate conditional probabilities.
\(P(y| X, \theta )=\dfrac{P(y, X| \theta )P(\theta )}{P(X, \theta )}\)
\(P(y| X, \theta )=\dfrac{P(y, X| \theta )}{P(X| \theta )}\)
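As an illustration with made-up numbers, the sketch below fits a tiny generative model \(P(y, X|\theta )\) by counting and then recovers the conditional \(P(y| X, \theta )\) exactly as in the identities above.
\begin{verbatim}
from collections import Counter

# Toy dataset of (X, y) pairs; values are made up for illustration.
data = [(0, 0), (0, 0), (0, 1), (1, 1), (1, 1), (1, 0), (1, 1), (0, 0)]
joint = Counter(data)
n = len(data)

def p_joint(x, y):
    return joint[(x, y)] / n          # estimate of P(y, X | theta)

def p_x(x):
    return sum(p_joint(x, y) for y in (0, 1))   # P(X | theta)

def p_y_given_x(y, x):
    return p_joint(x, y) / p_x(x)     # P(y | X, theta)

print(p_y_given_x(1, 1))              # P(y=1 | X=1) = 0.75
\end{verbatim}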
\documentclass[letterpaper]{article}
\usepackage{bytefield}
\usepackage{mathtools}
\usepackage{ifxetex}
\ifxetex
\usepackage{fontspec}
\setmainfont[ExternalLocation=../Common/,
Ligatures =TeX,
BoldFont = Cabin-Bold.ttf,
ItalicFont = Cabin-Italic.ttf,
BoldItalicFont = Cabin-BoldItalic.ttf ]
{Cabin-Regular.ttf}
\fi
\usepackage{graphicx}
\usepackage{color}
\usepackage[table]{xcolor}
\usepackage{hyperref}
\usepackage{parskip}
\usepackage{tikz}
\usetikzlibrary{automata,positioning,fit}
\usepackage{fancyhdr}
\pagestyle{fancy}
\definecolor{lightgray}{gray}{0.8}
\renewcommand{\arraystretch}{1.25}
\begin{document}
\title{\huge DK2 Firmware Specification}
\author{Nirav Patel}
\date{Revision 0.10\\
28 April 2014}
\begin{figure}
\includegraphics[width=5in]{../Common/oculus.png}
\end{figure}
\maketitle
\thispagestyle{empty}
\lfoot{DK2 Firmware Specification}
\cfoot{}
\rfoot{\thepage}
\newpage
\tableofcontents
\newpage
\section{Revision History}
\begin{center}
\begin{tabular}{ | l | l | p{8cm} |}
\hline
\cellcolor{lightgray} Revision & \cellcolor{lightgray} Date & \cellcolor{lightgray} Description \\ \hline
0.1 & 28-11-2013 & Preliminary specification. \\ \hline
0.2 & 08-12-2013 & Add camera feature report. \\ \hline
0.3 & 11-12-2013 & Add display feature report. \\ \hline
0.4 & 23-12-2013 & Add magnetometer and position calibration and LED pattern reports. \\ \hline
0.5 & 26-12-2013 & Add keep alive, manufacturing, and uuid reports. \\ \hline
0.6 & 30-01-2014 & Add temperature report, modify position report. \\ \hline
0.7 & 13-02-2014 & Add gyro offset report. \\ \hline
0.8 & 18-02-2014 & Add normals to position calibration, rename camera to tracking. \\ \hline
0.9 & 24-02-2014 & Add lens distortion report. \\ \hline
0.10 & 28-04-2014 & Add basic EDID information. \\ \hline
\end{tabular}
\end{center}
\newpage
\section{Introduction}
The Rift DK2 is interfaced using USB HID with a 1000 Hz polling rate. The device is configured using Feature Report Gets and Sets and reports back gyroscope, accelerometer, magnetometer, and synchronization timestamps through an IN Report. For more information about HID, see the Device Class Definition for Human Interface Devices\footnote{\url{http://www.usb.org/developers/devclass_docs/HID1_11.pdf}}
\begin{itemize}
\item {\bfseries Vendor ID:} {\em 0x2833}
\item {\bfseries Product ID:} {\em 0x0021}
\item {\bfseries Vendor String:} {\em Oculus VR, Inc.}
\item {\bfseries Product String:} {\em Rift DK2}
\item {\bfseries Serial String:} {\em Globally unique 20 byte Base32 string generated per device}
\end{itemize}
\newpage
\section{HID In Report}
The 64 byte IN report on Endpoint 1 contains the sensor data. All data is in little-endian format. The gyro and accelerometer report data at a rate of 1000 Hz, which is also the rate at which the sample time is incremented.\\
While the target rate for the host polling the device for the IN report is 1000 Hz, system or bus load can cause the polling to happen on longer intervals, dropping the reporting rate below 1000 Hz. For this reason, the report contains fields for up to 2 samples of gyro and accelerometer data as well as a field stating the number of samples being reported. The behavior is as follows:\\
\begin{itemize}
\item If the number of recorded samples is $<=$ 2, the corresponding number
of samples is returned with NumSamples set accordingly.
\item If the number of recorded samples is $>$ 2, the first (NumSamples -1) samples
are averaged into Samples[0], while Samples[1] contains the most recent recorded sample.
NumSamples is set to the total original number of
recorded samples and Timestamp is set to that of the most recent sample; this allows for PC software to compensate the integration.
\item DK2 does not accumulate more than 254 samples; beyond that the
NumSamples is reset to 2 and only the most recent 2 samples are reported, with the Timestamp field properly incremented to indicate the loss of samples.
\end{itemize}
\begin{bytefield}[leftcurly=.,bitwidth=1.1em]{32}
\bitheader{0-31} \\
\begin{leftwordgroup}{0}
\bitbox{8}{ReportID = 11} & \bitbox{16}{LastCommandID} & \bitbox{8}{NumSamples}
\end{leftwordgroup} \\
\begin{leftwordgroup}{4}
\bitbox{16}{SampleCount} & \bitbox{16}{Temperature}
\end{leftwordgroup} \\
\begin{leftwordgroup}{8}
\bitbox{32}{SampleTimestamp}
\end{leftwordgroup} \\
\begin{leftwordgroup}{12}
\wordbox[lrt]{1}{Samples[0]}
\end{leftwordgroup} \\
\skippedwords \\
\wordbox[lrb]{1}{} \\
\begin{leftwordgroup}{28}
\wordbox[lrt]{1}{Samples[1]}
\end{leftwordgroup} \\
\skippedwords \\
\wordbox[lrb]{1}{} \\
\begin{leftwordgroup}{44}
\bitbox{16}{MagX} & \bitbox{16}{MagY}
\end{leftwordgroup} \\
\begin{leftwordgroup}{48}
\bitbox{16}{MagZ} & \bitbox{16}{FrameCount}
\end{leftwordgroup} \\
\begin{leftwordgroup}{52}
\bitbox{32}{FrameTimestamp}
\end{leftwordgroup} \\
\begin{leftwordgroup}{56}
\bitbox{8}{FrameID} & \bitbox{8}{TrackingPattern} & \bitbox{16}{TrackingCount}
\end{leftwordgroup} \\
\begin{leftwordgroup}{60}
\bitbox{32}{TrackingTimestamp}
\end{leftwordgroup} \\
\end{bytefield}
\begin{itemize}
\item {\bfseries ReportID} (8 bits): The USB Report ID for this report is 11.
\item {\bfseries LastCommandID} (16 bits): Contains the CommandID from the last Feature Report set on DK2.
\item {\bfseries NumSamples} (8 bits): The number of samples the report represents as described above.
\item {\bfseries SampleCount} (16 bits): A running counter of samples, for the number of the first sample in the report. As the internal sampling rate targets 1000 Hz, this unit is close to milliseconds.
\item {\bfseries Temperature} (16 bits): The most recent internal sensor temperature recorded by the DK2. The value is in two's complement format, in units of centidegrees Celsius.
\item {\bfseries SampleTimestamp} (32 bits): A microsecond timestamp of the last sensor sample in the packet. This is recorded on the same clock as the FrameTimestamp and TrackingTimestamp.
\item {\bfseries Samples} (128 bits): Each sample contains X, Y, and Z data for the gyro and accelerometer. Specified below.
\item {\bfseries MagX, MagY, MagZ} (16 bits each): The most recent data available from the magnetometer. The three axes are sampled simultaneously at up to 100 Hz. Note that consecutive reports may contain the same magnetometer data. The data is two's complement in units of $10\textsuperscript{-4}$ gauss.
\item {\bfseries FrameCount} (16 bits): A running counter of frames sent to the display, incremented at each vsync.
\item {\bfseries FrameTimestamp} (32 bits): A microsecond timestamp of the last vsync sent to the display. This is recorded on the same clock as the SampleTimestamp and TrackingTimestamp.
\item {\bfseries FrameID} (8 bits): The green value of the top left pixel in the last frame sent to the display. This is used to identify specific frames to measure the end to end latency from motion to photon.
\item {\bfseries TrackingPattern} (8 bits): The index of the last tracking LED pattern exposed to the camera.
\item {\bfseries TrackingCount} (16 bits): A running counter of camera frames triggered by the headset.
\item {\bfseries TrackingTimestamp} (32 bits): A microsecond timestamp of the last camera exposure. This is recorded on the same clock as the SampleTimestamp and FrameTimestamp.
\end{itemize}
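For reference, a host-side sketch of unpacking this 64 byte IN report is shown below. It is written in Python with the standard struct module and assumes the byte layout follows the field order and offsets shown above; it is illustrative code, not part of the official SDK.
\begin{verbatim}
import struct

IN_REPORT = struct.Struct("<BHBHhI16s16shhhHIBBHI")  # 64 bytes, little-endian

def parse_in_report(buf):
    (report_id, last_command_id, num_samples, sample_count, temperature,
     sample_timestamp, sample0, sample1, mag_x, mag_y, mag_z,
     frame_count, frame_timestamp, frame_id, tracking_pattern,
     tracking_count, tracking_timestamp) = IN_REPORT.unpack(buf)
    assert report_id == 11
    return {
        "num_samples": num_samples,
        "sample_count": sample_count,
        "temperature_c": temperature / 100.0,        # centidegrees Celsius
        "sample_timestamp_us": sample_timestamp,
        "samples_raw": (sample0, sample1),           # packed 16-byte blocks
        "mag_gauss": (mag_x * 1e-4, mag_y * 1e-4, mag_z * 1e-4),
        "frame_count": frame_count,
        "frame_timestamp_us": frame_timestamp,
        "frame_id": frame_id,
        "tracking_pattern": tracking_pattern,
        "tracking_count": tracking_count,
        "tracking_timestamp_us": tracking_timestamp,
    }
\end{verbatim}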
\subsection{Accelerometer and Gyro Sample}
The X, Y, and Z axes of the gyro and accelerometer are sampled simultaneously. The accelerometer fields are two's complement format and 21 bits each. They are in units of $\frac{m}{s\textsuperscript{2}}\cdot 10\textsuperscript{-4}$. The gyroscope fields are two's complement and 21 bits each. They are in units of $\frac{\theta}{s}\cdot 10\textsuperscript{-4}$ in radians.\\
\begin{bytefield}[leftcurly=.,bitwidth=1.1em]{32}
\bitheader{0-31} \\
\begin{leftwordgroup}{0}
\bitbox{21}{AccelX} & \bitbox{11}{AccelY[0:10]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{4}
\bitbox{10}{AccelY[11:20]} & \bitbox{21}{AccelZ} & \bitbox{1}{\color{lightgray}\rule{\width}{\height}}
\end{leftwordgroup} \\
\begin{leftwordgroup}{8}
\bitbox{21}{GyroX} & \bitbox{11}{GyroY[0:10]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{12}
\bitbox{10}{GyroY[11:20]} & \bitbox{21}{GyroZ} & \bitbox{1}{\color{lightgray}\rule{\width}{\height}}
\end{leftwordgroup} \\
\end{bytefield}
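A sketch of unpacking one 16 byte sample into accelerometer and gyro vectors follows. The field widths and units come from the description above; the exact bit and byte ordering inside each 8 byte block is an assumption based on publicly available host-side drivers.
\begin{verbatim}
def unpack_vec21(d):
    # Three 21-bit two's complement values packed into 8 bytes.
    x = (d[0] << 13) | (d[1] << 5) | (d[2] >> 3)
    y = ((d[2] & 0x07) << 18) | (d[3] << 10) | (d[4] << 2) | (d[5] >> 6)
    z = ((d[5] & 0x3F) << 15) | (d[6] << 7) | (d[7] >> 1)
    sign = lambda v: v - (1 << 21) if v & (1 << 20) else v
    return sign(x), sign(y), sign(z)

def parse_sample(sample16):
    # Accelerometer in m/s^2 and gyro in rad/s (both stored as value * 1e4).
    accel = [v * 1e-4 for v in unpack_vec21(sample16[0:8])]
    gyro = [v * 1e-4 for v in unpack_vec21(sample16[8:16])]
    return accel, gyro
\end{verbatim}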
\newpage
\section{Feature Reports}
\subsection{Tracking}
The 13 byte Tracking report has a ReportID of 12. It configures the infrared tracking LEDs and camera synchronization. \\
\begin{bytefield}[leftcurly=.,bitwidth=1.1em]{32}
\bitheader{0-31} \\
\begin{leftwordgroup}{0}
\bitbox{8}{ReportID = 12} & \bitbox{16}{CommandID} & \bitbox{8}{Pattern}
\end{leftwordgroup} \\
\begin{leftwordgroup}{4}
\bitbox{1}{a} & \bitbox{1}{b} & \bitbox{1}{c} & \bitbox{1}{d} & \bitbox{1}{e} & \bitbox{1}{f} & \bitbox{10}{} & \bitbox{16}{ExposureLength}
\end{leftwordgroup} \\
\begin{leftwordgroup}{8}
\bitbox{16}{FrameInterval} & \bitbox{16}{VsyncOffset}
\end{leftwordgroup} \\
\begin{leftwordgroup}{12}
\bitbox{8}{DutyCycle} & \bitbox[]{24}{}
\end{leftwordgroup} \\
\end{bytefield}
\begin{itemize}
\item {\bfseries ReportID} (8 bits): The USB Report ID for this report is 12.
\item {\bfseries CommandID} (16 bits): A sequence number that is then repeated in the LastCommandID field of the HID IN Report.
\item {\bfseries Pattern} (8 bits): The index of the pattern of high and low intensities that is currently set on the tracking LEDs. Pattern 255 is reserved for all LEDs set high. Patterns auto-increment after each exposure if the Autoincrement bit is set in the bitmask. The Pattern starts at 0 on power on.
\item {\bfseries (a) Enable} (1 bit): This bit enables the tracking LED exposure and updating. This currently defaults to on, but will default off in a later firmware revision.
\item {\bfseries (b) Autoincrement} (1 bit): With Autoincrement set, the Pattern is incremented after every exposure. This defaults to on.
\item {\bfseries (c) UseCarrier} (1 bit): The tracking LEDs modulate at 85 kHz to allow for wireless synchronization when UseCarrier is enabled. This defaults to on.
\item {\bfseries (d) SyncInput} (1 bit): With SyncInput enabled, LED exposure is triggered from a rising edge on GPIO1. With SyncInput disabled, the LED exposure is triggered from an internal timer at FrameInterval. This defaults to off.
\item {\bfseries (e) VsyncLock} (1 bit)(Disabled): If enabled, LED exposure is triggered on panel Vsync rather than from an internal timer or external trigger. This defaults to off.
\item {\bfseries (f) CustomPattern} (1 bit): If enabled, the LED sequence will be the one loaded in using the CustomPattern report. This defaults to off.
\item {\bfseries ExposureLength} (16 bits): This is the length of time the tracking LEDs are enabled for in microseconds. The sync output also follows this length. This number cannot be larger than the FrameInterval. The minimum length is approximately 10 microseconds.
\item {\bfseries FrameInterval} (16 bits): When SyncInput and VsyncLock are false, the tracking LEDs are exposed on the interval set in FrameInterval in microseconds.
\item {\bfseries VsyncOffset} (16 bits)(Disabled): When VsyncLock is enabled, the exposure can be performed a fixed offset of VsyncOffset microseconds after the panel vsync.
\item {\bfseries DutyCycle} (8 bits): When UseCarrier is true, the duty cycle of the 85 kHz modulation can be increased or decreased. The default value is 128, which results in a $50\%$ duty cycle.
\end{itemize}
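As an illustration, the 13 byte report can be assembled on the host as sketched below. The positions of the flag bits within the flags byte are an assumption for illustration; the specification above fixes the fields, not their bit order.
\begin{verbatim}
import struct

def build_tracking_report(command_id, pattern, flags, exposure_us,
                          frame_interval_us, vsync_offset_us, duty_cycle=128):
    flag_byte = 0
    names = ("enable", "autoincrement", "use_carrier",
             "sync_input", "vsync_lock", "custom_pattern")
    for bit, name in enumerate(names):        # assumed bit order
        if flags.get(name, False):
            flag_byte |= 1 << bit
    return struct.pack("<BHBBBHHHB", 12, command_id, pattern,
                       flag_byte, 0,          # flags byte + padding byte
                       exposure_us, frame_interval_us, vsync_offset_us,
                       duty_cycle)

report = build_tracking_report(1, 0, {"enable": True, "autoincrement": True,
                                      "use_carrier": True},
                               exposure_us=350, frame_interval_us=16666,
                               vsync_offset_us=0)
assert len(report) == 13
\end{verbatim}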
\newpage
\subsection{Display}
The 16 byte Display report has a ReportID of 13. It configures the display panel. \\
\begin{bytefield}[leftcurly=.,bitwidth=1.1em]{32}
\bitheader{0-31} \\
\begin{leftwordgroup}{0}
\bitbox{8}{ReportID = 13} & \bitbox{16}{CommandID} & \bitbox{8}{Brightness}
\end{leftwordgroup} \\
\begin{leftwordgroup}{4}
\bitbox{4}{a} & \bitbox{2}{b} & \bitbox{1}{c} & \bitbox{1}{d} & \bitbox{1}{e} & \bitbox{1}{f} & \bitbox{1}{g} & \bitbox{1}{h} &\bitbox{20}{}
\end{leftwordgroup} \\
\begin{leftwordgroup}{8}
\bitbox{16}{Persistence} & \bitbox{16}{LightingOffset}
\end{leftwordgroup} \\
\begin{leftwordgroup}{12}
\bitbox{16}{PixelSettle} & \bitbox{16}{TotalRows}
\end{leftwordgroup} \\
\end{bytefield}
\begin{itemize}
\item {\bfseries ReportID} (8 bits): The USB Report ID for this report is 13.
\item {\bfseries CommandID} (16 bits): A sequence number that is then repeated in the LastCommandID field of the HID IN Report.
\item {\bfseries Brightness} (8 bits): A relative brightness setting that is independent of pixel persistence. The field only takes effect if HighBrightness is disabled.
\item {\bfseries (a) ShutterType} (4 bits)(Read only): A read-only enum of the method in which the display is scanned and exposed.
\item {\bfseries (b) CurrentLimit} (2 bit): An enum of the current limiting mode in use.
\item {\bfseries (c) UseRolling} (1 bit): Switches between global shutter and rolling shutter update modes. This defaults to true.
\item {\bfseries (d) ReverseRolling} (1 bit): Switches the polarity on the rolling shutter mode. Reverse has the lighting of rows happen before the black period. This defaults to true.
\item {\bfseries (e) HighBrightness} (1 bit)(Disabled): This enables a mode in which maximum brightness is used, at the possible expense of panel lifetime. This defaults to false.
\item {\bfseries (f) SelfRefresh} (1 bit): This sets the panel to self refresh from the contents of its frame buffer. This defaults to false.
\item {\bfseries (g) ReadPixel} (1 bit): This enables reading the top left pixel each frame. This currently defaults to false, but will default to true in a later firmware revision.
\item {\bfseries (h) DirectPentile} (1 bit): This enables direct mapping of input data to panel sub pixel geometry, rather than using in-panel interpolation.
\item {\bfseries Persistence} (16 bits): This is the length of time in rows that the display is lit each frame. This defaults to the full size of the display, meaning full persistence.
\item {\bfseries LightingOffset} (16 bits)(Disabled): This is the offset in rows from vsync that the panel is lit when using global shutter mode. In rolling shutter, this has no effect.
\item {\bfseries PixelSettle} (16 bits)(Read only): This is the read-only time in microseconds that we estimate the pixel takes to settle to the value it is set to after it is lit.
\item {\bfseries TotalRows} (16 bits)(Read only): This is the read-only number of rows including active area and blanking used with Persistence and LightingOffset.
\end{itemize}
\newpage
\subsection{MagCalibration}
The 52 byte MagCalibration report has a ReportID of 14. It contains the 3 by 4 matrix of parameters used to fit the magnetometer to a sphere. The table is stored on the headset, but the firmware does not apply it to outgoing magnetometer data. \\
\begin{bytefield}[leftcurly=.,bitwidth=1.1em]{32}
\bitheader{0-31} \\
\begin{leftwordgroup}{0}
\bitbox{8}{ReportID = 14} & \bitbox{16}{CommandID} & \bitbox{8}{Version}
\end{leftwordgroup} \\
\begin{leftwordgroup}{4}
\bitbox{32}{Calibration[0][0]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{8}
\bitbox{32}{Calibration[0][1]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{12}
\bitbox{32}{Calibration[0][2]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{16}
\bitbox{32}{Calibration[0][3]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{20}
\bitbox{32}{Calibration[1][0]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{24}
\bitbox{32}{Calibration[1][1]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{28}
\bitbox{32}{Calibration[1][2]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{32}
\bitbox{32}{Calibration[1][3]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{36}
\bitbox{32}{Calibration[2][0]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{40}
\bitbox{32}{Calibration[2][1]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{44}
\bitbox{32}{Calibration[2][2]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{48}
\bitbox{32}{Calibration[2][3]}
\end{leftwordgroup} \\
\end{bytefield}
\begin{itemize}
\item {\bfseries ReportID} (8 bits): The USB Report ID for this report is 14.
\item {\bfseries CommandID} (16 bits): A sequence number that is then repeated in the LastCommandID field of the HID IN Report.
\item {\bfseries Version} (8 bits): The version of the calibration procedure used to generate the stored table.
\item {\bfseries Calibration} (48 bytes): This is the 3 by 4 matrix used to fit magnetometer data to a sphere. Each field is a 32 bit signed integer. The specific value is dependent on the calibration procedure, indicated by Version.
\end{itemize}
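Since the firmware only stores the table, applying it is up to the host. A sketch is shown below; the affine convention (rotation and scale in the left 3 by 3 block, offset in the last column) and the fixed-point scale factor are assumptions, as the actual interpretation depends on the calibration Version.
\begin{verbatim}
import numpy as np

def apply_mag_calibration(raw_mag, calib_3x4, scale=1e-4):
    # Assumed convention: calibrated = (M[:, :3] @ raw) + M[:, 3].
    m = np.asarray(calib_3x4, dtype=float) * scale
    return m[:, :3] @ np.asarray(raw_mag, dtype=float) + m[:, 3]

calib = [[10000, 0, 0, -120],
         [0, 10000, 0, 345],
         [0, 0, 10000, -78]]   # placeholder values
print(apply_mag_calibration([1500, -200, 4300], calib))
\end{verbatim}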
\newpage
\subsection{PositionCalibration}
The 30 byte PositionCalibration report has a ReportID of 15. It allows reading and storage of calibrated positions of each tracking LED as well as the inertial tracker.\\
\begin{bytefield}[leftcurly=.,bitwidth=1.1em]{32}
\bitheader{0-31} \\
\begin{leftwordgroup}{0}
\bitbox{8}{ReportID = 15} & \bitbox{16}{CommandID} & \bitbox{8}{Version}
\end{leftwordgroup} \\
\begin{leftwordgroup}{4}
\bitbox{32}{Position[0]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{8}
\bitbox{32}{Position[1]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{12}
\bitbox{32}{Position[2]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{16}
\bitbox{16}{Normal[0]} & \bitbox{16}{Normal[1]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{20}
\bitbox{16}{Normal[2]} & \bitbox{16}{Rotation}
\end{leftwordgroup} \\
\begin{leftwordgroup}{24}
\bitbox{16}{PositionIndex} & \bitbox{16}{NumPositions}
\end{leftwordgroup} \\
\begin{leftwordgroup}{28}
\bitbox{16}{PositionType} & \bitbox[]{16}{}
\end{leftwordgroup} \\
\end{bytefield}
\begin{itemize}
\item {\bfseries ReportID} (8 bits): The USB Report ID for this report is 15.
\item {\bfseries CommandID} (16 bits): A sequence number that is then repeated in the LastCommandID field of the HID IN Report.
\item {\bfseries Version} (8 bits): The version of the calibration procedure used to generate the stored positions. Note that setting the report with a Version value of 1 returns the Position values for the PositionIndex to the stored defaults.
\begin{description}
\item[0 -] None
\item[1 -] Hard-coded default positions
\item[2 -] Factory calibrated
\item[3 -] User calibrated
\end{description}
\item {\bfseries Position} (12 bytes): The X, Y, and Z axis position of the LED or inertial tracker. This is a signed integer in units of micrometers. The position is relative to the center of the emitter plane of the display at nominal focus.
\item {\bfseries Normal} (6 bytes): The X, Y, and Z axis normal of the LED or inertial tracker. This is a signed integer in units of micrometers. The Normal is relative to the Position.
\item {\bfseries Rotation} (16 bits): The rotation around the normal. This is in units of $ 10\textsuperscript{-4}$ radians.
\item {\bfseries PositionIndex} (16 bits): This is the current position being read or written to. This autoincrements on reads, and gets set to the written value on writes.
\item {\bfseries NumPositions} (16 bits)(Read only): This is the read-only number of items with positions stored.
\item {\bfseries PositionType} (16 bits)(Read only): The type of the item which has its position reported in the current report. These are defined as:
\begin{description}
\item[0 -] Tracking LED
\item[1 -] Inertial sensor
\end{description}
When setting this report, the PositionType should match that read for the PositionIndex.
\end{itemize}
\newpage
\subsection{CustomPattern}
The 12 byte CustomPattern report has a ReportID of 16. It allows reading of the default LED patterns as well as reading and setting of custom patterns. Repeated reads of the report iterate through each LED, starting at 0 and wrapping at the final LED. When the CustomPattern bit of the Tracking report is not set, reads on this report act upon the default LED pattern stored in the headset, and writes have no effect. With the CustomPattern bit set, this report acts on a custom pattern array that erases when the headset is reset.\\
\begin{bytefield}[leftcurly=.,bitwidth=1.1em]{32}
\bitheader{0-31} \\
\begin{leftwordgroup}{0}
\bitbox{8}{ReportID = 16} & \bitbox{16}{CommandID} & \bitbox{8}{SequenceLength}
\end{leftwordgroup} \\
\begin{leftwordgroup}{4}
\bitbox{32}{Sequence}
\end{leftwordgroup} \\
\begin{leftwordgroup}{8}
\bitbox{16}{LEDIndex} & \bitbox{16}{NumLEDs}
\end{leftwordgroup} \\
\end{bytefield}
\begin{itemize}
\item {\bfseries ReportID} (8 bits): The USB Report ID for this report is 16.
\item {\bfseries CommandID} (16 bits): A sequence number that is then repeated in the LastCommandID field of the HID IN Report.
\item {\bfseries SequenceLength} (8 bits): The length of the sequence of bits that each LED goes through. Each state of the sequence is represented by two bits, allowing a maximum of 4 levels per sequence. For DK2, only states 0, 1, and 3 are meaningful, for off, low, and high respectively.
\item {\bfseries Sequence} (32 bits): The sequence that the specific LED goes through. The first $SequenceLength*2$ bits are used. The sequence is in order of LSB to MSB.
\item {\bfseries LEDIndex} (16 bits): This is the current LED being read or written to. This autoincrements on reads, and gets set to the written value on writes.
\item {\bfseries NumLEDs} (16 bits)(Read only): This is the read-only number of tracking LEDs which are actually present on the device.
\end{itemize}
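A sketch of decoding a Sequence field into per-exposure LED states, following the two-bits-per-state, LSB-to-MSB layout described above:
\begin{verbatim}
def decode_sequence(sequence, sequence_length):
    # Two bits per state, ordered LSB to MSB.
    # On DK2 the states 0 / 1 / 3 mean off / low / high.
    return [(sequence >> (2 * i)) & 0b11 for i in range(sequence_length)]

# Example: a 10-state sequence alternating low (1) and high (3).
seq = 0
for i in range(10):
    seq |= (1 if i % 2 == 0 else 3) << (2 * i)
print(decode_sequence(seq, 10))   # [1, 3, 1, 3, 1, 3, 1, 3, 1, 3]
\end{verbatim}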
\newpage
\subsection{KeepAliveMux}
The 6 byte KeepAliveMux report has a ReportID of 17. It is used to specify both which IN Report is sent and the interval of sending. The interval takes effect only if the UseCommandKeepAlive or UseMotionKeepAlive bits are set in the Config report.\\
\begin{bytefield}[leftcurly=.,bitwidth=1.1em]{32}
\bitheader{0-31} \\
\begin{leftwordgroup}{0}
\bitbox{8}{ReportID = 17} & \bitbox{16}{CommandID} & \bitbox{8}{INReport}
\end{leftwordgroup} \\
\begin{leftwordgroup}{4}
\bitbox{16}{Interval} & \bitbox[]{16}{}
\end{leftwordgroup} \\
\end{bytefield}
\begin{itemize}
\item {\bfseries ReportID} (8 bits): The USB Report ID for this report is 17.
\item {\bfseries CommandID} (16 bits): A sequence number that is then repeated in the LastCommandID field of the HID IN Report.
\item {\bfseries INReport} (8 bits): The specified number is the INReport which will be repeatedly sent until the interval is passed. For example, specifying 1 sends the DK1 IN Report, while specifying 11 sends the DK2 IN Report. The default value for DK2 is 11.
\item {\bfseries Interval} (16 bits): The interval in milliseconds used as the threshold for UseCommandKeepAlive and UseMotionKeepAlive. The default interval is 10000.
\end{itemize}
\newpage
\subsection{Manufacturing}
The 16 byte Manufacturing report has a ReportID of 18. It is used to record and display the results of each stage of the production of the DK2 product that has USB access.\\
\begin{bytefield}[leftcurly=.,bitwidth=1.1em]{32}
\bitheader{0-31} \\
\begin{leftwordgroup}{0}
\bitbox{8}{ReportID = 18} & \bitbox{16}{CommandID} & \bitbox{8}{NumStages}
\end{leftwordgroup} \\
\begin{leftwordgroup}{4}
\bitbox{8}{Stage} & \bitbox{8}{StageVersion} & \bitbox{16}{StageLocation}
\end{leftwordgroup} \\
\begin{leftwordgroup}{8}
\bitbox{32}{StageTime}
\end{leftwordgroup} \\
\begin{leftwordgroup}{12}
\bitbox{32}{Result}
\end{leftwordgroup} \\
\end{bytefield}
\begin{itemize}
\item {\bfseries ReportID} (8 bits): The USB Report ID for this report is 18.
\item {\bfseries CommandID} (16 bits): A sequence number that is then repeated in the LastCommandID field of the HID IN Report.
\item {\bfseries NumStages} (8 bits)(Read only): The number of stages of production being recorded on the product.
\item {\bfseries Stage} (8 bits): The specific stage number being recorded or read from. This autoincrements on reads, and gets set to the written value on writes.
\item {\bfseries StageVersion} (8 bits): The version of the specific manufacturing test.
\item {\bfseries StageLocation} (16 bits): A bit mask representing the manufacturer, line, and machine this test was performed on. See the DK2 Testware document for details on this.
\item {\bfseries StageTime} (32 bits): The UNIX time at which the stage was performed.
\item {\bfseries Result} (32 bits): The result of the stage. See the DK2 Testware document for the details of what this means for each stage.
\end{itemize}
\newpage
\subsection{UUID}
The 23 byte UUID report has a ReportID of 19. It is used internally for manufacturing purposes, when the USB serial number is set to a default number. Outside of manufacturing, the number is identical to the USB serial number. Setting this report switches the USB serial number to the value read from this report. The value being set is ignored.\\
\begin{bytefield}[leftcurly=.,bitwidth=1.1em]{32}
\bitheader{0-31} \\
\begin{leftwordgroup}{0}
\bitbox{8}{ReportID = 19} & \bitbox{16}{CommandID} & \bitbox{8}{UUID[0]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{4}
\bitbox{32}{UUID[1:5]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{8}
\bitbox{32}{UUID[5:9]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{12}
\bitbox{32}{UUID[9:13]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{16}
\bitbox{32}{UUID[13:17]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{20}
\bitbox{24}{UUID[17:20]} & \bitbox[]{8}{}
\end{leftwordgroup} \\
\end{bytefield}
\begin{itemize}
\item {\bfseries ReportID} (8 bits): The USB Report ID for this report is 19.
\item {\bfseries CommandID} (16 bits): A sequence number that is then repeated in the LastCommandID field of the HID IN Report.
\item {\bfseries UUID} (20 bytes)(Read only): The unique ID of the device.
\end{itemize}
\newpage
\subsection{Temperature}
The 24 byte Temperature report has a ReportID of 20. It is used to store gyroscope zero rate offsets across different temperatures. The offsets are stored for certain target temperature Bins, with multiple Samples of offset data stored per temperature. When reading the report, subsequent reads of the report go sequentially through each Sample in NumSamples for each Bin in NumBins. The headset only stores the offsets, and does not create or modify them directly.\\
\begin{bytefield}[leftcurly=.,bitwidth=1.1em]{32}
\bitheader{0-31} \\
\begin{leftwordgroup}{0}
\bitbox{8}{ReportID = 20} & \bitbox{16}{CommandID} & \bitbox{8}{Version}
\end{leftwordgroup} \\
\begin{leftwordgroup}{4}
\bitbox{8}{NumBins} & \bitbox{8}{Bin} & \bitbox{8}{NumSamples} & \bitbox{8}{Sample}
\end{leftwordgroup} \\
\begin{leftwordgroup}{8}
\bitbox{16}{TargetTemperature} & \bitbox{16}{ActualTemperature}
\end{leftwordgroup} \\
\begin{leftwordgroup}{12}
\bitbox{32}{Time}
\end{leftwordgroup} \\
\begin{leftwordgroup}{16}
\wordbox[lrt]{1}{Offset}
\end{leftwordgroup} \\
\skippedwords \\
\wordbox[lrb]{1}{} \\
\end{bytefield}
\begin{itemize}
\item {\bfseries ReportID} (8 bits): The USB Report ID for this report is 20.
\item {\bfseries CommandID} (16 bits): A sequence number that is then repeated in the LastCommandID field of the HID IN Report.
\item {\bfseries Version} (8 bits): The version of temperature calibration being performed.
\item {\bfseries NumBins} (8 bits)(Read only): The number of temperature bins being stored.
\item {\bfseries Bin} (8 bits): The current index of temperature bin the samples are for. This autoincrements on reading all of the Samples in the Bin, and gets set to the written value on writes.
\item {\bfseries NumSamples} (8 bits)(Read only): The number of samples being stored per temperature bin.
\item {\bfseries Sample} (8 bits): The current index of temperature offset sample. This autoincrements on reads, and gets set to the written value on writes.
\item {\bfseries TargetTemperature} (16 bits)(Read only): The target temperature in centidegrees Celsius for the current Bin.
\item {\bfseries ActualTemperature} (16 bits): The actual temperature in centidegrees Celsius of the current Bin.
\item {\bfseries Time} (32 bits): The UNIX time that the current Sample was generated at.
\item {\bfseries Offset} (64 bits): The packed sample format with 21 bits per axis used for the IN Report, with each value in units of $\frac{\theta}{s}\cdot 10\textsuperscript{-4}$ radians, representing the estimated zero-rate gyroscope offset for the ActualTemperature.
\end{itemize}
\newpage
\subsection{GyroOffset}
The 18 byte GyroOffset report has a ReportID of 21. It is used to fetch the headset's most recent estimate of gyro zero-rate offset error. Note that if the headset fails to complete zero rate offset calculation, there will not be valid data in this report, indicated by a Version field value of 0.\\
\begin{bytefield}[leftcurly=.,bitwidth=1.1em]{32}
\bitheader{0-31} \\
\begin{leftwordgroup}{0}
\bitbox{8}{ReportID = 21} & \bitbox{16}{CommandID} & \bitbox{8}{Version}
\end{leftwordgroup} \\
\begin{leftwordgroup}{4}
\wordbox[lrt]{1}{Offset}
\end{leftwordgroup} \\
\skippedwords \\
\wordbox[lrb]{1}{} \\
\begin{leftwordgroup}{12}
\bitbox{32}{Timestamp}
\end{leftwordgroup} \\
\begin{leftwordgroup}{16}
\bitbox{16}{Temperature} & \bitbox[]{16}{}
\end{leftwordgroup} \\
\end{bytefield}
\begin{itemize}
\item {\bfseries ReportID} (8 bits): The USB Report ID for this report is 21.
\item {\bfseries CommandID} (16 bits): A sequence number that is then repeated in the LastCommandID field of the HID IN Report.
\item {\bfseries Version} (8 bits): The version of offset calculation being performed.
\begin{description}
\item[0 -] No offset calculated
\item[1 -] Running average terminated by motion
\item[2 -] Running average terminated by timeout
\end{description}
\item {\bfseries Offset} (64 bits): The packed sample format with 21 bits per axis used for the IN Report, with each value in units of $\frac{\theta}{s}\cdot 10\textsuperscript{-4}$ radians, representing the estimated zero-rate gyroscope offset.
\item {\bfseries Timestamp} (32 bits): A microsecond timestamp of the calculation of the offset. This is recorded on the same clock as the SampleTimestamp in the IN Report.
\item {\bfseries Temperature} (16 bits): The temperature the offset was calculated at.
\end{itemize}
\newpage
\subsection{LensDistortion}
The 64 byte LensDistortion report has a ReportID of 22. It defines parameters used by LibOVR to correct lens distortion. Multiple reports of the same or different versions or eye reliefs may be available, defined by NumDistortions. The report defaults to factory data, but each DistortionIndex may be overwritten with new data.\\
\begin{bytefield}[leftcurly=.,bitwidth=1.1em]{32}
\bitheader{0-31} \\
\begin{leftwordgroup}{0}
\bitbox{8}{ReportID = 22} & \bitbox{16}{CommandID} & \bitbox{8}{NumDistortions}
\end{leftwordgroup} \\
\begin{leftwordgroup}{4}
\bitbox{8}{DistortionIndex} & \bitbox{8}{Bitmask} & \bitbox{16}{LensType}
\end{leftwordgroup} \\
\begin{leftwordgroup}{8}
\bitbox{16}{Version} & \bitbox{16}{EyeRelief}
\end{leftwordgroup} \\
\begin{leftwordgroup}{12}
\bitbox{16}{KCoefficients[0]} & \bitbox{16}{KCoefficients[1]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{16}
\bitbox{16}{KCoefficients[2]} & \bitbox{16}{KCoefficients[3]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{20}
\bitbox{16}{KCoefficients[4]} & \bitbox{16}{KCoefficients[5]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{24}
\bitbox{16}{KCoefficients[6]} & \bitbox{16}{KCoefficients[7]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{28}
\bitbox{16}{KCoefficients[8]} & \bitbox{16}{KCoefficients[9]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{32}
\bitbox{16}{KCoefficients[10]} & \bitbox{16}{MaxR}
\end{leftwordgroup} \\
\begin{leftwordgroup}{36}
\bitbox{16}{MetersPerTanAngleAtCenter} & \bitbox{16}{ChromaticAberration[0]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{40}
\bitbox{16}{ChromaticAberration[1]} & \bitbox{16}{ChromaticAberration[2]}
\end{leftwordgroup} \\
\begin{leftwordgroup}{44}
\bitbox{16}{ChromaticAberration[3]} & \bitbox[lrt]{16}{}
\end{leftwordgroup} \\
\begin{leftwordgroup}{48}
\wordbox[lr]{1}{Unused}
\end{leftwordgroup} \\
\skippedwords \\
\wordbox[lrb]{1}{} \\
\end{bytefield}
\begin{itemize}
\item {\bfseries ReportID} (8 bits): The USB Report ID for this report is 22.
\item {\bfseries CommandID} (16 bits): A sequence number that is then repeated in the LastCommandID field of the HID IN Report.
\item {\bfseries NumDistortions} (8 bits)(Read only): The number of lens distortions being stored.
\item {\bfseries DistortionIndex} (8 bits): The current index of lens distortion. This autoincrements on reads and gets set to the written value on writes.
\item {\bfseries Bitmask} (8 bits): A currently unused bitmask.
\item {\bfseries LensType} (16 bits): The type of the lens used in the headset. These are enumerated in LibOVR.
\item {\bfseries Version} (16 bits): The version of lens distortion specified in the report. The fields after this one are dependent on the specified Version.
\begin{description}
\item[0 -] No lens distortion stored. The remaining report is empty.
\item[1 -] LCSV\_CatmullRom10Version1. The fields are defined as below.
\end{description}
\item {\bfseries EyeRelief} (16 bits): The eye relief the distortion is designed for. This is in units of micrometers measured from the front surface of the lens.
\item {\bfseries KCoefficients} (11 fields of 16 bits): Catmull-Rom distortion coefficients. The units are defined in LibOVR.
\item {\bfseries MaxR} (16 bits): Defined in LibOVR.
\item {\bfseries MetersPerTanAngleAtCenter} (16 bits): Defined in LibOVR.
\item {\bfseries ChromaticAberration} (4 fields of 16 bits): Defined in LibOVR.
\item {\bfseries Unused} (14 bytes): Unused by LCSV\_CatmullRom10Version1.
\end{itemize}
\newpage
\section{EDID}
The EDID 1.3 is read over the HDMI interface by the video host.
\begin{itemize}
\item {\bfseries Manufacturer ID:} {\em OVR}
\item {\bfseries Product ID:} {\em 0x0003}
\item {\bfseries Product String:} {\em Rift DK2}
\item {\bfseries Serial String:} {\em First 13 bytes of the device UUID}
\end{itemize}
\end{document}
%-------------------------------------------------------%
\section{Overview} \label{sec:tutorial_real_intro}
%-------------------------------------------------------%
In this chapter, the basic execution procedure of the real atmospheric experiment is described using a simple case according to the workflow in Fig. \ref{fig:howto}.
\begin{enumerate}
\item Preparations for input data. The input data must be prepared by users themselves.
\item \texttt{pp}: Making topographical data
\item \texttt{init}: Making initial and boundary data
\item \texttt{run}: Executing the simulation
\item \texttt{net2g}: Converting {\netcdf} output data to {\grads} format (optional)
\end{enumerate}
Hereinafter, the absolute path \texttt{scale-{\version}/scale-rm/test/tutorial/} is denoted by\\
\verb|${Tutorial_DIR}|.
\begin{figure}[tb]
\begin{center}
\includegraphics[width=0.9\hsize]{./../../figure/real_procedure.pdf}\\
\caption{\scalerm procedure of model execution}
\label{fig:howto}
\end{center}
\end{figure}
The settings for the calculation domain used in this tutorial are given in Table \ref{tab:grids}.
Figure \ref{fig:tutorial_real_domain} shows the target domain.
Since this tutorial focuses on learning how to conduct
real atmospheric experiments using \scalerm quickly,
the experiment is designed to be completed in a short time.
Note that this setting may not be appropriate as a physically valid experiment,
and in practical application one should examine the experimental setup as needed.
\begin{table}[tb]
\begin{center}
\caption{Overview of experimental settings}
\label{tab:grids}
\begin{tabularx}{150mm}{|l|X|} \hline
\rowcolor[gray]{0.9} Item & Configuration \\ \hline
MPI process decomposition (east-west $\times$ north-south) & 2 $\times$ 2 (total: 4 processes) \\ \hline
Number of horizontal grids (east-west $\times$ north-south) & 90 $\times$ 90 \\ \hline
Number of vertical layers & 36 \\ \hline
Horizontal grid intervals & $\Delta x = \Delta y = $ 20km \\ \hline
Integration period & July 14, 2007, 18 UTC -- July 15, 2007, 00 UTC (6-hour integration) \\ \hline
Time step & 90 s/step (total: 240 steps) \\ \hline
\end{tabularx}
\end{center}
\end{table}
\begin{figure}[tb]
\begin{center}
\includegraphics[width=0.95\hsize]{./../../figure/real_domain.pdf}\\
\caption{Topographical and land-ocean distribution in the domain}
\label{fig:tutorial_real_domain}
\end{center}
\end{figure}
%% This is file `elsarticle-template-1-num.tex',
%%
%% Copyright 2009 Elsevier Ltd
%%
%% This file is part of the 'Elsarticle Bundle'.
%% ---------------------------------------------
%%
%% It may be distributed under the conditions of the LaTeX Project Public
%% License, either version 1.2 of this license or (at your option) any
%% later version. The latest version of this license is in
%% http://www.latex-project.org/lppl.txt
%% and version 1.2 or later is part of all distributions of LaTeX
%% version 1999/12/01 or later.
%%
%% Template article for Elsevier's document class `elsarticle'
%% with numbered style bibliographic references
%%
%% $Id: elsarticle-template-1-num.tex 149 2009-10-08 05:01:15Z rishi $
%% $URL: http://lenova.river-valley.com/svn/elsbst/trunk/elsarticle-template-1-num.tex $
%%
\documentclass[preprint,12pt]{elsarticle}
%% Use the option review to obtain double line spacing
%% \documentclass[preprint,review,12pt]{elsarticle}
%% Use the options 1p,twocolumn; 3p; 3p,twocolumn; 5p; or 5p,twocolumn
%% for a journal layout:
%% \documentclass[final,1p,times]{elsarticle}
%% \documentclass[final,1p,times,twocolumn]{elsarticle}
%% \documentclass[final,3p,times]{elsarticle}
%% \documentclass[final,3p,times,twocolumn]{elsarticle}
%% \documentclass[final,5p,times]{elsarticle}
%% \documentclass[final,5p,times,twocolumn]{elsarticle}
%% The graphicx package provides the includegraphics command.
\usepackage{graphicx}
%% The amssymb package provides various useful mathematical symbols
\usepackage{amssymb}
%% The amsthm package provides extended theorem environments
%% \usepackage{amsthm}
%% The lineno packages adds line numbers. Start line numbering with
%% \begin{linenumbers}, end it with \end{linenumbers}. Or switch it on
%% for the whole article with \linenumbers after \end{frontmatter}.
\usepackage{lineno}
%% natbib.sty is loaded by default. However, natbib options can be
%% provided with \biboptions{...} command. Following options are
%% valid:
%% round - round parentheses are used (default)
%% square - square brackets are used [option]
%% curly - curly braces are used {option}
%% angle - angle brackets are used <option>
%% semicolon - multiple citations separated by semi-colon
%% colon - same as semicolon, an earlier confusion
%% comma - separated by comma
%% numbers- selects numerical citations
%% super - numerical citations as superscripts
%% sort - sorts multiple citations according to order in ref. list
%% sort&compress - like sort, but also compresses numerical citations
%% compress - compresses without sorting
%%
%% \biboptions{comma,round}
% \biboptions{}
\journal{TJHSST SYSLAB}
\begin{document}
\begin{frontmatter}
%% Title, authors and addresses
\title{Journal 21}
%% use the tnoteref command within \title for footnotes;
%% use the tnotetext command for the associated footnote;
%% use the fnref command within \author or \address for footnotes;
%% use the fntext command for the associated footnote;
%% use the corref command within \author for corresponding author footnotes;
%% use the cortext command for the associated footnote;
%% use the ead command for the email address,
%% and the form \ead[url] for the home page:
%%
%% \title{Title\tnoteref{label1}}
%% \tnotetext[label1]{}
%% \author{Name\corref{cor1}\fnref{label2}}
%% \ead{email address}
%% \ead[url]{home page}
%% \fntext[label2]{}
%% \cortext[cor1]{}
%% \address{Address\fnref{label3}}
%% \fntext[label3]{}
%% use optional labels to link authors explicitly to addresses:
%% \author[label1,label2]{<author name>}
%% \address[label1]{<address>}
%% \address[label2]{<address>}
\author{Sahra Yusuf}
\address{Computer Systems Lab}
\end{frontmatter}
%%
%% Start line numbering here if you want
%%
\linenumbers
%% main text
\section{Daily Log}
\begin{itemize}
\item \textbf{03/18} \\
Researched \textit{C. elegans} navigation, specifically food searching \\
\item \textbf{03/20} \\
L293D arrived \\
Began working on navigation again \\
\item \textbf{03/22} \\
Troubleshot audio issues \\
Looked into binaural audio\\
\label{S:1}
\end{itemize}
\section{Weekly Timeline}
\begin{table}[h]
\centering
\begin{tabular}{l l l}
\hline
\textbf{Week} & \textbf{Goal} & \textbf{Met?} \\
\hline
02/25 & Finalize course & Incomplete, extended \\
& Finalize navigation code & Incomplete, extended \\
03/04 & Continue troubleshooting movement, specifically turns & Completed \\
& Continue building test course & Extended \\
03/11 & Finalize test course & Extended \\
& Finalize movement & Extended \\
03/18 & Refine navigation & Extended \\
& Add more specific algorithms & Extended \\
03/25 & Continue refining & N/A \\
& Evaluate according to existing research & N/A \\
\hline
\end{tabular}
\caption{Weekly timeline (beginning 03/04)}
\end{table}
\break
\section{Reflection}
\label{S:3}
My second L293D chip arrived last week, so I was able to resume work on navigation. I also encountered an issue with the USB microphone I have been using: noise detection works as expected when tested directly in the Raspberry Pi environment, but there is no reaction to sound whatsoever when running in real time. I have been using the pyalsaaudio library, which provides audio capture and processing functionality for Linux environments.\\
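For reference, below is a minimal sketch of the kind of capture loop involved; the default ALSA device, period size, and threshold shown here are placeholder assumptions, not my actual settings.
\begin{verbatim}
# Minimal sketch: poll the microphone with pyalsaaudio and report
# when the RMS level crosses a threshold. Threshold is a placeholder.
import time
import alsaaudio
import numpy as np

pcm = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NONBLOCK)
pcm.setchannels(1)
pcm.setrate(16000)
pcm.setformat(alsaaudio.PCM_FORMAT_S16_LE)
pcm.setperiodsize(160)            # 10 ms of audio per read

THRESHOLD = 500                   # placeholder, tune experimentally

while True:
    length, data = pcm.read()     # non-blocking: length == 0 if no data yet
    if length > 0:
        samples = np.frombuffer(data, dtype=np.int16)
        rms = np.sqrt(np.mean(samples.astype(np.float64) ** 2))
        if rms > THRESHOLD:
            print("sound detected, rms =", rms)
    time.sleep(0.001)
\end{verbatim}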
I am still troubleshooting, but I do have a backup plan: using a dedicated sound sensor instead. This offers less precision and specificity, since the sensor simply sends a signal whenever sound is detected. The only additional requirement is a quiet environment, at least for initial testing. I may also mount one sensor on each side of the robot to provide basic binaural capability. This would help with distance evaluation or, at the very least, direction estimation.\\
This diagram from Hooke Audio describes the basic principle of binaural audio as it exists in humans. \\
\includegraphics[scale=0.17]{IMG_2194.JPG}\\
Although this idea seems to have the potential to solve many issues, I already have two predictions about problems that may arise. First, the distance between the sensors may be too small for them to register a meaningful difference. Second, the setup may not accurately reflect the actual physiology of \textit{C. elegans}. I still think it is worth testing, if only to learn something new.\\
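If I do try the two-sensor setup, the polling loop could start out as simple as the sketch below; the pin numbers are hypothetical, and the logic assumes the sensor modules drive their output high on detection (some boards are active-low).
\begin{verbatim}
# Sketch of the backup plan: two digital sound-sensor modules,
# one on each side of the robot. Pin numbers are placeholders.
import time
import RPi.GPIO as GPIO

LEFT_PIN, RIGHT_PIN = 17, 27      # hypothetical BCM pin numbers

GPIO.setmode(GPIO.BCM)
GPIO.setup(LEFT_PIN, GPIO.IN)
GPIO.setup(RIGHT_PIN, GPIO.IN)

try:
    while True:
        left, right = GPIO.input(LEFT_PIN), GPIO.input(RIGHT_PIN)
        if left and not right:
            print("sound mostly from the left")
        elif right and not left:
            print("sound mostly from the right")
        elif left and right:
            print("sound detected on both sides")
        time.sleep(0.01)
finally:
    GPIO.cleanup()
\end{verbatim}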
\end{document}
%%
%% End of file `elsarticle-template-1-num.tex'.
\documentclass[a4paper,10pt]{scrartcl}
\usepackage[utf8]{inputenc}
\usepackage{amsmath}
\usepackage{fancyhdr}
\pagestyle{fancy}
%\usepackage{dsfont}
\usepackage{amsfonts}
\usepackage{amsthm}
\usepackage{graphicx}
\usepackage[small,nooneline,bf,hang]{caption2}
\usepackage{float}
\usepackage{hyperref}
%opening
\title{Usage of libNMF 1.03}
\begin{document}
\fancyhead{}
\rhead{Usage}
\lhead{libNMF 1.03}
\maketitle
\section{Building the library}
The library can be built via the provided makefile, which by default uses the ``gcc'' compiler.\newline\newline
The target \emph{lib} builds the static library and the target \emph{shared} builds the shared library.
Note that, by default, the makefile compiles without the compiler flag \emph{-fPIC} (position-independent code), which is
required for building the shared library.\newline\newline
Optionally, the compiler flags in the makefile can be adjusted before building the library.
\section{Building the library running Windows}
The libNMF library was successfully built under Windows XP using ``Cygwin''. We also tested the installation using ``Windows Services for UNIX'' and ``MinGW'', but both attempts failed with errors when running the Makefile.
\section{Linking the library}
Linking against the library also requires linking the ARPACK, BLAS and LAPACK libraries.
\section{Usage}
This library can be used by either calling \emph{nmfDriver} or the individual computational routines for
a non-negative matrix factorization. The following example files can be found in the directory ``example''.\newline
An example call for the first case can be found in the file ``example.c''.\newline
An example call for the second case can be found in the file ``example\_withoutdriver.c''.\newline
\end{document}
\chapter{JAXB Data Binding Framework}
Source name: \textbf{jaxb}
\section{Source grammar}
\begin{itemize}\item Source artifact: \href{http://github.com/grammarware/slps/blob/master/topics/fl/java3/fl/Apply.java}{topics/fl/java3/fl/Apply.java}\item Source artifact: \href{http://github.com/grammarware/slps/blob/master/topics/fl/java3/fl/Argument.java}{topics/fl/java3/fl/Argument.java}\item Source artifact: \href{http://github.com/grammarware/slps/blob/master/topics/fl/java3/fl/Binary.java}{topics/fl/java3/fl/Binary.java}\item Source artifact: \href{http://github.com/grammarware/slps/blob/master/topics/fl/java3/fl/Expr.java}{topics/fl/java3/fl/Expr.java}\item Source artifact: \href{http://github.com/grammarware/slps/blob/master/topics/fl/java3/fl/Function.java}{topics/fl/java3/fl/Function.java}\item Source artifact: \href{http://github.com/grammarware/slps/blob/master/topics/fl/java3/fl/IfThenElse.java}{topics/fl/java3/fl/IfThenElse.java}\item Source artifact: \href{http://github.com/grammarware/slps/blob/master/topics/fl/java3/fl/Literal.java}{topics/fl/java3/fl/Literal.java}\item Source artifact: \href{http://github.com/grammarware/slps/blob/master/topics/fl/java3/fl/ObjectFactory.java}{topics/fl/java3/fl/ObjectFactory.java}\item Source artifact: \href{http://github.com/grammarware/slps/blob/master/topics/fl/java3/fl/Ops.java}{topics/fl/java3/fl/Ops.java}\item Source artifact: \href{http://github.com/grammarware/slps/blob/master/topics/fl/java3/fl/Program.java}{topics/fl/java3/fl/Program.java}\item Source artifact: \href{http://github.com/grammarware/slps/blob/master/topics/fl/java3/fl/package-info.java}{topics/fl/java3/fl/package-info.java}\item Grammar extractor: \href{http://github.com/grammarware/slps/blob/master/topics/extraction/java2bgf/slps/java2bgf/Tool.java}{topics/extraction/java2bgf/slps/java2bgf/Tool.java}\end{itemize}
\footnotesize\begin{center}\begin{tabular}{|l|}\hline
\multicolumn{1}{|>{\columncolor[gray]{.9}}c|}{\footnotesize \textbf{Production rules}}
\\\hline
$\mathrm{p}(\text{`'},\mathit{Apply},\mathrm{seq}\left(\left[\mathrm{sel}\left(\text{`Name'},str\right), \mathrm{sel}\left(\text{`Arg'},\star \left(\mathit{Expr}\right)\right)\right]\right))$ \\
$\mathrm{p}(\text{`'},\mathit{Argument},\mathrm{sel}\left(\text{`Name'},str\right))$ \\
$\mathrm{p}(\text{`'},\mathit{Binary},\mathrm{seq}\left(\left[\mathrm{sel}\left(\text{`Ops'},\mathit{Ops}\right), \mathrm{sel}\left(\text{`Left'},\mathit{Expr}\right), \mathrm{sel}\left(\text{`Right'},\mathit{Expr}\right)\right]\right))$ \\
$\mathrm{p}(\text{`'},\mathit{Expr},\mathrm{choice}([\mathit{Apply},$\\$\qquad\qquad\mathit{Argument},$\\$\qquad\qquad\mathit{Binary},$\\$\qquad\qquad\mathit{IfThenElse},$\\$\qquad\qquad\mathit{Literal}]))$ \\
$\mathrm{p}(\text{`'},\mathit{Function},\mathrm{seq}\left(\left[\mathrm{sel}\left(\text{`Name'},str\right), \mathrm{sel}\left(\text{`Arg'},\star \left(str\right)\right), \mathrm{sel}\left(\text{`Rhs'},\mathit{Expr}\right)\right]\right))$ \\
$\mathrm{p}(\text{`'},\mathit{IfThenElse},\mathrm{seq}\left(\left[\mathrm{sel}\left(\text{`IfExpr'},\mathit{Expr}\right), \mathrm{sel}\left(\text{`ThenExpr'},\mathit{Expr}\right), \mathrm{sel}\left(\text{`ElseExpr'},\mathit{Expr}\right)\right]\right))$ \\
$\mathrm{p}(\text{`'},\mathit{Literal},\mathrm{sel}\left(\text{`Info'},int\right))$ \\
$\mathrm{p}(\text{`'},\mathit{ObjectFactory},\varepsilon)$ \\
$\mathrm{p}(\text{`'},\mathit{Ops},\mathrm{choice}([\mathrm{sel}\left(\text{`EQUAL'},\varepsilon\right),$\\$\qquad\qquad\mathrm{sel}\left(\text{`PLUS'},\varepsilon\right),$\\$\qquad\qquad\mathrm{sel}\left(\text{`MINUS'},\varepsilon\right)]))$ \\
$\mathrm{p}(\text{`'},\mathit{package-info},\varphi)$ \\
$\mathrm{p}(\text{`'},\mathit{Program},\mathrm{sel}\left(\text{`Function'},\star \left(\mathit{Function}\right)\right))$ \\
\hline\end{tabular}\end{center}
\section{Normalizations}
{\footnotesize\begin{itemize}
\item \textbf{reroot-reroot} $\left[\right]$ to $\left[\mathit{Program}\right]$
\item \textbf{unlabel-designate}\\$\mathrm{p}\left(\fbox{\text{`Name'}},\mathit{Argument},str\right)$
\item \textbf{unlabel-designate}\\$\mathrm{p}\left(\fbox{\text{`Info'}},\mathit{Literal},int\right)$
\item \textbf{unlabel-designate}\\$\mathrm{p}\left(\fbox{\text{`Function'}},\mathit{Program},\star \left(\mathit{Function}\right)\right)$
\item \textbf{anonymize-deanonymize}\\$\mathrm{p}\left(\text{`'},\mathit{IfThenElse},\mathrm{seq}\left(\left[\fbox{$\mathrm{sel}\left(\text{`IfExpr'},\mathit{Expr}\right)$}, \fbox{$\mathrm{sel}\left(\text{`ThenExpr'},\mathit{Expr}\right)$}, \fbox{$\mathrm{sel}\left(\text{`ElseExpr'},\mathit{Expr}\right)$}\right]\right)\right)$
\item \textbf{anonymize-deanonymize}\\$\mathrm{p}\left(\text{`'},\mathit{Function},\mathrm{seq}\left(\left[\fbox{$\mathrm{sel}\left(\text{`Name'},str\right)$}, \fbox{$\mathrm{sel}\left(\text{`Arg'},\star \left(str\right)\right)$}, \fbox{$\mathrm{sel}\left(\text{`Rhs'},\mathit{Expr}\right)$}\right]\right)\right)$
\item \textbf{anonymize-deanonymize}\\$\mathrm{p}\left(\text{`'},\mathit{Binary},\mathrm{seq}\left(\left[\fbox{$\mathrm{sel}\left(\text{`Ops'},\mathit{Ops}\right)$}, \fbox{$\mathrm{sel}\left(\text{`Left'},\mathit{Expr}\right)$}, \fbox{$\mathrm{sel}\left(\text{`Right'},\mathit{Expr}\right)$}\right]\right)\right)$
\item \textbf{anonymize-deanonymize}\\$\mathrm{p}\left(\text{`'},\mathit{Apply},\mathrm{seq}\left(\left[\fbox{$\mathrm{sel}\left(\text{`Name'},str\right)$}, \fbox{$\mathrm{sel}\left(\text{`Arg'},\star \left(\mathit{Expr}\right)\right)$}\right]\right)\right)$
\item \textbf{anonymize-deanonymize}\\$\mathrm{p}\left(\text{`'},\mathit{Ops},\mathrm{choice}\left(\left[\fbox{$\mathrm{sel}\left(\text{`EQUAL'},\varepsilon\right)$}, \fbox{$\mathrm{sel}\left(\text{`PLUS'},\varepsilon\right)$}, \fbox{$\mathrm{sel}\left(\text{`MINUS'},\varepsilon\right)$}\right]\right)\right)$
\item \textbf{vertical-horizontal} in $\mathit{Expr}$
\item \textbf{undefine-define}\\$\mathrm{p}\left(\text{`'},\mathit{Ops},\varepsilon\right)$
\item \textbf{eliminate-introduce}\\$\mathrm{p}\left(\text{`'},\mathit{ObjectFactory},\varepsilon\right)$
\item \textbf{eliminate-introduce}\\$\mathrm{p}\left(\text{`'},\mathit{package-info},\varphi\right)$
\item \textbf{unchain-chain}\\$\mathrm{p}\left(\text{`'},\mathit{Expr},\mathit{Apply}\right)$
\item \textbf{unchain-chain}\\$\mathrm{p}\left(\text{`'},\mathit{Expr},\mathit{Argument}\right)$
\item \textbf{unchain-chain}\\$\mathrm{p}\left(\text{`'},\mathit{Expr},\mathit{Binary}\right)$
\item \textbf{unchain-chain}\\$\mathrm{p}\left(\text{`'},\mathit{Expr},\mathit{IfThenElse}\right)$
\item \textbf{unchain-chain}\\$\mathrm{p}\left(\text{`'},\mathit{Expr},\mathit{Literal}\right)$
\item \textbf{unlabel-designate}\\$\mathrm{p}\left(\fbox{\text{`Apply'}},\mathit{Expr},\mathrm{seq}\left(\left[str, \star \left(\mathit{Expr}\right)\right]\right)\right)$
\item \textbf{unlabel-designate}\\$\mathrm{p}\left(\fbox{\text{`Argument'}},\mathit{Expr},str\right)$
\item \textbf{unlabel-designate}\\$\mathrm{p}\left(\fbox{\text{`Binary'}},\mathit{Expr},\mathrm{seq}\left(\left[\mathit{Ops}, \mathit{Expr}, \mathit{Expr}\right]\right)\right)$
\item \textbf{unlabel-designate}\\$\mathrm{p}\left(\fbox{\text{`IfThenElse'}},\mathit{Expr},\mathrm{seq}\left(\left[\mathit{Expr}, \mathit{Expr}, \mathit{Expr}\right]\right)\right)$
\item \textbf{unlabel-designate}\\$\mathrm{p}\left(\fbox{\text{`Literal'}},\mathit{Expr},int\right)$
\item \textbf{extract-inline} in $\mathit{Expr}$\\$\mathrm{p}\left(\text{`'},\mathit{Expr_1},\mathrm{seq}\left(\left[str, \star \left(\mathit{Expr}\right)\right]\right)\right)$
\item \textbf{extract-inline} in $\mathit{Expr}$\\$\mathrm{p}\left(\text{`'},\mathit{Expr_2},\mathrm{seq}\left(\left[\mathit{Ops}, \mathit{Expr}, \mathit{Expr}\right]\right)\right)$
\item \textbf{extract-inline} in $\mathit{Expr}$\\$\mathrm{p}\left(\text{`'},\mathit{Expr_3},\mathrm{seq}\left(\left[\mathit{Expr}, \mathit{Expr}, \mathit{Expr}\right]\right)\right)$
\end{itemize}}
\section{Grammar in ANF}
\footnotesize\begin{center}\begin{tabular}{|l|c|}\hline
\multicolumn{1}{|>{\columncolor[gray]{.9}}c|}{\footnotesize \textbf{Production rule}} &
\multicolumn{1}{>{\columncolor[gray]{.9}}c|}{\footnotesize \textbf{Production signature}}
\\\hline
$\mathrm{p}\left(\text{`'},\mathit{Expr},\mathit{Expr_1}\right)$ & $\{ \langle \mathit{Expr_1}, 1\rangle\}$\\
$\mathrm{p}\left(\text{`'},\mathit{Expr},str\right)$ & $\{ \langle str, 1\rangle\}$\\
$\mathrm{p}\left(\text{`'},\mathit{Expr},\mathit{Expr_2}\right)$ & $\{ \langle \mathit{Expr_2}, 1\rangle\}$\\
$\mathrm{p}\left(\text{`'},\mathit{Expr},\mathit{Expr_3}\right)$ & $\{ \langle \mathit{Expr_3}, 1\rangle\}$\\
$\mathrm{p}\left(\text{`'},\mathit{Expr},int\right)$ & $\{ \langle int, 1\rangle\}$\\
$\mathrm{p}\left(\text{`'},\mathit{Function},\mathrm{seq}\left(\left[str, \star \left(str\right), \mathit{Expr}\right]\right)\right)$ & $\{ \langle \mathit{Expr}, 1\rangle, \langle str, 1{*}\rangle\}$\\
$\mathrm{p}\left(\text{`'},\mathit{Program},\star \left(\mathit{Function}\right)\right)$ & $\{ \langle \mathit{Function}, {*}\rangle\}$\\
$\mathrm{p}\left(\text{`'},\mathit{Expr_1},\mathrm{seq}\left(\left[str, \star \left(\mathit{Expr}\right)\right]\right)\right)$ & $\{ \langle str, 1\rangle, \langle \mathit{Expr}, {*}\rangle\}$\\
$\mathrm{p}\left(\text{`'},\mathit{Expr_2},\mathrm{seq}\left(\left[\mathit{Ops}, \mathit{Expr}, \mathit{Expr}\right]\right)\right)$ & $\{ \langle \mathit{Ops}, 1\rangle, \langle \mathit{Expr}, 11\rangle\}$\\
$\mathrm{p}\left(\text{`'},\mathit{Expr_3},\mathrm{seq}\left(\left[\mathit{Expr}, \mathit{Expr}, \mathit{Expr}\right]\right)\right)$ & $\{ \langle \mathit{Expr}, 111\rangle\}$\\
\hline\end{tabular}\end{center}
\section{Nominal resolution}
Production rules are matched as follows (ANF on the left, master grammar on the right):
\begin{eqnarray*}
\mathrm{p}\left(\text{`'},\mathit{Expr},\mathit{Expr_1}\right) & \bumpeq & \mathrm{p}\left(\text{`'},\mathit{expression},\mathit{apply}\right) \\
\mathrm{p}\left(\text{`'},\mathit{Expr},str\right) & \bumpeq & \mathrm{p}\left(\text{`'},\mathit{expression},str\right) \\
\mathrm{p}\left(\text{`'},\mathit{Expr},\mathit{Expr_2}\right) & \bumpeq & \mathrm{p}\left(\text{`'},\mathit{expression},\mathit{binary}\right) \\
\mathrm{p}\left(\text{`'},\mathit{Expr},\mathit{Expr_3}\right) & \bumpeq & \mathrm{p}\left(\text{`'},\mathit{expression},\mathit{conditional}\right) \\
\mathrm{p}\left(\text{`'},\mathit{Expr},int\right) & \bumpeq & \mathrm{p}\left(\text{`'},\mathit{expression},int\right) \\
\mathrm{p}\left(\text{`'},\mathit{Function},\mathrm{seq}\left(\left[str, \star \left(str\right), \mathit{Expr}\right]\right)\right) & \Bumpeq & \mathrm{p}\left(\text{`'},\mathit{function},\mathrm{seq}\left(\left[str, \plus \left(str\right), \mathit{expression}\right]\right)\right) \\
\mathrm{p}\left(\text{`'},\mathit{Program},\star \left(\mathit{Function}\right)\right) & \Bumpeq & \mathrm{p}\left(\text{`'},\mathit{program},\plus \left(\mathit{function}\right)\right) \\
\mathrm{p}\left(\text{`'},\mathit{Expr_1},\mathrm{seq}\left(\left[str, \star \left(\mathit{Expr}\right)\right]\right)\right) & \Bumpeq & \mathrm{p}\left(\text{`'},\mathit{apply},\mathrm{seq}\left(\left[str, \plus \left(\mathit{expression}\right)\right]\right)\right) \\
\mathrm{p}\left(\text{`'},\mathit{Expr_2},\mathrm{seq}\left(\left[\mathit{Ops}, \mathit{Expr}, \mathit{Expr}\right]\right)\right) & \Bumpeq & \mathrm{p}\left(\text{`'},\mathit{binary},\mathrm{seq}\left(\left[\mathit{expression}, \mathit{operator}, \mathit{expression}\right]\right)\right) \\
\mathrm{p}\left(\text{`'},\mathit{Expr_3},\mathrm{seq}\left(\left[\mathit{Expr}, \mathit{Expr}, \mathit{Expr}\right]\right)\right) & \bumpeq & \mathrm{p}\left(\text{`'},\mathit{conditional},\mathrm{seq}\left(\left[\mathit{expression}, \mathit{expression}, \mathit{expression}\right]\right)\right) \\
\end{eqnarray*}
This yields the following nominal mapping:
\begin{align*}\mathit{jaxb} \:\diamond\: \mathit{master} =\:& \{\langle \mathit{Expr_2},\mathit{binary}\rangle,\\
& \langle \mathit{Expr_3},\mathit{conditional}\rangle,\\
& \langle int,int\rangle,\\
& \langle \mathit{Function},\mathit{function}\rangle,\\
& \langle str,str\rangle,\\
& \langle \mathit{Program},\mathit{program}\rangle,\\
& \langle \mathit{Expr},\mathit{expression}\rangle,\\
& \langle \mathit{Expr_1},\mathit{apply}\rangle,\\
& \langle \mathit{Ops},\mathit{operator}\rangle\}\end{align*}
This mapping is exercised with the following grammar transformation steps:
{\footnotesize\begin{itemize}
\item \textbf{renameN-renameN} $\mathit{Expr_2}$ to $\mathit{binary}$
\item \textbf{renameN-renameN} $\mathit{Expr_3}$ to $\mathit{conditional}$
\item \textbf{renameN-renameN} $\mathit{Function}$ to $\mathit{function}$
\item \textbf{renameN-renameN} $\mathit{Program}$ to $\mathit{program}$
\item \textbf{renameN-renameN} $\mathit{Expr}$ to $\mathit{expression}$
\item \textbf{renameN-renameN} $\mathit{Expr_1}$ to $\mathit{apply}$
\item \textbf{renameN-renameN} $\mathit{Ops}$ to $\mathit{operator}$
\end{itemize}}
\section{Structural resolution}
{\footnotesize\begin{itemize}
\item \textbf{narrow-widen} in $\mathit{function}$\\$\star \left(str\right)$\\$\plus \left(str\right)$
\item \textbf{narrow-widen} in $\mathit{program}$\\$\star \left(\mathit{function}\right)$\\$\plus \left(\mathit{function}\right)$
\item \textbf{narrow-widen} in $\mathit{apply}$\\$\star \left(\mathit{expression}\right)$\\$\plus \left(\mathit{expression}\right)$
\item \textbf{permute-permute}\\$\mathrm{p}\left(\text{`'},\mathit{binary},\mathrm{seq}\left(\left[\mathit{operator}, \mathit{expression}, \mathit{expression}\right]\right)\right)$\\$\mathrm{p}\left(\text{`'},\mathit{binary},\mathrm{seq}\left(\left[\mathit{expression}, \mathit{operator}, \mathit{expression}\right]\right)\right)$
\end{itemize}}
% ----------------------------------------------------------------------------
% Abstract
% ----------------------------------------------------------------------------
\documentclass[thesis.tex]{subfiles}
\begin{document}
\chapter*{\centering Abstract}
% Insert abstract here
\end{document}
% corrected VD 78
\subsubsection{Feature extraction with MFCC in Python}~\label{librosa}~\\
% -Explanation of the English dataset
For the pilot study, we use an audio dataset of spoken English
words\cite{DBLP:journals/corr/abs-1804-03209}. It contains 105829 utterances of
35 words, pronounced by 2618 different speakers.
Every utterance is roughly $1s$ long, sampled at $16kHz$ and saved as a WAVE
file. From this dataset we select the words $w \in \{'zero', 'one',
'two', \dots , 'eight', 'nine'\}$.\\
We use the Python library \textit{librosa}~\cite{librosa} to extract the MFCCs from the
dataset. For clarity, the following steps are described for one single audio file $f$.\\
\begin{enumerate}[label=\arabic*.]
\item Let $f$ be an audio file sampled at $16kHz$. \textit{Librosa} loads the
file as follows:
\begin{lstlisting}
import librosa
y, sr = librosa.load(f, sr=16000, mono=True, duration=1)
\end{lstlisting}
with $y$ being the audio time series and $sr$ the sample rate of $y$.\\
\item The next step is to pad the time series $y$ if it is shorter than $1s$:
\begin{lstlisting}
import numpy as np
if y.size < 16000:
    rest = 16000 - y.size
    left = rest // 2
    right = rest - left
    y = np.pad(y, (left, right), 'reflect')
\end{lstlisting}
The time series is reflected on both sides to get the correct size.\\
\item After matching the time series to the correct size, the MFCCs can be extracted
with:
\begin{lstlisting}
mfccs = librosa.feature.mfcc(y=y, sr=sr)
\end{lstlisting}
The function \textit{.mfcc()} returns an \textit{np.ndarray} with the dimension:
\begin{equation*}
shape=(n\_mfcc,t)
\end{equation*}
with $n\_mfcc$ being the number of MFCCs and $t$ the number of frames.
\item The last step is to flatten this matrix to a vector which can be stored in a
data frame for later training.
\begin{lstlisting}
row = mfccs.T.flatten()
\end{lstlisting}
The matrix $mfccs$ is transposed and flattened. This concatenates the columns of the matrix,
forming a single feature vector.
\end{enumerate}~\\
These steps are then applied to the entire dataset. Each computed vector is
appended to a data frame, which is then stored as a $.csv$ file. The
entire code snippet is given in the Appendix in Figure~\ref{mfccsnip}.
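For orientation, a condensed and purely illustrative sketch of this loop is shown below; the directory layout, the label handling via folder names, and the use of \textit{pandas} are assumptions made for this sketch, not a reproduction of the Appendix code.
\begin{lstlisting}
# Illustrative sketch of the dataset loop (not the Appendix code).
import glob
import librosa
import numpy as np
import pandas as pd

rows = []
for f in glob.glob('dataset/*/*.wav'):   # assumed layout: dataset/<word>/<file>.wav
    label = f.split('/')[-2]             # word taken from the folder name
    y, sr = librosa.load(f, sr=16000, mono=True, duration=1)
    if y.size < 16000:                   # pad short clips by reflection
        rest = 16000 - y.size
        left = rest // 2
        y = np.pad(y, (left, rest - left), 'reflect')
    mfccs = librosa.feature.mfcc(y=y, sr=sr)
    rows.append(list(mfccs.T.flatten()) + [label])

pd.DataFrame(rows).to_csv('mfcc_features.csv', index=False)
\end{lstlisting}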
\subsection{Static vs. Run-time development in Web-technologies}
%% #ModernWebDev #NotLive #Security #EditCompileRun
The ongoing evolution of the JavaScript language and of Web technologies in the browser makes developing for the browser a rich but complex endeavor. The constant pressure to keep Web technologies secure exacts a high price on the dynamic features of JavaScript and Web development. Many of the new technologies can only be experienced in a very static development work-flow of editing files and compiling them. Technologies such as babel and Web-components allow developers to write better JavaScript and HTML, but at the cost of trading the potentially highly dynamic development experience at run-time for a slow edit-compile-run cycle.
\section{Module Systems in the Browser}
\section{Implementation Notes}
\begin{itemize}
\item Enable simple ``doits''
\item Enable printits (we expect a return value)
\item Accessing ``context variables''
  \begin{itemize}
  \item simple case: binding ``this''
  \item binding the complete context
  \end{itemize}
\item DevLayer and module system: we expect modules with identity and not copies
\item How can these reflection capabilities be made available to other tools/modules, and not just as a global standard feature? E.g., a constraints library
\end{itemize}
\input{problems.tex}
\section{Modules}
\begin{verbatim}
import * as m from 'demos/mymodule.js'
export function f() {}
f() // 1. public...
m.foo() // 2. libraries...
var b = 3 // 3. module wide scope???
function g() {
  var c = 4 // 4. private nested code [NO] use debugger for this or FW
}
\end{verbatim}
\paragraph{Persistence of Lively Objects and Web-components}
Persistence of tools and applications can be achieved if we can (de-)serialize their (object) state. We need persistence of tools, applications, and active content for the following reasons:
\begin{itemize}
\item working in a prototypical way: creating one instance of a tool, application, or active object and using it later (either the same instance or copies of it)
\item copying of objects: deep copying of complex objects comes for free when we can (de-)serialize them...
\end{itemize}
%% #FutureWork
%% #Figure Editing a method in a class in a module vs. editing the template of a Web-component #TODO
\subsection{Copying of Objects}
\begin{itemize}
\item creating a button, customizing it and then cloning it... generally, reusing anything anywhere
\item making experiments: being able to clone the state of a tool, application, or content allows users and developers to experiment freely
\item copying of objects is useful in interactive development of (active) objects (tools and applications) to more freely experiment with the object under development %% #LiveProgramming #Cloning #Patrick?
\end{itemize}
\subsection{Abstraction Levels hinder and help in Live programming}
The abstraction mechanism of elements hidden under a shadow root enables us to ``reload'', update, and migrate partial state.
State we decided to preserve:
\begin{itemize}
\item HTML Element properties
\item HTML Element childNodes
\item Custom state through implementing "livelyMigrate" API
\end{itemize}
\subsection{Web-component Templates}
%% #Abstractions in #HTML #NoLoops
Templates are a separation of the UI from the code. Since templates are themselves just declarative HTML they don't allow the use of typical programming language abstractions such as loops or function calls. Luckily, the templates are usually accompanied by scripts that can provide all kinds of abstractions since they are just JavaScript code. In Lively4 we experimented with multiple ways to describe our Web-components with HTML templates and scripts:
\begin{itemize}
\item one template file with one huge script tag, that defines a custom HTML Element class (or prototype), and does the registering itself.
\item one template file with custom script tags that each only add one method to the HTML elements in the templates
\item one template file and a separate JavaScript file
\end{itemize}
\subsection{Instance Migration as Simple Replay Mechanism}
In Lively Kernel, as in other Smalltalk-like systems, feedback on a code change can only be observed in new behavior, because only the behavior in classes is updated. For example, changing code in Smalltalk that is executed in a ``step'' method gives immediate feedback. Changes to code in the initialize method, on the other hand, cannot give feedback at run-time without instantiating new instances.
Systems that allow for some form of mutable past may be able to give feedback on changes to the initialize method (and to other methods called in the past).
\paragraph{Our Design in Lively4}
Code in the "initialize" method can produce feedback, because we migrate all graphical objects that are affected by the change, and execute "initialize" again, before migrating their state.
Currently, in Lively4 the feedback relies on idiomatic instance migration. Classes can implement a special migrate method, which allows new instances to copy or adapt state from the old instance.
Alternatively, we could migrate all state automatically, which would lead to the problem that the old state might shadow the new state intended by the programmer. So we leave it to the programmer to decide which state is relevant and which is only transient.
For all graphical objects (DOM elements), we consider their attributes to be persistent state that we want to preserve.
Since object properties are not declared in JavaScript, we cannot distinguish transient from persistable state through annotations.
Migrating objects in this way means that (currently) Lively4 does not preserve object identity.
\paragraph{Alternative more Smalltalk-ish Design}
Instead of migrating instances, in Smalltalk the behavior is changed globally in the classes. Through VM support for instance migration, objects can keep their identity even if their memory layout changes completely.
This is not a problem we are dealing with here.
For updating a method or a complete class, we could:
\begin{itemize}
\item a) modify the properties/methods of the prototype
\item b) change the prototype on the instance side (deprecated \_\_proto\_\_ )
\item c) wrap classes in Proxies that allow changing the reference to the class
\end{itemize}
%%% #Evaluation
\subsection{Compare Bouncing Ball (SoapBubble) development experience}
\begin{itemize}
\item in HTML (show the lack of abstraction and the edit-and-reload cycle, with a lack of ``preservation of context'')
\item Lively Kernel (show development at run-time with ``preservation of context'')
\item Web Components (nice abstraction on source code level, but lack of abstraction at run-time)
\item Lively4
\end{itemize}
\section{Preliminaries} \label{gua:sec:prelim}\label{gua:sec:definitions}
Many definitions intersect with those defined in previous chapters,
but to keep the chapter self-contained we define them here.
Notation:
$\bbB = \{\true,\false\}$ is the set of Boolean values,
$\bbN$ is the set of natural numbers (excluding $0$),
$\bbN_0 = \bbN\cup\{0\}$,
$[k]$ is the set $\{i \in \bbN \| i \leq k\}$
and $[0..k]$ is the set $[k] \cup \{0\}$ for $k \in \bbN$.
For a sequence $x=x_1x_2\ldots$ denote the $i$-$j$-subsequence as $x\slice{i}{j}$,
i.e., ${x\slice{i}{j}}=x_i \ldots x_j$.
\subsection{System Model} \label{gua:sec:model}
We consider systems $A {\parallel} B^n$, usually written $\largesys$,
consisting of
one copy of a process template $A$ and $n$ copies of a process template $B$,
in an interleaving parallel composition.%
%% AK: moved to a separate note, to be able to explain why we _cannot_ generalize for 1-conj
%\footnote{As shown in \cite{Emerson00}, cutoffs for this case generalize to cutoffs
% for systems of the form $A^m {\parallel} B^n$, and further
% to systems with an arbitrary number of process templates
% $U_1^{n_1} {\parallel} \ldots {\parallel} U_m^{n_m}$.}
We distinguish objects that belong to different templates by indexing them with
the template. E.g., for process template $U \in \{A,B\}$, $Q_U$ is the set of
states of $U$. For this section, fix two disjoint finite sets $Q_A$, $Q_B$ as
sets of states of process templates $A$ and $B$, and a positive integer $n$.
\parbf{Processes} A \emph{process template}
is a transition system
$U=(\stateset, \init, \inputs, \delta)$ with
\begin{itemize}
\item $\stateset$ is a finite set of states including the
initial state $\init$,
\item $\inputs$ is a finite input alphabet,
\item $\delta: \stateset \times \inputs \times \mP(Q_A \cupdot Q_B) \times \stateset$ is a guarded transition relation.
\end{itemize}
A process template is \emph{closed} if $\inputs = \emptyset$, and otherwise \emph{open}.
By $\transition{q_i}{q_j}{e:g}$
we denote a process transition from $q_i$ to $q_j$
for input $e \in \Sigma$ and guarded by guard $g \in \mP(Q_A \cupdot Q_B)$.
We skip the input $e$ and guard $g$
if they are not important or can be inferred from the context.
We define the size $\card{U}$ of a process template $U \in \{A,B\}$ as $\card{\stateset_U}$. A copy of a template $U$ will be called a \emph{$U$-process}.
Different $B$-processes are distinguished by subscript, i.e., for $i \in [1..n]$, $B_i$ is the $i$th copy of $B$, and $\state_{B_i}$ is a state of $B_i$. A state of the $A$-process is denoted by $q_A$.
For the rest of this subsection, fix templates $A$ and $B$. We assume that $\inputs_A \cap \inputs_B = \emptyset$. We will also write $p$ for a process in $\{ A, B_1, \ldots, B_n\}$, unless $p$ is specified explicitly.
We often denote the set $\{B_1,...,B_n\}$ as $\mB$.
\parbf{Disjunctive and conjunctive systems}
In a system $\largesys$,
consider the global state $s = (\state_A,\state_{B_1},\ldots,\state_{B_n})$ and
global input $e=(\localin_A,\localin_{B_1},\ldots,\localin_{B_n})$.
We write $s(p)$ for $q_p$, and $e(p)$ for $\sigma_p$.
A local transition $(\state_p,\localin_p,g,\state_p') \in \delta_U$ of a process $p$ is \emph{enabled for $s$ and $e$}
if the \emph{guard} $g$ is satisfied by the state $s$ wrt.\ the process $p$, written $(s,p) \models g$ (defined below).
The semantics of $(s,p) \models g$ differs for disjunctive and conjunctive systems:
%
\begin{align*}
\text{In disjunctive systems: } & (s,p) \models g \text{~~~iff~~~}
\exists p' \in \{A,B_1,\ldots,B_n\} \setminus \{p\}:\ \ \state_{p'} \in g. \\
\text{In conjunctive systems: } & (s,p) \models g \text{~~~iff~~~}
\forall p' \in \{A,B_1,\ldots,B_n\} \setminus \{p\}:\ \ \state_{p'} \in g.
\end{align*}
Note that we check containment in the guard (disjunctively or conjunctively)
only for local states of processes \emph{different from} $p$. A process is \emph{enabled} for $s$ and $e$ if at least one of its transitions is enabled for $s$ and $e$, otherwise it is \emph{disabled}.
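For example, consider a system $(A,B)^{(1,2)}$ in global state $s=(q_A,q,q')$ and a transition of $B_1$ guarded by $g=\{q_A\}$ with $q' \notin g$: interpreted disjunctively, the guard is satisfied, since the $A$-process (a process different from $B_1$) is in $q_A \in g$; interpreted conjunctively, it is not, since $B_2$ is in $q' \notin g$.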
Like Emerson and Kahlon~\cite{Emerson00},
we assume that in conjunctive systems $\init_A$ and $\init_B$ are contained in all guards,
i.e., they act as neutral states.
Furthermore, we call a conjunctive system \emph{$1$-conjunctive} if every guard is of the form $(Q_A \cupdot Q_B) \setminus \{q\}$ for some $q \in Q_A\cupdot Q_B$.
Then, \largesys is defined as the transition
system $(S,\init_S,\globIn,\delta)$ with
\begin{itemize}
\item set of global states $S = \stateset_A \times \stateset_B^{n}$,
\item global initial state $\init_S = (\initstate_A,\initstate_B,\ldots,\initstate_B)$,
\item set of global inputs $\globIn = (\inputs_A) \times (\inputs_B)^{n}$,
\item and global transition relation $\delta \subseteq S \times \globIn \times S$ with $(s,e,s') \in \delta$ iff
\begin{enumerate}[label=\roman*)]
\item $s=(\state_A,\state_{B_1},\ldots,\state_{B_n})$,
\item $e=(\localin_A, \localin_{B_1},\ldots,\localin_{B_n})$, and
\item $s'$ is obtained from $s$ by replacing one local state $\state_p$ with a new local state $\state_p'$, where $p$ is a $U$-process with local transition $(\state_{p},\localin_{p},g,\state_p') \in \delta_U$ and $(s,p) \models g$.
Thus, we consider so-called interleaved systems,
where in each step exactly one process transits.
\end{enumerate}
\end{itemize}
We say that a system $\largesys$ is \emph{of type} $(A,B)$. It is called a
\emph{conjunctive system} if guards are interpreted conjunctively, and a
\emph{disjunctive system} if guards are interpreted disjunctively.
A system is \emph{closed} if all of its templates are closed.
\parbf{Runs}
A \emph{configuration} of a system is a triple $(s,e,p)$, where $s \in S$, $e
\in \globIn$, and $p$ is either a system process, or the special symbol $\bot$.
A \emph{path} of a system is a configuration sequence
$x = (s_1,e_1,p_1),(s_2,e_2,p_2),\ldots$ such that, for all $\time < |x|$, there is a
transition $(s_\time,e_\time,s_{\time+1}) \in \delta$ based on a local
transition of process $p_\time$. We say that process
$p_\time$ \emph{moves} at \emph{moment} $\time$.
Configuration $(s,e,\bot)$ appears
iff all processes are disabled for $s$ and $e$.
Also, for every $p$ and $\time < |x|$:
either $e_{\time+1}(p) = e_\time(p)$ or process $p$ moves at moment $\time$.
That is, the environment keeps the input to each process unchanged until
the process can read it.\footnote{By only considering inputs that are actually processed, we
approximate an
action-based semantics. Paths that do not fulfill this requirement are not
very interesting, since the environment can violate any interesting
specification that involves input signals by manipulating them when the
corresponding process is not allowed to move.}
A system \emph{run} is a maximal path starting in the initial state. Runs are either infinite, or they end in a configuration $(s,e,\bot)$. We say that a run is \emph{initializing} if every
%$B$-process
process
that moves infinitely often also visits
%$\initstate_B$
its $\initstate$
infinitely often.
Given a system path $x = (s_1,e_1,p_1),(s_2,e_2,p_2),\ldots$ and a process $p$, the \emph{local path} of $p$ in $x$ is the projection $x(p) = (s_1(p),e_1(p)),(s_2(p),e_2(p)),\ldots$ of $x$ onto local states and inputs of $p$.
Similarly, we define the projection on two processes $p_1,p_2$ denoted by $x(p_1,p_2)$.
%The \emph{destuttering} $\destutter(x)$\ak{make it work with inf runs} of a (local) path \sj{local path not defined} $x=x_0,x_1,\ldots$ is obtained by removing stuttering steps from the sequence, i.e., $\destutter(x)$ is the maximal subsequence $x'$ of $x$ such that for every $\time$ we have $x'_\time \neq x'_{\time+1}$. Two (local) paths $x$ and $y$ are \emph{stutter-equivalent}, written $x \simeq y$, if $\destutter(x)=\destutter(y)$. Define an extension of $\destutter$ to sets of paths in the obvious way. Then two systems $S_1, S_2$ are \emph{stutter-equivalent}, written $S_1 \simeq S_2$, if $\destutter(X_1) = \destutter(X_2)$, where $X_i$ is the set of all infinite runs of system $S_i$.
\parbf{Deadlocks and fairness}
A run is \emph{globally deadlocked} if it is finite.
An infinite run is \emph{locally deadlocked} for process $p$ if there exists $\time$ such that $p$ is disabled for all $s_{\time'},e_{\time'}$ with $\time'\ge \time$. A run is \emph{deadlocked} if it is locally or globally deadlocked.
A system \emph{has a (local/global) deadlock} if it has a (locally/globally) deadlocked run. Note that the absence of local deadlocks for all $p$ implies the absence of global deadlocks, but not the other way around.
A run $(s_1,e_1,p_1), (s_2,e_2,p_2),...$ is \emph{unconditionally-fair} if every process moves infinitely often.
A run is \emph{strong-fair} if it is infinite and, for every process $p$, if $p$ is enabled infinitely often, then $p$ moves infinitely often.
%\sj{weak fairness needed?} Finally, $x$ is \emph{weak-fair} if it is infinite and for every process $p$, if there exists $t$ such that $p$ is enabled for every $s_{\time'}, e_{\time'}$ with $\time' \ge \time$, then $p$ moves infinitely often.
We will discuss the role of deadlocks and fairness in synthesis in Section~\ref{gua:sec:paramsynt}.
\begin{remark}[$A^m {\parallel} B^n$]
One usually starts with studying parameterized systems of the form $A^n$ (having one process template),
then proceeds to systems of the form $A^m {\parallel} B^n$ (having two templates)
and $U_1^{n_1} {\parallel} \ldots {\parallel} U_m^{n_m}$ (having an arbitrary fixed number of templates).
Our work studies systems $A {\parallel} B^n$,
which have one $A$-process and a parameterized number of $B$-processes,
because the results for such systems can be generalized to systems $U_1^{n_1} {\parallel} \ldots {\parallel} U_m^{n_m}$
(see~\cite{Emerson00} for details).
This generalization works for our results as well,
except for the cutoffs for deadlock detection that are restricted to 1-conjunctive systems of the form $A\parallel B^n$
(Section~\ref{gua:sec:cutoffs}).
\end{remark}
\subsection{Specifications}
\label{gua:sec:semantics}
Fix templates $(A,B)$.
We consider formulas in $\LTLmX$---$\LTL$ without the next-time operator $\nextt$---%
that are prefixed by path quantifiers $\E$ or $\A$
(for LTL and path quantifiers see Section~\ref{defs:ctlstar}).
Let $h(A,B_{i_1},\ldots,B_{i_k})$ be an $\LTLmX$ formula over atomic propositions from $Q_A \cup \Sigma_A$ and indexed propositions from $(Q_B \cup \Sigma_B) \times \{i_1,\ldots,i_k\}$.
For a system $\largesys$ with $n \geq k$ and every $i_j \in [1..n]$,
satisfaction of $\A h(A,B_{i_1},\ldots,B_{i_k})$ and $\E h(A,B_{i_1},\ldots,B_{i_k})$ is defined in the usual way.
\parbf{Parameterized specifications} \label{gua:sec:parameterized}
A \emph{parameterized specification} is a temporal logic formula
with indexed atomic propositions and quantification over indices.
We consider formulas of the forms
$\forall{i_1,\ldots,i_k.} \A h(A,B_{i_1},\ldots,B_{i_k})$ and\\
$\forall{i_1,\ldots,i_k.} \E h(A,B_{i_1},\ldots,B_{i_k})$.
For a given $n \geq k$,
$$
\largesys \models \forall{i_1,{\ldots},i_k.} \A h(A,B_{i_1},{\ldots},B_{i_k})
$$
~iff~
$$
\largesys \models \!\!\!\!\!\!\!\!\bigwedge_{j_1 \neq {\ldots} \neq j_k \in [1..n]}\!\!\!\!\!\!\!\!\A h(A,B_{j_1},{\ldots},B_{j_k}).
$$
By symmetry of guarded systems (see~\cite{Emerson00}),
the second formula is equivalent to
$\largesys \models \A h(A,B_1,\ldots,B_k)$.
The formula $\A h(A,B_1,\ldots,B_k)$ is denoted by $\A h(A,B^{(k)})$,
and we often use it instead of the original $\forall{i_1,\ldots,i_k.} \A h(A,B_{i_1},...,B_{i_k})$.
For formulas with the path quantifier $\E$,
satisfaction is defined analogously
and is equivalent to satisfaction of $\E h(A,B^{(k)})$.
\begin{example}
Consider the formula
$$
\forall{i_1,i_2}.\A \big(\G (r_{i_1} \impl \F g_{i_1}) \land \G \neg (g_{i_1} \land g_{i_2})\big).
$$
By our definition, its satisfaction by a system $(A,B)^{(1,3)}$ means
\begin{align*}
(A,B)^{(1,3)} \models
\A \left(
\begin{aligned}
&\G(r_1 \impl \F g_1) \land \G(r_2 \impl \F g_2) \land \G(r_3 \impl \F g_3) \land \\
&\G \neg (g_1 \land g_2) \land \G \neg (g_1 \land g_3) \land \G \neg (g_2 \land g_3)
\end{aligned}
\right),
\end{align*}
where $g_1$ and $r_1$ refer to the propositions $g$ and $r$ of the process $B_1$,
$g_2$ and $r_2$ belong to $B_2$, and so on.
By symmetry, the latter satisfaction is equivalent to
\begin{align*}
(A,B)^{(1,3)} \models
\A \left(
\begin{aligned}
&\G(r_1 \impl \F g_1) \land \G(r_2 \impl \F g_2) \land \\
&\G \neg (g_1 \land g_2)
\end{aligned}
\right).
\end{align*}
Note that this formula talks about processes $B_1$ and $B_2$, but does not mention $B_3$.
\end{example}
\parbf{Specification of fairness and local deadlocks}
It is often convenient to express fairness assumptions and local deadlocks
as parameterized specifications.
To this end,
define auxiliary atomic propositions $\sched_p$ and $\enabled_p$ for every process $p$ of system $(A,B)^{(1,n)}$. At moment $\time$ of a given run $(s_1,e_1,p_1),(s_2,e_2,p_2), \ldots$, let $\sched_p$ be true whenever $p_\time = p$, and let $\enabled_p$ be true if $p$ is enabled for $s_\time, e_\time$. Note that we only allow the use of these propositions to define fairness, but not in general specifications.
Then, an infinite run is
\begin{itemize}
\item \emph{local-deadlock-free} if it satisfies $\forall{p}. \GF \enabled_p$, abbreviated as $\spec_{\neg dead}$,
\item \emph{strong-fair} if it satisfies $\forall{p}. \GF \enabled_p \impl \GF \sched_p$, abbreviated as $\spec_{strong}$, and
\item \emph{unconditionally-fair} if it satisfies $\forall{p}. \GF \sched_p$, abbreviated as $\spec_{uncond}$.
%\item \sj{needed?:}\emph{weak-fair} if it satisfies $\forall{p}. \A \spec_{weak}$, where $\spec_{weak} = \FG \enabled_p \impl \GF \sched_p$.
\end{itemize}
If $f \in \{strong, uncond\}$ is a fairness notion and
$\A h(A,B^{(k)})$
a specification, then we write
$\A_{f} h(A,B^{(k)})$ for $\A (\spec_{f}
\rightarrow h(A,B^{(k)}))$.
Similarly, we write $\E_{f} h(A,B^{(k)})$ for $\E (\spec_{f} \land h(A,B^{(k)}))$.
\subsection{Model Checking and Synthesis Problems}
\label{gua:sec:nonparameterized_synthesis}
%
Given a system $\largesys$ and a specification $\A h(A,B^{(k)})$, where $n \ge k$. Then:
\begin{itemize}
\item the \emph{model checking problem} is to decide whether $\largesys \models \A h(A,B^{(k)})$,
\item the \emph{deadlock detection problem} is to decide whether $\largesys$
does not have global nor local deadlocks,
%\item the \emph{deadlock detection problem} is to decide whether all runs of $\largesys$
%are infinite and $\largesys \models \A \spec_{\neg dead}$,
%i.e., there are no local deadlocks,
\item the \emph{parameterized model checking problem} (PMCP) is to decide whether $\forall m \ge n:\ (A,B)^{(1,m)} \models \A h(A,B^{(k)})$, and
%\item the \emph{parameterized deadlock detection problem} is to decide whether for all $m \ge n$, all runs of $(A,B)^{(1,m)}$ are infinite and $(A,B)^{(1,m)} \models \A \spec_{\neg dead}$.
\item the \emph{parameterized deadlock detection problem} is to decide whether,
for all $m \ge n$, $(A,B)^{(1,m)}$ does not have global nor local deadlocks.
\end{itemize}
For a given number $n \in \bbN$ and specification $\A h(A,B^{(k)})$ with $n \ge k$,
\begin{itemize}
\item the \emph{template synthesis problem} is to find process templates $A,B$ such that
$\largesys \models \A h(A,B^{(k)})$ and $\largesys$ does not have global deadlocks\footnote{\label{footnote:local-deadlocks}Here we do not explicitly mention local deadlocks because they can be specified as a part of $\A h(A,B^{(k)})$.}
\item
the \emph{bounded template synthesis problem} for a pair of bounds $(\bound_A,\bound_B) \in \bbN \times \bbN$
is to solve the template synthesis problem with
$\card{A} \leq \bound_A$ and $\card{B} \leq \bound_B$.
\item the \emph{parameterized template synthesis problem} is to find process templates $A,B$ such that $\forall m \ge n:\ (A,B)^{(1,m)} \models \A h(A,B^{(k)})$ and $(A,B)^{(1,m)}$ does not have global deadlocks\footnoteref{footnote:local-deadlocks}.
\end{itemize}
Similarly, we define problems for specifications having $\E$ instead of $\A$.
The definitions can be flavored with different notions of fairness.
% !TEX program = xelatex
\documentclass{resume}
%\usepackage{zh_CN-Adobefonts_external} % Simplified Chinese Support using external fonts (./fonts/zh_CN-Adobe/)
%\usepackage{zh_CN-Adobefonts_internal} % Simplified Chinese Support using system fonts
\begin{document}
\pagenumbering{gobble} % suppress displaying page number
\name{Arber Shabhasa}
\basicInfo{
\email{[email protected]} \textperiodcentered\
\phone{(+39) 328-305-6933} \textperiodcentered\
\linkedin[ashabhasa]{https://www.linkedin.com/in/ashabhasa} \textperiodcentered\
\github[ashabhasa]{https://www.github.com/ashabhasa}}
\section{\faGraduationCap\ Education}
\datedsubsection{\textbf{Università degli Studi di Bologna}, Bologna, Italy}{2003 -- 2007}
\textit{B.S.} in Computer Engineering
\section{\faUsers\ Experience}
\datedsubsection{\textbf{Yoox Net-A-Porter Group}, Bologna, Italy}{Feb. 2017 -- Present}
\role{Senior Software Developer}{}
Brief introduction: Design, implement, and test the order management system for an e-commerce site.
Using technologies such as .NET (C\#) and the JVM (Scala/Java), we deliver the software platform upon which the e-commerce activities of the YNAP group are built.
In this role I have tackled problems such as concurrency, distributed systems design, and refactoring of legacy codebases.
Currently I am using functional programming to help integrate different systems.
\datedsubsection{\textbf{Yoox Group}}{Nov. 2013 -- Feb. 2017}
\role{Software Developer}{Contractor}
Brief introduction: Responsible for designing and implementing the backend platform for the group.
\begin{itemize}
\item Designed/Implemented the cart API.
\item Designed/Implemented the exchange API.
\item Refactored legacy APIs.
\end{itemize}
\datedsubsection{\textbf{Cineca/Kion}}{Jan. 2011 -- Nov. 2013}
\role{Software Developer}{Contractor}
Brief introduction:
I was part of a team that developed the governance platform (U-Gov) used by most Italian universities.
The software handles many aspects of university administration. It offers various analysis and reporting tools and supports the strategic and operational planning of the universities.
Development was done on the Java platform, using Oracle as the database solution.
\datedsubsection{\textbf{Apex net}}{Jan. 2009 -- Dec. 2011}
\role{Software Developer}{}
Brief introduction: I was part of a team that developed several web-based and mobile applications.
The applications helped credit institutions perform and then assess real estate surveys.
As this was a small team, I performed a range of tasks, from requirements gathering and analysis to implementation, testing, and deployment of the applications.
We used different tools to implement the applications: web-based applications were developed on the .NET platform, whereas mobile applications were developed for iOS and Android.
\begin{itemize}
\item Designed/Implemented new applications for iOS
\item Implemented web-based apps using .NET
\end{itemize}
% Reference Test
%\datedsubsection{\textbf{Paper Title\cite{zaharia2012resilient}}}{May. 2015}
%An xxx optimized for xxx\cite{verma2015large}
%\begin{itemize}
% \item main contribution
%\end{itemize}
\section{\faCogs\ Skills}
\begin{itemize}[parsep=0.5ex]
\item Test Driven Development
\item Refactoring
\item Programming Languages: Scala, Java, C\#
\item Databases: SqlServer/Oracle/Mongo
\end{itemize}
\section{\faHeartO\ Interests}
\begin{itemize}[parsep=0.5ex]
\item Functional programming
\item Distributed systems
\item Reading
\item Running
\end{itemize}
\section{\faInfo\ Miscellaneous}
\begin{itemize}[parsep=0.5ex]
\item Languages: English - Fluent, Italian - Fluent, Albanian - Native speaker
\end{itemize}
%% Reference
%\newpage
%\bibliographystyle{IEEETran}
%\bibliography{mycite}
\end{document}
\documentclass[10pt,a4paper]{article}
\usepackage[utf8]{inputenc}
\usepackage [margin=1.25in]{geometry}
\usepackage[english]{babel}
\usepackage{graphicx}
\title{NLP Project Proposal - Visual Relationship Detection}
\author{Makkunda Sharma \\ 2015CS50459 \and Madhur Singhal \\ 2015CS10235}
\begin{document}
\maketitle
\section{Hypothesis and Goal}
We hypothesize that images contain a great deal of semantic information about the world and that much of this information can be expressed in terms of ``relationships'' or ``interactions'' between objects. We also observe that images often co-occur with natural language in the wild, and an analysis (like a dependency parse) of sentences co-occurring with an image gives a lot of semantic information about the contents of the image. Thus our goal will be to train a model which, given an input image, produces the various `relationships' present in the image. We view these relationships as 3-tuples containing two object names (or synsets) and one relationship identifier, along with bounding boxes corresponding to the two objects in the image. Some examples are (man, hugs, woman), (child, rides, elephant) and (branch, on, tree). We will also explore zero-shot learning of never-before-seen relationships using relationship embeddings.
\section{Datasets}
We identified two datasets suitable for our purposes. The first is the Flickr30k dataset, which contains 158k captions for 30k images along with bounding boxes for the entities mentioned in the sentences. The second is the Visual Genome dataset, which contains 100k images with 2.3 million relationships of the form we require. This dataset also contains 5.4 million captions of various regions of images, which can be useful.
\section{Literature/Code Search}
We were inspired by the paper ``Visual Relationship Detection with Language Priors'', which explores this idea. A more recent paper from last year, ``Phrase Localization and Visual Relationship Detection with Comprehensive Image-Language Cues'', uses cues such as sentence part of speech to better learn relationships. Some code is available for both of these papers, though we have not fully explored their capabilities.
\section{Evaluation}
We can evaluate our model by the number of relationships present in test images that it recognizes correctly; mean Average Precision and Recall are the metrics commonly used. Further, we can generate sentences from the predicted relationships and use captioning metrics for evaluation as well.
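To make the recall metric concrete, a minimal sketch (our illustration, not committed code) of relationship-level recall at $k$ could look as follows, treating predictions as scored 3-tuples and ignoring bounding-box matching:
\begin{verbatim}
# Sketch: recall@k over relationship triples for a single image.
# predicted: list of (score, (subj, pred, obj)); gold: set of (subj, pred, obj)
def recall_at_k(predicted, gold, k=50):
    ranked = sorted(predicted, key=lambda p: p[0], reverse=True)[:k]
    hits = sum(1 for _, triple in ranked if triple in gold)
    return hits / len(gold) if gold else 0.0
\end{verbatim}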
\section{Demo}
We can make a demo app which takes an image and gives the various relationships by highlighting the objects with bounding boxes and showing the most probable relationship keywords.
\end{document}
| {
"alphanum_fraction": 0.8151414921,
"avg_line_length": 108.84,
"ext": "tex",
"hexsha": "991300b8c3119ad44d3a70ce666b3d9e042d2a49",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a732797ec340dc6f495d4702e8ffd49f41ffb5d1",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "madhurcodes/NLP_Stuff",
"max_forks_repo_path": "Project/proposal.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a732797ec340dc6f495d4702e8ffd49f41ffb5d1",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "madhurcodes/NLP_Stuff",
"max_issues_repo_path": "Project/proposal.tex",
"max_line_length": 949,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a732797ec340dc6f495d4702e8ffd49f41ffb5d1",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "madhurcodes/NLP_Stuff",
"max_stars_repo_path": "Project/proposal.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 571,
"size": 2721
} |
\section{Logic}\label{Logic}
This library deals with classical logic and its properties.
The main file is {\tt Classical.v}.
This library also provides some facts on equalities for dependent
types. See the files {\tt Eqdep.v} and {\tt JMeq.v}.
| {
"alphanum_fraction": 0.7570850202,
"avg_line_length": 27.4444444444,
"ext": "tex",
"hexsha": "1fb294f2f58858bb4376000926f832e04f1b35f9",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "4fb3711723e2581a170ffd734e936f210086396e",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "mzp/coq-for-ipad",
"max_forks_repo_path": "Resources/coq-8.3pl2/theories/Logic/intro.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "4fb3711723e2581a170ffd734e936f210086396e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "mzp/coq-for-ipad",
"max_issues_repo_path": "Resources/coq-8.3pl2/theories/Logic/intro.tex",
"max_line_length": 65,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "4fb3711723e2581a170ffd734e936f210086396e",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "mzp/coq-for-ipad",
"max_stars_repo_path": "Resources/coq-8.3pl2/theories/Logic/intro.tex",
"max_stars_repo_stars_event_max_datetime": "2015-01-27T00:11:26.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-01-27T00:11:26.000Z",
"num_tokens": 61,
"size": 247
} |
\chapter{Benchmark Results}
We have compared the performance of {\ViennaCL} with that of standard CPU implementations using a single core. The code used for the benchmarks can be found in the folder \texttt{examples/benchmark/} within the source release of {\ViennaCL}. Results are grouped by computational complexity and can be found in the subsequent sections.
\begin{center}
\begin{tabular}{|l|l|}
\hline
CPU & AMD Phenom II X4-965 \\
RAM & 8 GB \\
OS & Funtoo Linux 64 bit \\
\hline
Kernel for AMD cards: & 2.6.33 \\
AMD driver version: & 10.4 \\
\hline
Kernel for Nvidia cards: & 2.6.34 \\
Nvidia driver version: & 195.36.24 \\
\hline
{\ViennaCL} version & 1.0.0 \\
\hline
\end{tabular}
\end{center}
\NOTE{Compute kernels are not fully optimized yet; results are likely to improve considerably in future releases of {\ViennaCL}}
\TIP{Due to only partial support of double precision by GPUs from ATI at the time of these benchmarks, double precision arithmetic is not included, cf.~Tab.~\ref{tab:double-precision-GPUs}.}
\NOTE{When benchmarking {\ViennaCL}, first a dummy call to the functionality of interest should be issued prior to taking timings. Otherwise, benchmark results include the just-in-time compilation, which is a constant independent of the data size.}
\section{Vector Operations}
Benchmarks for the addition of two vectors and the computation of inner products are shown in Tab.~\ref{tab:vectorbench}.
\begin{table}[tb]
\begin{center}
\begin{tabular}{l|c|c|c|c}
Compute Device & add, float & add, double & prod, float & prod, double\\
\hline
CPU & 0.174 & 0.347 & 0.408 & 0.430 \\
NVIDIA GTX 260 & 0.087 & 0.089 & 0.044 & 0.072\\
NVIDIA GTX 470 & 0.042 & 0.133 & 0.050 & 0.053 \\
ATI Radeon 5850 & 0.026 & - & 0.105 & - \\
\end{tabular}
\caption{Execution times (seconds) for vector addition and inner products.}
\label{tab:vectorbench}
\end{center}
\end{table}
\section{Matrix-Vector Multiplication}
We have compared execution times of the operation
\begin{align}
\mathbf{y} = \mathbf{A} \mathbf{x} \ ,
\end{align}
where $\mathbf{A}$ is a sparse matrix (ten entries per column on average). The results in Tab.~\ref{tab:sparsebench} show that with {\ViennaCL} and a mid-range GPU, performance gains of up to one order of magnitude can be obtained.
\begin{table}[tb]
\begin{center}
\begin{tabular}{l|c|c}
Compute Device & float & double \\
\hline
CPU & 0.0333 & 0.0352 \\
NVIDIA GTX 260 & 0.0028 & 0.0043 \\
NVIDIA GTX 470 & 0.0024 & 0.0041 \\
ATI Radeon 5850 & 0.0032 & - \\
\end{tabular}
\caption{Execution times (seconds) for sparse matrix-vector multiplication using \texttt{compressed\_matrix}.}
\label{tab:sparsebench}
\end{center}
\end{table}
\section{Iterative Solver Performance}
The solution of a system of linear equations is encountered in many simulators. It is often seen as a black box: system matrix and right-hand-side vector in, solution out. This black-box character makes it easy to exchange existing solvers on the CPU with a GPU variant provided by {\ViennaCL}. Tab.~\ref{tab:solverbench} shows that the performance gain of GPU implementations can be significant. For applications where most time is spent on the solution of the linear systems, the use of {\ViennaCL} can reduce the total execution time by about a factor of five.
\begin{table}[tb]
\begin{center}
\begin{tabular}{l|c| c|c| c|c|}
Compute Device & CG, float & CG, double & GMRES, float & GMRES, double \\
\hline
CPU & 0.407 & 0.450 & 4.84 & 7.58 \\
NVIDIA GTX 260 & 0.067 & 0.092 & 4.27 & 5.08 \\
NVIDIA GTX 470 & 0.063 & 0.087 & 3.63 & 4.68 \\
ATI Radeon 5850 & 0.233 & - & 22.7 & -\\
\end{tabular}
\caption{Execution times (seconds) for ten iterations of CG and GMRES without preconditioner. Results for BiCGStab are similar to that of CG.}
\label{tab:solverbench}
\end{center}
\end{table}
| {
"alphanum_fraction": 0.7187339166,
"avg_line_length": 44.1590909091,
"ext": "tex",
"hexsha": "bb96358f9982c923aad743c9b3ca63ec47bb4f63",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "6dac70e558ed42abe63d8c5bfd08465aafeda859",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "bollig/viennacl",
"max_forks_repo_path": "doc/manual/benchmarks.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "6dac70e558ed42abe63d8c5bfd08465aafeda859",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "bollig/viennacl",
"max_issues_repo_path": "doc/manual/benchmarks.tex",
"max_line_length": 567,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "6dac70e558ed42abe63d8c5bfd08465aafeda859",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "bollig/viennacl",
"max_stars_repo_path": "doc/manual/benchmarks.tex",
"max_stars_repo_stars_event_max_datetime": "2020-09-21T08:33:10.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-09-21T08:33:10.000Z",
"num_tokens": 1174,
"size": 3886
} |
\subsection{Data modeling}
\label{sec:model-creation}
The current implementation of \dBoost/ contains three data modeling strategies. Two of them are relatively simple machine-learning-based approaches that handle mostly continuous numerical data well;
both assume that the data is normally distributed.
The last one makes much weaker assumptions about its input, and is best suited for studying discrete heterogeneous distributions produced by gathering human-input data. The following three subsections discuss the particulars of these three models; section~\ref{sec:partitioning} presents a way of extending models to deal with data that lends itself well to partitioning over a particular attribute.
\begin{figure}
\centering
\newcommand{\cramped}[3]{\subfloat[#2]{\includegraphics[width=.3\linewidth]{#1}\label{fig:#3}}}
\cramped{../graphics/gaussians-preview.pdf}{Gaussian}{gaussian}\hspace*{.01\linewidth}
\cramped{../graphics/mixtures-preview.png}{Mixture}{mixture}\hspace*{.01\linewidth}
\cramped{../graphics/histograms-preview.pdf}{Histogram}{histogram}
\caption{Simple visualization of the outlier detection strategy employed by each model. Possible outlier values are shown in red.}
\label{fig:models}
\end{figure}
\subsubsection{Simple Gaussian Modeling}
\label{sec:gaus_model}
The univariate Gaussian model (Figure~\ref{fig:gaussian}) treats each value $x_i$ of the expanded tuples as a random sample drawn from a normal distribution $\mathcal N(\mu_i, \sigma_i)$.
The model's parameters (a pair $(\mu, \sigma)$ for each numerical column) are computed as each column's mean and standard deviation. In the common case where the dataset has not significantly changed between the analysis and the modeling passes, the information obtained during the statistical analysis pass is sufficient to derive these parameters.
Despite its simplicity, this model presents the attractive property of requiring extremely little memory -- on the order of the size of one expanded tuple.
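As a rough sketch (an illustration under assumptions, not the reference implementation), the univariate Gaussian strategy amounts to the following; the cutoff \texttt{n\_sigmas} is a tuning parameter assumed here.
\begin{verbatim}
# Sketch of the univariate Gaussian model: one (mu, sigma) pair per
# numerical column, estimated during the statistical analysis pass.
import statistics

def fit_gaussian(column):
    # column: list of numerical values observed for one expanded field
    return statistics.mean(column), statistics.pstdev(column)

def is_outlier(value, mu, sigma, n_sigmas=3.0):
    # flag values lying more than n_sigmas standard deviations from the mean
    return sigma > 0 and abs(value - mu) > n_sigmas * sigma
\end{verbatim}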
\subsubsection{Mixture Modeling}
\label{sec:mixture_model}
The multivariate Gaussian Mixture model (Figure~\ref{fig:mixture}) takes advantage of the correlation hints supplied by the statistical analysis pass to model sub-tuples of the expanded tuples as samples from multivariate Gaussian mixtures (GMMs), creating one model per group of correlated columns.
For example, if the statistical analysis phase outlines a pair of fields $(f_1, f_2)$ as a good candidate for joint modeling, then the Mixture modeling strategy will learn a particular GMM to model this correlation. Pairs of values $(X_1, X_2)$ are here assumed to have been produced by random sampling from a distribution of the form
\begin{align*}
\sum_{j=1}^{N} \pi_j \mathcal N(\mu_j, \Sigma_j)
\end{align*}
where $N$ is the number of individual components that the GMM comprises ($N$ is a user-defined value in our implementation, but abundant literature exists on the subject of choosing $N$~\cite{Schwartz1978}~\cite{Akaike1974}), and $\pi_j, \mu_j$ and $\Sigma_j$ are parameters of the GMM learned as part of the modeling pass~\cite{Dempster1977}.
Unlike simple Gaussian models, the expectation maximization algorithm used in inferring the optimal model parameters for Gaussian mixtures does require retaining some data in memory. Still, most of the fields obtained after expanding each tuple are discarded after the relevant ones are extracted for learning purposes; in most cases we expect the set of values retained to be much smaller than the set of all attributes, thus limiting the memory usage.
In addition, when dealing with large amounts of data, it is possible -- and indeed, preferable -- to train the Mixture model on a randomly sampled subset of the data before running the full analysis. This approach is particularly relevant when using the Mixture model, but can be applied to all models to shorten the learning phase when dealing with very large datasets.
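A minimal sketch of this strategy is given below, assuming \texttt{scikit-learn} is available and that correlated column pairs have already been extracted; the number of components and the density cutoff are illustrative choices, not values prescribed by \dBoost/.
\begin{verbatim}
# Sketch: fit a Gaussian mixture to pairs of correlated attributes and
# flag pairs that fall in a low-density region of the fitted model.
import numpy as np
from sklearn.mixture import GaussianMixture

def fit_mixture(pairs, n_components=3):
    gmm = GaussianMixture(n_components=n_components)
    gmm.fit(np.asarray(pairs))
    return gmm

def flag_outliers(gmm, pairs, quantile=0.01):
    scores = gmm.score_samples(np.asarray(pairs))  # log-density per pair
    return scores < np.quantile(scores, quantile)
\end{verbatim}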
\subsubsection{Histogram-based Modeling}
\label{sec:histograms}
Simple Gaussian modeling and Gaussian Mixture modeling both offer good results for continuous numerical data such as that produced by sensors, but suffer from two limitations:
\begin{itemize}
\item They make strong assumptions about the distribution of the data under study
\item They fail to capture patterns in discrete numerical data, non-numerical data, or heterogeneous data
\end{itemize}
Our last model (Figure~\ref{fig:histogram}) does not make any assumption about the data under study. Instead, it counts the occurrences of each unique value in each column of the expanded tuple and for each set of potentially correlated sub-tuples (as suggested by the analysis module). These counts, accumulated over the entire dataset, provide a \emph{de facto} distribution of the data in each field and set of correlated fields.
%Described as above, the histogram modeling strategy is rather memory inefficient: it requires keeping track of each value of each expanded tuple that the model comes across.
To limit memory usage, and to speed up the modeling phase, we discard histograms as soon as they reach a certain size -- say, 16 bins. Discarding histograms when their number of bins reaches a fixed threshold is just one of a number of heuristics that could be implemented here; the idea is that a profusion of different values, all repeating infrequently, is unlikely to provide valuable insight as far as outlier detection is concerned (as an extreme example, the histogram of an attribute with no repeated values would only have one value per bin, and would not yield any insight about the data). With this discarding heuristic applied, histograms are quick to generate and extremely memory efficient.
Histograms also have the valuable property of treating sets of fields (obtained via correlation analysis) and single fields in the exact same way, thus permitting to model single columns or groups of attributes indifferently. Finally, because they make no assumption about the data they manipulate (aside from the requirement that it be of small cardinality), histograms are able to accurately describe a broad class of discrete distributions.
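The following rough sketch illustrates how such histograms could be maintained; the 16-bin discard threshold mirrors the example above, and the streaming interface is an assumption made for illustration.
\begin{verbatim}
# Sketch: per-attribute value counts, discarded once the number of
# distinct values exceeds a fixed threshold (here 16 bins).
from collections import Counter

class Histogram:
    def __init__(self, max_bins=16):
        self.counts = Counter()
        self.max_bins = max_bins
        self.discarded = False

    def add(self, value):
        if self.discarded:
            return
        self.counts[value] += 1
        if len(self.counts) > self.max_bins:
            self.counts.clear()
            self.discarded = True

    def frequency(self, value):
        total = sum(self.counts.values())
        return self.counts[value] / total if total else 0.0
\end{verbatim}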
\subsubsection{Meta-modeling through attribute-based partitioning}
\label{sec:partitioning}
The models presented above treat attributes and sets of correlated attributes as a whole. In some cases, however, it is possible to identify sub-populations of tuples by scrutinizing certain expanded attributes of the data; these sub-populations can then be studied separately, yielding more insight and better outlier classification performance.
As an example, consider the case of an airline adjusting status levels for its frequent fliers, using the number of flights for each passenger as well as their status level. A non-partitioned analysis may not return any interesting information, but a partitioned analysis could single out passengers in lower status levels traveling significantly more than average, or passengers with higher status traveling rarely. This would work even if statuses were stored as textual data, with no indication of their relative rankings.
The general approach, given a dataset and a pre-existing model, therefore consists in extracting sets of attributes based on correlation hints provided by earlier stages of the pipeline, and dividing each group of attributes between a single key (in the example, the status level) and one or more sub-population attributes (in the example, the number of flights). One instance of the selected model is then built for each value of the key. For example, if the statistical analysis phase highlights a correlation between columns $A$ (status levels) and $B$ (number of flights), and column $A$ contains values $a_1, \dots, a_n$ (\texttt{bronze}, \texttt{silver}, \texttt{gold}, \dots), then we distribute the pairs $(A, B)$ into $n$ partitions based on the value of $A$; values of $B$ in each of these partitions are then modeled independently (in the example, this yields a different model of flights count for each status level).
This type of approach is useful when the distribution for an attribute or set of attributes is multi-modal. A high-level non-partitioned analysis will reveal values that fall in none of the classes; a partitioned approach, on the other hand, may more easily reveal discrepancies by suppressing interference between each class.
In addition to providing better classification accuracy, partitioning may lead to better runtime performance by diminishing the size of the dataset covered by each model. These benefits are especially important when model construction performance does not scale linearly, and when data volumes are too large to be analyzed on a single machine.
Finally, attribute-based partitioning allows for previously impossible analysis. Assuming for example that a dataset with two columns has 4 classes identified by the value in the first column, each with 10 distinct expected values in the second column, a generic histogram-based analysis would discard the histogram for the pair of values as having too many buckets (40). A partitioned analysis, on the other hand, would allow the construction of four histograms, each with 10 regular bins and potentially a few outliers.
In our prototype implementation, we focused on partitioning applied to the discrete histogram case; the technique, however, generalizes to all the models presented above.
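A rough sketch of the partitioned meta-model, reusing the histogram sketch above, might look as follows; the per-row key and value indices are illustrative assumptions.
\begin{verbatim}
# Sketch: one sub-model per value of the key attribute (e.g. status level),
# each trained only on the tuples sharing that key.
from collections import defaultdict

def build_partitioned_model(rows, key_index, value_index, model_factory):
    models = defaultdict(model_factory)
    for row in rows:
        models[row[key_index]].add(row[value_index])
    return models

# e.g. build_partitioned_model(rows, 0, 1, Histogram) builds one histogram
# of flight counts per status level.
\end{verbatim}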
| {
"alphanum_fraction": 0.8063593005,
"avg_line_length": 129.2465753425,
"ext": "tex",
"hexsha": "67f975d2e05cad64a3a9b9eef85f4e4a71048366",
"lang": "TeX",
"max_forks_count": 16,
"max_forks_repo_forks_event_max_datetime": "2022-02-28T06:42:36.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-04-21T12:28:33.000Z",
"max_forks_repo_head_hexsha": "027ebeaf0ac4b524dc49df94e7bbc7be4391213d",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "adrianlut/raha",
"max_forks_repo_path": "raha/tools/dBoost/paper/vldb/model-creation.tex",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "027ebeaf0ac4b524dc49df94e7bbc7be4391213d",
"max_issues_repo_issues_event_max_datetime": "2020-10-08T11:19:03.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-10-08T11:19:03.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "adrianlut/raha",
"max_issues_repo_path": "raha/tools/dBoost/paper/vldb/model-creation.tex",
"max_line_length": 929,
"max_stars_count": 30,
"max_stars_repo_head_hexsha": "027ebeaf0ac4b524dc49df94e7bbc7be4391213d",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "adrianlut/raha",
"max_stars_repo_path": "raha/tools/dBoost/paper/vldb/model-creation.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-07T07:44:58.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-07-05T12:03:45.000Z",
"num_tokens": 1949,
"size": 9435
} |
\subsection[\ttHF]{\ttHF}
\label{subsec:ttHF}
In the following subsections, the set-ups of the current baseline samples for the production of \ttbar quark pairs in association with
$b$-quarks (\ttHF) are described. NLO predictions with massive $b$-quarks in the matrix element and matched to parton shower
programs are available within the \SHERPAOL, \MGNLO and, more recently, \POWHEGBOX frameworks.
\subsubsection[Sherpa]{\SHERPA}
%\label{subsubsec:ttHF_sherpa}
The descriptions below refer to the \SHERPA[2.2.1] samples.
Details of the set-up are given in Ref.~\cite{ATL-PHYS-PUB-2016-016} and reported below.
\paragraph{Samples}
%\label{par:ttHF_sherpa_samples}
The descriptions below correspond to the samples in Table~\ref{tab:ttHF_Sh}.
\begin{table}[htbp]
\begin{center}
\caption{Nominal $t\bar{t}$+HF samples produced with \SHERPA. Variation samples are not explicitly listed.}
\label{tab:ttHF_Sh}
\begin{tabular}{ l | l }
\hline
DSID range & Description \\
\hline
410323--4 & $t\bar{t}$ single lepton \\
410325 & $t\bar{t}$ dilepton \\
410369 & $t\bar{t}$ all-hadronic \\
\hline
\end{tabular}
\end{center}
\end{table}
\paragraph{Description:}
Samples for \ttHF processes were produced with the \SHERPA[2.2.1]~\cite{Bothmann:2019yzt} generator,
using the MEPS@NLO prescription~\cite{Hoeche:2012yf} and interfaced with \OPENLOOPS~\cite{Buccioni:2019sur,Cascioli:2011va,Denner:2016kdg}
to provide the virtual corrections for matrix elements at NLO accuracy.
The four-flavour scheme is used with the $b$-quark mass set to 4.75\,\GeV.
The renormalisation scale \muR has the functional form
$\sqrt[4]{m_\text{T}(t) \cdot m_\text{T}(\bar{t}) \cdot m_\text{T}(b) \cdot m_\text{T}(\bar{b})}$. The
factorisation scale \muF was set to $H_\text{T}/2$, where $H_\text{T}$ is the transverse-mass sum of the
partons in the matrix element, and this value was also the resummation scale \muQ of the parton shower.
The \CT[10nlo] PDF set was used in conjunction with a dedicated PS tune developed by the \SHERPA authors.
\subsubsection[MadGraph5\_aMC@NLO+Pythia8]{\MGNLOPY[8]}
%\label{subsubsec:ttHF_aMCP8}
In the following, set-ups are described for \PYTHIA only.
Details of the set-up are given in Ref.~\cite{ATL-PHYS-PUB-2016-016} and reported below.
\paragraph{Samples}
%\label{par:ttHF_aMCP8_samples}
The descriptions below correspond to the samples in Table~\ref{tab:ttHF_amc}.
\begin{table}[htbp]
\begin{center}
\caption{Nominal $t\bar{t}$+HF samples produced with \MGNLOPY[8]. }
\label{tab:ttHF_amc}
\begin{tabular}{ l | l }
\hline
DSID range & Description \\
\hline
410265 & $t\bar{t}$ non-all-hadronic \\
410266 & $t\bar{t}$ dileptonic \\
410267 & $t\bar{t}$ all-hadronic \\
\hline
\end{tabular}
\end{center}
\end{table}
\paragraph{Description:}
Samples for \ttHF processes were produced with the \MGNLO generator
with the \NNPDF[3.0nlo]~\cite{Ball:2014uwa} PDF set. It was interfaced with \PYTHIA[8.230]~\cite{Sjostrand:2014zea},
using the A14 set of tuned parameters~\cite{ATL-PHYS-PUB-2014-021} and the \NNPDF[2.3lo] PDF.
The four-flavour scheme was used with the $b$-quark mass set to 4.75\,\GeV.
The renormalisation scale \muR has the functional form
$\sqrt[4]{m_\text{T}(t) \cdot m_\text{T}(\bar{t}) \cdot m_\text{T}(b) \cdot m_\text{T}(\bar{b})}$. The
factorisation scale \muF was set to $H_\text{T}/2$, where $H_\text{T}$ is the transverse-mass sum of the partons in the matrix
element.
The resummation scale \muQ has the form $\muQ = f_\text{Q} \sqrt{\hat{s}}$, where
the prefactor $f_\text{Q}$ is an external parameter randomly distributed in the
range $[f^\text{min}_\text{Q}$, $f^\text{max}_\text{Q}]=[0.1,0.25]$.
\subsubsection[PowhegBoxRes+Pythia8]{\POWHEGBOXRES+\PYTHIA[8]}
%\label{subsubsec:ttHF_PP8}
In the following, set-ups are described for \PYTHIA[8] only.
\paragraph{Samples}
%\label{par:ttHF_PP8_samples}
The descriptions below correspond to the samples in Table~\ref{tab:ttHF_pp8}.
\begin{table}[htbp]
\begin{center}
\caption{Nominal $t\bar{t}$+HF samples produced with \POWHEGBOXRES+\PYTHIA[8]. }
\label{tab:ttHF_pp8}
\begin{tabular}{ l | l }
\hline
DSID range & Description \\
\hline
411179--80 & $t\bar{t}$ non-all-hadronic \\
411178 & $t\bar{t}$ dileptonic \\
411275 & $t\bar{t}$ all-hadronic \\
\hline
\end{tabular}
\end{center}
\end{table}
\paragraph{Description:}
Samples for \ttHF processes were produced with the \POWHEGBOXRES~\cite{Jezo:2018yaf}
generator and \OPENLOOPS~\cite{Buccioni:2019sur,Cascioli:2011va,Denner:2016kdg}, using a pre-release
of the implementation of this process in \POWHEGBOXRES provided by the authors~\cite{ttbbPowheg},
with the \NNPDF[3.0nlo]~\cite{Ball:2014uwa} PDF set. It was interfaced with \PYTHIA[8.240]~\cite{Sjostrand:2014zea},
using the A14 set of tuned parameters~\cite{ATL-PHYS-PUB-2014-021} and the \NNPDF[2.3lo] PDF set.
The four-flavour scheme was used with the $b$-quark mass set to 4.95\,\GeV.
The factorisation scale was set to $0.5\times\Sigma_{i=t,\bar{t},b,\bar{b},j}m_{\mathrm{T},i}$,
the renormalisation scale was set to $\sqrt[4]{m_{\text{T}}(t)\cdot m_{\text{T}}(\bar{t})\cdot m_{\text{T}}(b)\cdot m_{\text{T}}(\bar{b})}$,
and the \hdamp parameter was set to $0.5\times\Sigma_{i=t,\bar{t},b,\bar{b}}m_{\mathrm{T},i}$.
| {
"alphanum_fraction": 0.7310988383,
"avg_line_length": 44.1260504202,
"ext": "tex",
"hexsha": "e07cf77b3ae21cd6151c0aa5e8d6a4254596c2d9",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "e2640e985974cea2f4276551f6204c9fa50f4a17",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "diegobaronm/QTNote",
"max_forks_repo_path": "template/MC_snippets/ttHF.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "e2640e985974cea2f4276551f6204c9fa50f4a17",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "diegobaronm/QTNote",
"max_issues_repo_path": "template/MC_snippets/ttHF.tex",
"max_line_length": 140,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "e2640e985974cea2f4276551f6204c9fa50f4a17",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "diegobaronm/QTNote",
"max_stars_repo_path": "template/MC_snippets/ttHF.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1855,
"size": 5251
} |
% AUTORIGHTS
% Copyright (C) 2007 Princeton University
%
% This file is part of Ferret Toolkit.
%
% Ferret Toolkit is free software; you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation; either version 2, or (at your option)
% any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with this program; if not, write to the Free Software Foundation,
% Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
\section{Management Module}
The management module provides a working environment for cass\_table,
cass\_index and cass\_distfunc. It responds to user requests to
add/remove tables, indices, etc.
\subsection {Management related API}
\begin{verbatim}
typedef struct _cass_env_t {
// Internal management data for controlling the whole system
// similar to DB_ENV in BDB.
// Need to hold all the meta data about tables, maps, indexes as well
  // as their checkpoint information.
} cass_env_t;
int cass_env_create(cass_env_t **env, uint32_t flags);
int cass_env_open(cass_env_t *env, char *db_home, uint32_t flags);
// Automatically recover to a consistent stage in case of crash.
int cass_env_close(cass_env_t *env, uint32_t flags);
int cass_env_err(cass_env_t *env, int error, const char *fmt, ...);
int cass_env_checkpoint(cass_env_t *env);
int cass_env_restorelastcheckpoint(cass_env_t *env);
// Control the system via cass_table_create, cass_idx_create etc.
// For simplicity, we will assume that we can load the table&index
// control structure into memory as part of cass_env upon system
// startup. This will simplify the management and enable on-disk
// sequential scan if needed since we know about the table even when
// the data is not in memory.
// Need to add details on how to do checkpointing and recovery.
// Need details on external config file, what the user interface will
// look like, etc.
\end{verbatim}
| {
"alphanum_fraction": 0.7606382979,
"avg_line_length": 41.0181818182,
"ext": "tex",
"hexsha": "a753f2811319f67e7f7ce2eee18001d5cd68ee59",
"lang": "TeX",
"max_forks_count": 40,
"max_forks_repo_forks_event_max_datetime": "2022-03-03T23:23:37.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-02-26T15:31:16.000Z",
"max_forks_repo_head_hexsha": "d14edfb62795805c84a4280d67b50cca175b95af",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "manggoguy/parsec-modified",
"max_forks_repo_path": "pkgs/netapps/netferret/src/server/doc/spec/management.tex",
"max_issues_count": 12,
"max_issues_repo_head_hexsha": "d14edfb62795805c84a4280d67b50cca175b95af",
"max_issues_repo_issues_event_max_datetime": "2022-03-13T03:54:24.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-12-15T08:30:19.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "manggoguy/parsec-modified",
"max_issues_repo_path": "pkgs/netapps/netferret/src/server/doc/spec/management.tex",
"max_line_length": 73,
"max_stars_count": 64,
"max_stars_repo_head_hexsha": "d14edfb62795805c84a4280d67b50cca175b95af",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "manggoguy/parsec-modified",
"max_stars_repo_path": "pkgs/netapps/netferret/src/server/doc/spec/management.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-24T13:26:53.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-03-06T00:30:56.000Z",
"num_tokens": 537,
"size": 2256
} |
% $Id$
% latex tex4ht-options or xhlatex tex4ht-options
%
% Copyright 2009-2016 TeX Users Group
% Copyright 2000-2009 Eitan M. Gurari
% Released under LPPL 1.3c+.
% See tex4ht-cpright.tex for license text.
\ifx \HTML\UnDef
\def\HTML{}
\def\CONFIG{\jobname}
\def\MAKETITLE{\author{Eitan M. Gurari}}
\def\next{\input mktex4ht.4ht \endinput}
\expandafter\next
\fi
%%%%%%%%%%%%%%%%%% definitions %%%%%%%%%%%%%%%%%%%%%%%%%
\input{common.tex}
\input{common-code.tex}
\input{tex4ht-cpright.tex}
\def\.{\string\a:mathml:\space}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\chapter{Shared}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\<par del\><<<
!*?: >>>
\<tag of Tag\><<<
cw:>>>
\<tail\><<<
tail>>>
\<addr for Tag and Ref of Sec\><<<
\xdef\:cursec{|<section html addr|>}%
>>>
\<tex4ht.4ht\><<<
\ifnum\the\catcode`\%=14\else \expandafter\edef\csname
\string:RestoreCatcodes\endcsname{\catcode`\%|=\the
\catcode`\%}\catcode`\%|=14\fi
% tex4ht.4ht (|version), generated from |jobname.tex
% Copyright 2009-2016 TeX Users Group
% Copyright |CopyYear.1997. Eitan M. Gurari
|<TeX4ht copywrite|>
|<save catcodes|>
|<note about tex4ht.usr|>
\:CheckOption{info}\if:Option
\Hinclude[*]{info4ht.4ht}
\Log:Note{For additional information,
compile `xhlatex mktex4ht.4ht'^^J and review the appropriate
pointers under mktex4ht.html => index}
\else
\Log:Note{for additional information, use
the command line option `info'}
\fi
\immediate\write16{::::::::::::::::::::::::::::::::::::::::::}
\immediate\write16{ TeX4ht info is available in the log file }
\immediate\write16{::::::::::::::::::::::::::::::::::::::::::}
|<redefine Configure|>
|<built-in settings|>
|<user's configuration files|>
|<recall Configure|>
\:tempc
\:RestoreCatcodes
\endinput
>>>
\<redefine Configure\><<<
\let\:tempd|=\Configure
\def\Configure#1#2{%
\:CheckOption{#1}\if:Option \def\:tempc{#2}\fi}
>>>
\<recall Configure\><<<
\let\Configure|=\:tempd
>>>
\<user's configuration files\><<<
\openin15=tex4ht.usr \ifeof15 \else \closein15
\input tex4ht.usr
\fi
>>>
\<note about tex4ht.usr\><<<
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% It is highly recommended NOT TO CHANGE THIS FILE. Options
% ^^^^^^^^^^^^^^^^^^^^^^^
% defined in this file may be redefined, and new ones may be
% added, within a user supplied file named tex4ht.usr.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
>>>
\<built-in settings\><<<
|<html4 settings|>
|<0.0 and 3.2 settings|>
|<mozilla settings|>
|<html5 settings|>
|<tei settings|>
|<docbook settings|>
|<html for word settings|>
|<open office settings|>
|<html mathltx settings|>
|<html jsmath settings|>
|<html emacspeak settings|>
>>>
\<html4 settings\><<<
\def\:tempc{\:CheckOption{info}\if:Option
\Hinclude[*]{infoht4.4ht}%
\:CheckOption{javahelp}\if:Option
\Hinclude[*]{infojh.4ht}\fi
\fi
\Hinclude[*]{html4.4ht}%
\Hinclude[*]{html4-math.4ht}%
\:CheckOption{svg}%
\if:Option \else\:CheckOption{svg-}\fi
\if:Option \else\:CheckOption{svg-obj}\fi
\if:Option
\Hinclude[*]{svg.4ht}%
\Hinclude[*]{html4-svg.4ht}%
\:CheckOption{info}\if:Option \Hinclude[*]{infosvg.4ht}\fi
\fi
}
\Configure{uni-html4}{%
\:CheckOption{info}\if:Option
\Hinclude[*]{infoht4.4ht}\fi
\Hinclude[*]{html4.4ht}%
\Hinclude[*]{html4-math.4ht}%
\Hinclude[*]{unicode.4ht}
\Hinclude[*]{html4-uni.4ht}%
\:CheckOption{svg}%
\if:Option \else\:CheckOption{svg-}\fi
\if:Option \else\:CheckOption{svg-obj}\fi
\if:Option \Hinclude[*]{svg.4ht}%
\:CheckOption{info}\if:Option \Hinclude[*]{infosvg.4ht}\fi
\fi
}
\Configure{mathml}{%
\:CheckOption{info}\if:Option
\Hinclude[*]{infoht4.4ht}\fi
\:CheckOption{info}\if:Option
\Hinclude[*]{infomml.4ht}\fi
\Hinclude[*]{html4.4ht}%
\Hinclude[*]{unicode.4ht}%
\Hinclude[*]{mathml.4ht}%
\Hinclude[*]{html-mml.4ht}%
\Hinclude[*]{html4-uni.4ht}%
\:CheckOption{svg}%
\if:Option \else\:CheckOption{svg-}\fi
\if:Option \else\:CheckOption{svg-obj}\fi
\if:Option \Hinclude[*]{svg.4ht}%
\:CheckOption{info}\if:Option \Hinclude[*]{infosvg.4ht}\fi
\fi
\:CheckOption{mathplayer}\if:Option
\:CheckOption{pmathml}\if:Option \else
\:CheckOption{pmathml-css}\if:Option \else
\Hinclude[*]{mathplayer.4ht}
\fi\fi
\else
\Log:Note{For MathML on MSIE + MathPlayer use
the command line option `mathplayer'}
\fi
}
>>>
\<html for word settings\><<<
\Configure{word}{%
\:CheckOption{info}\if:Option
\Hinclude[*]{infoht4.4ht}\fi
\Hinclude[*]{html4.4ht}%
\Hinclude[*]{html4-math.4ht}%
\Hinclude[*]{htmlw.4ht}%
\:CheckOption{svg}%
\if:Option \else\:CheckOption{svg-}\fi
\if:Option \else\:CheckOption{svg-obj}\fi
\if:Option \Hinclude[*]{svg.4ht}%
\:CheckOption{info}\if:Option \Hinclude[*]{infosvg.4ht}\fi
\fi
}
>>>
\<0.0 and 3.2 settings\><<<
\Configure{0.0}{%
\Hinclude[*]{html0.4ht}%
}
\Configure{3.2}{%
\:CheckOption{info}\if:Option
\:CheckOption{javahelp}\if:Option
\Hinclude[*]{infojh.4ht}\fi\fi
\Hinclude[*]{html32.4ht}%
\Hinclude[*]{html32-math.4ht}%
\:CheckOption{javahelp}\if:Option
|<option javahelp|>%
\fi
\:CheckOption{unicode}\if:Option \Hinclude[*]{unicode.4ht}\fi
}
>>>
Allows cleanup using xtpipes.
\<option javahelp\><<<
\Hinclude[*]{javahelp.4ht}%
\:CheckOption{jh-} \if:Option
\else
\edef\Preamble{\Preamble,xml}%
\Log:Note{for sources failing to produce
XML versions of HTML, try the command line option `jh-'}%
\fi
>>>
\<mozilla settings\><<<
\Configure{mozilla}{%
\:CheckOption{info}\if:Option
\Hinclude[*]{infoht4.4ht}\fi
\:CheckOption{info}\if:Option
\Hinclude[*]{infomml.4ht}\fi
\Hinclude[*]{html4.4ht}%
\Hinclude[*]{unicode.4ht}%
\Hinclude[*]{mathml.4ht}%
\Hinclude[*]{html-mml.4ht}%
\Hinclude[*]{mozilla.4ht}%
\:CheckOption{svg}%
\if:Option \else\:CheckOption{svg-}\fi
\if:Option \else\:CheckOption{svg-obj}\fi
\if:Option \Hinclude[*]{svg.4ht}%
\:CheckOption{info}\if:Option \Hinclude[*]{infosvg.4ht}\fi
\fi
\:CheckOption{mathplayer}\if:Option
\Hinclude[*]{mathplayer.4ht}
\else
\Log:Note{For MathML on MSIE + MathPlayer use
the command line option `mathplayer'}
\fi
}
>>>
\<html5 settings\><<<
\Configure{html5}{%
\:CheckOption{info}\if:Option
\Hinclude[*]{infoht4.4ht}\fi
\:CheckOption{info}\if:Option
\Hinclude[*]{infomml.4ht}\fi
\Hinclude[*]{html4.4ht}%
\Hinclude[*]{unicode.4ht}%
\Hinclude[*]{mathml.4ht}%
\Hinclude[*]{html-mml.4ht}%
\Hinclude[*]{html5.4ht}%
}
>>>
\<tei settings\><<<
\Configure{tei}{%
\Hinclude[*]{tei.4ht}%
\Hinclude[*]{unicode.4ht}%
\Hinclude[*]{tei-math.4ht}%
\:CheckOption{svg}%
\if:Option \else\:CheckOption{svg-}\fi
\if:Option \else\:CheckOption{svg-obj}\fi
\if:Option \Hinclude[*]{svg.4ht}%
\:CheckOption{info}\if:Option \Hinclude[*]{infosvg.4ht}\fi
\fi
}
\Configure{tei-mml}{%
\:CheckOption{info}\if:Option
\Hinclude[*]{infomml.4ht}\fi
\Hinclude[*]{tei.4ht}%
\Hinclude[*]{unicode.4ht}%
\Hinclude[*]{mathml.4ht}%
\Hinclude[*]{tei-mml.4ht}%
\:CheckOption{svg}%
\if:Option \else\:CheckOption{svg-}\fi
\if:Option \else\:CheckOption{svg-obj}\fi
\if:Option \Hinclude[*]{svg.4ht}%
\:CheckOption{info}\if:Option \Hinclude[*]{infosvg.4ht}\fi
\fi
}
>>>
\<docbook settings\><<<
\Configure{docbook}{%
\Hinclude[*]{docbook.4ht}%
\Hinclude[*]{unicode.4ht}%
\Hinclude[*]{docbook-math.4ht}%
\:CheckOption{svg}%
\if:Option \else\:CheckOption{svg-}\fi
\if:Option \else\:CheckOption{svg-obj}\fi
\if:Option \Hinclude[*]{svg.4ht}%
\:CheckOption{info}\if:Option \Hinclude[*]{infosvg.4ht}\fi
\fi
}
\Configure{docbook-mml}{%
\:CheckOption{info}\if:Option
\Hinclude[*]{infomml.4ht}\fi
\Hinclude[*]{docbook.4ht}%
\Hinclude[*]{unicode.4ht}%
\Hinclude[*]{mathml.4ht}%
\Hinclude[*]{docbook-mml.4ht}%
\:CheckOption{svg}%
\if:Option \else\:CheckOption{svg-}\fi
\if:Option \else\:CheckOption{svg-obj}\fi
\if:Option \Hinclude[*]{svg.4ht}%
\:CheckOption{info}\if:Option \Hinclude[*]{infosvg.4ht}\fi
\fi
}
>>>
\<open office settings\><<<
\Configure{ooffice}{%
\:CheckOption{info}\if:Option
\Hinclude[*]{infoht4.4ht}%
\Hinclude[*]{infomml.4ht}%
\Hinclude[*]{infoof.4ht}\fi
\Hinclude[*]{ooffice.4ht}%
\:CheckOption{1}\if:Option\else
\:CheckOption{2}\if:Option\else
\:CheckOption{3}\if:Option\else
\:CheckOption{4}\if:Option\else
\:CheckOption{5}\if:Option\else
\:CheckOption{6}\if:Option\else
\:CheckOption{7}
\fi
\fi
\fi
\fi
\fi
\fi
\if:Option
\Hinclude[*]{ooimpress.4ht}%
\fi
\Hinclude[*]{unicode.4ht}%
\Hinclude[*]{mathml.4ht}%
\Hinclude[*]{ooffice-mml.4ht}%
}
>>>
\<save catcodes\><<<
\expandafter\edef\csname :RestoreCatcodes\endcsname{%
\expandafter\ifx \csname :RestoreCatcodes\endcsname\relax\else
\csname :RestoreCatcodes\endcsname \fi
\catcode`\noexpand :|=\the\catcode`:%
\ifnum \the\catcode`\#=6 \else
\catcode`\noexpand \#|=\the\catcode`\#\fi
\ifnum \the\catcode`\^=7 \else
\catcode`\noexpand \^|=\the\catcode`\^\fi
\let\expandafter\noexpand\csname :RestoreCatcodes\endcsname|=
\noexpand\UnDefcS}
\catcode`\:|=11 \catcode`\#|=6 \catcode`\^|=7
>>>
\<html mathltx settings\><<<
\Configure{mathltx}{%
\:CheckOption{info}\if:Option
\Hinclude[*]{infoht4.4ht}\fi
\Hinclude[*]{html4.4ht}%
\Hinclude[*]{unicode.4ht}%
\Hinclude[*]{html4-uni.4ht}%
\Hinclude[*]{mathltx.4ht}%
\Hinclude[*]{html-mltx.4ht}%
}
>>>
\<html jsmath settings\><<<
\Configure{jsmath}{%
\:CheckOption{info}\if:Option
\Hinclude[*]{infoht4.4ht}\fi
\Hinclude[*]{html4.4ht}%
\Hinclude[*]{unicode.4ht}%
\Hinclude[*]{html4-uni.4ht}%
\Hinclude[*]{mathltx.4ht}%
\Hinclude[*]{html-mltx.4ht}%
\Hinclude[*]{jsmath.4ht}%
\Hinclude[*]{html-jsmath.4ht}%
}
>>>
\<html emacspeak settings\><<<
\Configure{emspk}{%
\:CheckOption{info}\if:Option
\Hinclude[*]{infoht4.4ht}\fi
\Hinclude[*]{html4.4ht}%
\Hinclude[*]{unicode.4ht}%
\Hinclude[*]{html4-uni.4ht}%
\Hinclude[*]{mathml.4ht}%
\Hinclude[*]{html-mml.4ht}%
\Hinclude[*]{html-speech.4ht}%
\Hinclude[*]{html-speech-math.4ht}%
\Hinclude[*]{emacspeak.4ht}%
}
>>>
\<html emacspeak settings\><<<
\Configure{jsml}{%
\:CheckOption{info}\if:Option
\Hinclude[*]{infoht4.4ht}\fi
\Hinclude[*]{html4.4ht}%
\Hinclude[*]{unicode.4ht}%
\Hinclude[*]{html4-uni.4ht}%
\Hinclude[*]{mathml.4ht}%
\Hinclude[*]{html-mml.4ht}%
\Hinclude[*]{jsml.4ht}%
\Hinclude[*]{jsml-math.4ht}%
}
>>>
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\OutputCodE\<tex4ht.4ht\>
\endinput
\subsection{Accents}
| {
"alphanum_fraction": 0.5578699815,
"avg_line_length": 26.5758928571,
"ext": "tex",
"hexsha": "1d14bc3f886cba7de14cb153698a607bb282e7a7",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a256a7136d6638e90f07799892c005f2eb20730a",
"max_forks_repo_licenses": [
"LPPL-1.3c"
],
"max_forks_repo_name": "dgalcius/tex4ht-sync",
"max_forks_repo_path": "lit/tex4ht-options.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a256a7136d6638e90f07799892c005f2eb20730a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"LPPL-1.3c"
],
"max_issues_repo_name": "dgalcius/tex4ht-sync",
"max_issues_repo_path": "lit/tex4ht-options.tex",
"max_line_length": 82,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a256a7136d6638e90f07799892c005f2eb20730a",
"max_stars_repo_licenses": [
"LPPL-1.3c"
],
"max_stars_repo_name": "dgalcius/tex4ht-sync",
"max_stars_repo_path": "lit/tex4ht-options.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 4119,
"size": 11906
} |
\chapter{Sufficient statistics}
In this chapter the theory of sufficient statistics is introduced.
First we begin with an understanding of what a statistic is. Information about this subject can be found in \cite{casella2002statistical}. A statistic is a function that returns a summary of the data; examples are the mean value and the standard deviation of the sample. Let us assume our data come from a distribution depending on some parameters $\Theta$. The statistics we are interested in are those that contain information about the parameters $\Theta$. This leads us to sufficient statistics. A sufficient statistic is a statistic that captures all information about the parameters $\Theta$ and discards the rest. From \cite{casella2002statistical} we have the following.
\begin{defn}
(Sufficient statistic definition) A statistic $T(\boldsymbol{X})$ is a sufficient statistic for $\Theta$ if the conditional distribution of the sample $\boldsymbol{X}$ given the value of $T(\boldsymbol{X})$ does not depend on $\Theta$.
\end{defn}
Finding sufficient statistics can be difficult; however, one can use the factorization theorem to identify them. The theorem is as follows.
\begin{theorem} % s276
(Factorization theorem) Let $f(\boldsymbol{x}| \Theta)$ denote the joint pdf or pmf of a sample $\boldsymbol{X}$. A statistic $T(\boldsymbol{X})$ is a sufficient statistic for $\Theta$ if and only if there exist functions $g(t|\Theta)$ and $h(\boldsymbol{x})$ such that, for all sample points $\boldsymbol{x}$ and all parameter points $\Theta$,
\begin{equation*}
f(\boldsymbol{x}|\Theta) = g(T(\boldsymbol{x})| \Theta)h(\boldsymbol{x}).
\end{equation*}
\end{theorem}
$h(\boldsymbol{x})$ must not depend on $\Theta$; the remaining part is then $g(T(\boldsymbol{x})| \Theta)$, and from it we can read off the sufficient statistic.
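As a brief added illustration (not taken verbatim from \cite{casella2002statistical}), consider $X_1,\dots,X_n$ independent Poisson($\theta$) observations:
\begin{equation*}
f(\boldsymbol{x}|\theta) = \prod_{i=1}^{n} \frac{e^{-\theta}\theta^{x_i}}{x_i!}
= \underbrace{e^{-n\theta}\,\theta^{\sum_{i=1}^{n} x_i}}_{g(T(\boldsymbol{x})|\theta)}\;
\underbrace{\prod_{i=1}^{n} \frac{1}{x_i!}}_{h(\boldsymbol{x})},
\end{equation*}
so $T(\boldsymbol{X}) = \displaystyle\sum_{i=1}^{n} X_i$ is a sufficient statistic for $\theta$.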
\section{Sufficient statistics in NHPP}
\label{sec:sufstat}
To find the sufficient statistics for our NHPP, one can use the likelihood function, as shown in equation \ref{eq:like} and in \cite{lee1980testing}. The likelihood function can be found by
\begin{equation*}
P(t_1,..., t_n) = P(t_1,...,t_n|N(\tau)=n)P(N(\tau)=n).
\end{equation*}
From equation \ref{eq:NHPPdens}, the probability density of an event at $t_i$, given the number of events $n$, is
\begin{equation*}
P(t_i|n) = \frac{\lambda(t_i)}{\Lambda(\tau)}.
\end{equation*}
Hence the joint probability given $n$ is
\begin{equation*}
P(t_1,...,t_n| N(\tau) = n) = n! \prod_{i = 1}^{n} \frac{\lambda(t_i)}{\Lambda(\tau)}.
\end{equation*}
The term $n!$ is included because the event times are ordered.
The probability of $N=n$ events is as shown in equation \ref{eq:NumNHPP} with $t = \tau$. The joint probability of $\boldsymbol{t}$ becomes
\begin{equation*}
P(t_1,...,t_n) = e^{-\Lambda(\tau)} \frac{\Lambda(\tau)^n}{n!} n! \prod_{i=1}^n \frac{\lambda(t_i)}{\Lambda(\tau)} = e^{-\Lambda(\tau)} \prod_{i=1}^n \lambda(t_i).
\end{equation*}
From this the log likelihood function becomes
\begin{equation}
l = -\Lambda(\tau) + n\log(a) + n\log(b) + (b-1)\sum_{i=1}^{n} \log(t_i) + c\sum_{i=1}^{n} t_i.
\label{eq:loglike}
\end{equation}
From the factorization theorem we can set $h(\boldsymbol{t}) = 1$. Then we get the sufficient statistics $(n,\displaystyle\sum_{i=1}^{n} t_i, \displaystyle\sum_{i=1}^{n} \log(t_i))$.
The last statistic can equivalently be rewritten as $\displaystyle\prod_{i=1}^{n} t_i$.
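To spell this out (an added step for clarity), exponentiating the log-likelihood in equation \ref{eq:loglike} gives
\begin{equation*}
f(\boldsymbol{t}|\Theta) = e^{-\Lambda(\tau)}\, a^{n} b^{n}
\left(\prod_{i=1}^{n} t_i\right)^{b-1}
\exp\left(c\sum_{i=1}^{n} t_i\right),
\end{equation*}
which, with $h(\boldsymbol{t}) = 1$, depends on the data only through $\left(n, \sum_{i=1}^{n} t_i, \prod_{i=1}^{n} t_i\right)$.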
| {
"alphanum_fraction": 0.7279236277,
"avg_line_length": 79.8095238095,
"ext": "tex",
"hexsha": "db73b34252a86ea718810d836db86a59de4ca84a",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "3ef2fda314c55322de20f19ca861e4268a5e2d08",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "mariufa/ProsjektOppgave",
"max_forks_repo_path": "Thesis/chapters/sufficientstats.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "3ef2fda314c55322de20f19ca861e4268a5e2d08",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "mariufa/ProsjektOppgave",
"max_issues_repo_path": "Thesis/chapters/sufficientstats.tex",
"max_line_length": 665,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "3ef2fda314c55322de20f19ca861e4268a5e2d08",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "mariufa/ProsjektOppgave",
"max_stars_repo_path": "Thesis/chapters/sufficientstats.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 989,
"size": 3352
} |
%!TEX root = ../notes.tex
\section{March 8, 2022}
\subsection{Legendre Symbol \emph{continued}}
\begin{example}\label{example:legendre-symbol}
Determine if $219$ is a quadratic residue mod $383$ (we note that $383$ is a prime).
\begin{align*}
\lege{219}{383} & = \lege{3}{383}\cdot\lege{73}{383} \\
\intertext{We now flip the Legendre Symbols using quadratic reciprocity:}
& = -\lege{383}{3} \cdot \lege{383}{73} \\
& = -\lege{2}{3} \cdot \lege{18}{73} \\
& = 1\cdot \lege{18}{73} \\
& = \lege{2}{73}\cdot\lege{9}{73} \\
& = \lege{2}{73} = \boxed{1}
\end{align*}
\end{example}
\begin{remark*}
We must factor the top argument before beginning to flip using quadratic reciprocity.
\end{remark*}
\subsection{Proof of Quadratic Reciprocity}
Recall \cref{thm:qr}:
\begin{theorem*}[Law of Quadratic Reciprocity]
Let $p, q\in\ZZ_+$ be distinct odd positive primes. Then
\begin{equation*}
\lege{p}{q}\lege{q}{p} = (-1)^{\frac{p-1}{2}\frac{q-1}{2}}
\end{equation*}
In other words,
\[\lege{p}{q} = \lege{q}{p}\]
if and only if at least one of $p, q$ is congruent to $1$ mod $4$.
\end{theorem*}
\begin{proof}[Proof of Quadratic Reciprocity (\cref{thm:qr}), using Gauss's Lemma (\cref{lemma:gauss-lemma})]
\textsc{wlog}, let
\begin{align*}
P & = \left\{1, 2, \dots, \frac{p-1}{2}\right\},\quad N=-P, \\
Q & = \left\{1, 2, \dots, \frac{q-1}{2}\right\}
\end{align*}
We write $\tilde P, \tilde N$ for $P\pmod{p}$ and $N\pmod{p}$ respectively, so that Gauss's lemma gives
\[\lege{q}{p} = (-1)^\mu, \quad\text{where }\mu = |q\tilde P\cap\tilde N|\]
    In other words, $\mu$ is exactly the number of $x\in P$ such that $qx\equiv n\pmod{p}$ for some $n\in N$, and hence the number of $x\in P$ such that, for some $y\in \ZZ$, \[-\frac{p}{2} < qx - py < 0.\]
We now specify more precisely which $y$ can possibly satisfy this condition. Solving these inequalities for $y$ gives
\begin{align*}
\frac{qx}{p} < y < \frac{qx}{p}+\frac{1}{2}.
\end{align*}
\otoh, since $x\leq \frac{p-1}{2}$ $\forall x\in P$, this gives
\begin{align*}
y < \frac{qx}{p}+\frac{1}{2} & \leq \frac{q(p-1)}{2p}+\frac{1}{2} \\
& < \frac{q+1}{2}.
\end{align*}
Thus $0 < y < \frac{q+1}{2}$, which means that
\[y\in Q = \left\{1, 2, \dots, \frac{q-1}{2}\right\}.\]
We've shown that $\mu$ is the number of points $(x, y)\in P\times Q$ such that
    \[-\frac{p}{2} < qx - py < 0.\]
Switching $p$ and $q$, we also have
\[\lege{p}{q} = (-1)^\eta\]
where $\eta$ is the number of pairs
\[(y, x)\in Q\times P\]
such that
\[-\frac{q}{2} < py - qx < 0\]
which is exactly the number of pairs
\[(x, y)\in P\times Q\]
satisfying
\[0 < qx - py < \frac{q}{2}\]
(reflecting the inequality over $0$).
We note that
\[\lege{p}{q}\lege{q}{p} = (-1)^\mu (-1)^\eta = (-1)^{\mu+\eta}\]
so all that remains is counting $\mu$ and $\eta$. And we have that $\mu + \eta$ is the number of ordered pairs $(x, y)\in P\times Q$ such that either
\[-\frac{p}{2} < qx - py < 0 \text{ or } 0 < qx - py < \frac{q}{2}\]
    Noting that $qx - py\neq 0$, since $x$ and $y$ are from $P$ and $Q$ respectively, we can reduce this to
\[-\frac{p}{2} < qx - py < \frac{q}{2}.\]
Graphically, we are looking at:
\begin{center}
\includegraphics[width=0.8\textwidth]{images/qr-diagram.png}
\end{center}
where $\mu + \eta$ is the number of lattice points in the shaded region.
    Let $\alpha$ be the number of lattice points in $A$ and $\beta$ the number of lattice points in $B$. Then
\[\mu + \eta = \frac{p-1}{2}\frac{q-1}{2} - (\alpha + \beta)\]
We show that $\alpha = \beta$ so that $\alpha + \beta \equiv 0\pmod{2}$.
    Let $\rho$ be the rotation of the rectangle about its center, which leaves the rectangle invariant:
\[\rho(x, y) = \left(\frac{p+1}{2}-x, \frac{q+1}{2} - y\right)\]
    Writing $(x', y') = \rho(x, y)$, a quick computation gives $qx' - py' = \frac{q-p}{2} - (qx - py)$, so that
    \[qx - py < -\frac{p}{2} \Leftrightarrow qx' - py' > \frac{q}{2}.\]
    Since $\rho$ maps lattice points to lattice points, it exchanges the lattice points of $A$ with those of $B$, so $\alpha = \beta$, which concludes the proof with a little extra handiwork.
\end{proof}
\subsection{Jacobi Symbol}
The Jacobi symbol generalizes the Legendre symbol.
\begin{definition}[Jacobi Symbol]
Let $b$ be an odd positive integer and let $a\in\ZZ$. Write
    $b = p_1p_2\cdots p_m$, where the $p_i$ are (not necessarily distinct) primes. Then
\[\lege{a}{b} = \lege{a}{p_1}\lege{a}{p_2}\cdots \lege{a}{p_m}\]
is called the \ul{Jacobi symbol}.
\end{definition}
We note the basic property that the Jacobi symbol is totally multiplicative (in both the top and the bottom argument!):
\begin{align*}
\lege{a_1a_2}{b} & = \lege{a_1}{b}\lege{a_2}{b} \\
\lege{a}{b_1b_2} & = \lege{a}{b_1}\lege{a}{b_2}
\end{align*}
\begin{remark*}
Note that they're multiplicative \emph{fixing} either top or bottom. That is, they don't multiply like fractions.
\end{remark*}
\textbf{Warning!} $\lege{a}{b} = 1$ does not imply that $a$ is a quadratic residue modulo $b$ (since we could have $-1$'s from the factorization cancel out).
However, $\lege{a}{b} = -1$ \emph{does} imply that $a$ is a non-residue modulo $b$ (it is a non-residue modulo at least one of the prime factors of $b$).
\begin{example}
\[\lege{2}{15} = \lege{2}{3}\lege{2}{5} = (-1)(-1) = 1\]
but $2$ is not a quadratic residue modulo $15$.
\end{example}
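As an added worked illustration (not in the original notes), the Jacobi symbol lets us redo \cref{example:legendre-symbol} without factoring the numerator: using the properties collected in \cref{prop:5.2.2} below, we may flip and reduce modulo the denominator at each step,
\begin{align*}
\lege{219}{383} &= -\lege{383}{219} = -\lege{164}{219} = -\lege{4}{219}\lege{41}{219} = -\lege{41}{219} \\
&= -\lege{219}{41} = -\lege{14}{41} = -\lege{2}{41}\lege{7}{41} = -\lege{7}{41} \\
&= -\lege{41}{7} = -\lege{6}{7} = -\lege{-1}{7} = 1,
\end{align*}
where the first flip picks up a sign since $219\equiv 383\equiv 3\pmod 4$, the later flips do not since $41\equiv 1\pmod 4$, $\lege{2}{41} = 1$ since $41\equiv 1\pmod 8$, and $\lege{-1}{7} = -1$ by part (a). This agrees with \cref{example:legendre-symbol}.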
\begin{proposition}[5.2.2 of Text]\label{prop:5.2.2}
We have the following properties about the Jacobi symbol:
\begin{enumerate}[(a)]
\item \[\lege{-1}{b} = (-1)^{\frac{b-1}{2}}\]
\item \[\lege{2}{b} = (-1)^{\frac{b^2 - 1}{8}}\]
\item If $a, b\in\ZZ_+$, then
\[\lege{a}{b}\lege{b}{a} = (-1)^{\frac{a-1}{2}\frac{b-1}{2}}\]
\end{enumerate}
\end{proposition} | {
"alphanum_fraction": 0.5798767283,
"avg_line_length": 47.6428571429,
"ext": "tex",
"hexsha": "8324c78c83631bd787a38d32622f2365fb3c5ab7",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a3605894c69d4e3dd7f90829523ff3ec3c73a6f4",
"max_forks_repo_licenses": [
"BSL-1.0"
],
"max_forks_repo_name": "jchen/math1560-notes",
"max_forks_repo_path": "lectures/2022-03-08.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a3605894c69d4e3dd7f90829523ff3ec3c73a6f4",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSL-1.0"
],
"max_issues_repo_name": "jchen/math1560-notes",
"max_issues_repo_path": "lectures/2022-03-08.tex",
"max_line_length": 200,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "a3605894c69d4e3dd7f90829523ff3ec3c73a6f4",
"max_stars_repo_licenses": [
"BSL-1.0"
],
"max_stars_repo_name": "jchen/math1560-notes",
"max_stars_repo_path": "lectures/2022-03-08.tex",
"max_stars_repo_stars_event_max_datetime": "2022-02-03T20:28:48.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-02-02T15:41:56.000Z",
"num_tokens": 2170,
"size": 6003
} |
%!TEX root = ../../main.tex
\section{Heuristic Labeling and Quality Classification}
\label{scoring:sec:labelling}
We propose a semi-automatic labeling approach using a set of heuristics to label high quality (newsworthy) and low quality (noisy) content.
Our labeling approach is specifically designed not to label the majority of content.
Instead, we want to identify exceptional content which we can compare to the `random' background corpus, allowing the model to learn specific newsworthy and noisy features.
The use of heuristics has a number of advantages over the use of existing datasets and requires only minimal effort in comparison to creating a labeled dataset specifically for this task.
Many existing datasets that could be used for newsworthiness prediction are extremely small, or focus on a single large event or topic \citep{Kang12, Madhawa15}, making them less generalizable for training a newsworthiness classifier, and less likely to perform well across a broad range of events and event types.
Even large datasets with relevance judgements, such as the Events 2012 corpus from chapter \ref{chapter:collection}, covering 506 events with more than 100,000 relevance judgements, are unlikely to be suitable for supervised training.
Datasets of this size, although useful for evaluation, make no claims of completeness.
Despite having relevance judgements for a large number of events and tweets, the Events 2012 corpus covers only a tiny fraction of the many thousands of newsworthy events that take place every day, so training specifically on these events could result in over-fitting and decreased effectiveness on other datasets.
The use of heuristic labeling, rather than manual labeling, allows training data to be labeled in real-time and fed into the scoring approaches that follow.
This allows our approach to learn new features in real-time, as described in section \ref{scoring:sec:scoring}.
\cite{Madhawa15} used heuristic labeling to generate training data for newsworthiness classification.
They developed a classifier capable of classifying documents as either objective (high quality, newsworthy), or subjective (low quality, noise) as a filtering step before summarization.
However, their approach used only a small, manually curated list of accounts as newsworthy sources, and labeled any tweet containing an emoticon or emoji as noise.
Instead, we use a broader set of heuristics that place fewer restrictions on what constitutes newsworthy and noisy sources, allowing for a broader range of sources and more training data.
As the inclusion of `conversational' or non-newsworthy posts has been shown to have a negative effect on credibility assessments \citep{Noyunsan17,Sikdar13-2}, many credibility assessment approaches use features that attempt to capture the newsworthiness of a post before assessment.
We base many of the heuristics that follow on those shown to be effective for credibility assessment \citep{Sikdar13, Kang12, Castillo11, Madhawa15}.
% This also allows the model to react and reflect Newsworthiness Scores based on events as they happen, something that would be impossible using a pre-trained model.
% This prevents over-fitting and should allow the approach to be considerably more generalisable -- often the rarest events are the most newsworthy, and the use of manually labelled training data would likely prevent these rarest from being scored appropriately unless an extraordinary volume of training data was used.
Although these heuristics form an important part of this work, as we demonstrate in section \ref{scoring:sec:eval}, the exact choice of features and weights is less important than it may initially appear, as we use a very simple score cutoff to determine quality. As long as the heuristics give a reasonable level of accuracy when selecting training data, the performance of the Newsworthiness Scorer is only mildly affected by changes to the heuristics or their weights.
\subsection{Features}
We define a number of weighted heuristics designed to identify exceptionally high or low quality content. Weights are multiplicative, and we take the product of the weights from each feature as the overall Quality Score, \(Q_d\).
\subsubsection{User Description: \(W_{desc}\)}
We manually created a list of keywords and phrases commonly used by news broadcasters, journalists and financial traders in their User Description on Twitter. The full list of these terms and their weights can be found in Table \ref{scoring:table:authorKeywordsWeights}.
\begin{table}[h!]
\caption{Terms and weights assigned to each term for scoring a user's profile description. }
\begin{tabulary}{\textwidth}{l c L}
\toprule
\textbf{Type} &\textbf{Weight} & \textbf{Terms} \\
\midrule
News \& Journalism & 2.0 & news, report, journal, write, editor, analyst, analysis, media, updates, stories \\
Finance \& Trading & 2.0 & trader, investor, forex, stock, finance, market \\
Spam & 0.1 & ebay, review, shopping, deal, sales, marketing, promot, discount, products, store, diet, weight, porn, follow back, followback \\
\bottomrule
\end{tabulary}
\label{scoring:table:authorKeywordsWeights}
\end{table}
Positive terms were identified manually based on common terms used by news organizations, journalists, and financial traders on Twitter. The list of spam terms was also created manually based on common types of low quality marketing spam that is often found on Twitter.
The `follow back' terms are commonly associated with users who artificially inflate their follower count by `following back' anyone who follows them.
We penalize them here to correct for any boost given by the Number of Followers feature described later in this section.
Rather than match whole words, we match on a prefix basis. For example, `report' matches \emph{report}, as well as \emph{report}er and \emph{report}s. Note that this is similar, but not identical to, matching on stemmed terms. This helps to keep the list of terms short whilst giving high coverage. The overall feature score is a product of weights for any matching terms.
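To make the matching rule concrete, the following minimal sketch illustrates prefix-based matching using a handful of the weights from Table \ref{scoring:table:authorKeywordsWeights}. The function and the reduced term list are illustrative only, and multi-word phrases such as `follow back' would additionally require a simple substring check.
\begin{verbatim}
# Illustrative subset of the weights in the table above.
TERM_WEIGHTS = {"news": 2.0, "report": 2.0, "editor": 2.0,
                "trader": 2.0, "ebay": 0.1, "diet": 0.1}

def description_weight(description):
    """W_desc: product of weights for every term matching on a prefix basis."""
    words = description.lower().split()
    weight = 1.0
    for term, w in TERM_WEIGHTS.items():
        if any(word.startswith(term) for word in words):
            weight *= w
    return weight

description_weight("Breaking news reporter")  # 2.0 * 2.0 = 4.0
\end{verbatim}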
We note that some of the terms used for scoring could be subjective or change over time due to topic drift.
While these terms work well for the Events 2012 corpus that we use for evaluation, they may require modifications for other datasets, or could potentially be replaced with a supervised classification approach.
\subsubsection{Account Age: \(W_{age}\)}
Young accounts are generally viewed as less credible than accounts that have been active for longer periods of time \citep{Sikdar13}, so we give accounts created within the last 90 days a lower weight.
\begin{table}[ht]
\centering
\caption{Account age ranges and the weights assigned to accounts whose age falls within each range.}
\begin{tabulary}{\textwidth}{l c}
\toprule
\textbf{Account Age (days)} & \textbf{Weight} \\
\midrule
< 1 day & 0.05 \\
< 30 days & 0.10 \\
< 90 days & 0.25 \\
90+ days & 1.00 \\
\bottomrule
\end{tabulary}
\label{scoring:table:accountAge}
\end{table}
\subsubsection{Number of Followers: \(W_{followers}\)}
It is often the case that something becomes news not because of what was said, but who said it. A head of state or public figure expressing sympathy for victims of an accident is considerably more newsworthy than the average person doing the same.
The number of followers a user has can indicate how influential or newsworthy the user is, and thus how newsworthy the user's tweets are likely to be, and is a commonly used feature for automatic credibility assessment \citep{Kang12, Sikdar13, Gun14, Madhawa15}.
Given this, we assign higher weights to users with more followers, and a lower weight to users with very few followers, as shown in Table \ref{scoring:table:followersWeight}.
The vast majority of tweets (83.81\%) are posted by users who have between 50 and 4,999 followers, and are unaffected by this feature. The aim is to affect only the extremes: users with very few, or very many followers.
\begin{table}[ht]
\centering
\caption{Follower ranges and weights assigned to accounts who have followers between the range defined.}
\begin{tabulary}{\textwidth}{l c}
\toprule
\textbf{Number of Followers} & \textbf{Weight} \\
\midrule
0 - 49 & 0.5 \\
50 - 4,999 & 1.0 \\
5,000 - 9,999 & 1.5 \\
10,000 - 99,999 & 2.0 \\
100,000 - 999,999 & 2.5 \\
1,000,000+ & 3.0 \\
\bottomrule
\end{tabulary}
\label{scoring:table:followersWeight}
\end{table}
\subsubsection{User Verified Status: \(W_{verified}\)}
Politicians, organizations, celebrities, journalists and other public figures can request that Twitter `verify' their identity by marking their account as verified and displaying a blue badge near their name or display picture\footnote{\texttt{https://help.twitter.com/en/managing-your-account/about-twitter-\\verified-accounts}}. Although the exact requirements for verification are undocumented, verification is often seen as a sign of authenticity and significance.
At the time of writing, approximately 290,000 accounts have been verified by Twitter, a full list of which can be obtained by examining the list of accounts followed by Twitter's \texttt{@verified}\footnote{\texttt{https://twitter.com/verified/following}} account. A survey of Verified accounts in 2015 found that approximately 41\% of accounts are related to news, journalism, politics or government\footnote{\texttt{https://medium.com/three-pipe-vc/who-are-twitter-s-verified-users-\\af976fc1b032}}. This supports our hypothesis that verified accounts are a good source of high quality, newsworthy content, so we give Verified users a weight of 1.5. Unverified users are unaffected by this feature (i.e. given a weight of 1.0).
\subsubsection{Posts Per Day: \(W_{ppd}\)}
Quality and quantity often have an inverse correlation, especially on social media.
Accounts which produce an extremely high volume of posts are often automated accounts repeating content from other sources with the aim of acquiring followers, advertising a product or service, and more recently, for the purpose of propaganda and misinformation \citep{Forelle15, Howard16}.
Accounts that post more than 50 times per day are often considered to be heavily automated \citep{Howard16}. For this reason, we penalize any account that posts more than 50 times per day on average (weight of 0.5), and apply a more severe penalty for accounts which tweet more than 100 times per day on average (weight of 0.25).
We note, however, that many heavily automated accounts are in fact prominent news and media organizations. To prevent these legitimate accounts from being penalized, we do not apply any penalty to Verified accounts.
\subsubsection{Has Default Profile Image: \(W_{image}\)}
Twitter users who do not provide a custom profile image (often nicknamed ``eggs'' due to Twitter's historic use of an egg as the default profile image) are generally considered less trustworthy and credible \citep{Castillo11,Sikdar13, Gun14} than users who have taken the time to provide a custom image.
Twitter themselves noted that users who create `throwaway' accounts, often for the purpose of spamming or to post abuse, tend not to personalize their accounts.
The association between the default egg image and low quality accounts has been noted in published work previously \citep{Sikdar13}, and the public association was one of the key reasons noted by Twitter for changes to their default profile image in March 2017\footnote{\texttt{https://blog.twitter.com/en\_us/topics/product/2017/rethinking-our-\\default-profile-photo.html}}.
We assign user accounts that have not specified a custom profile image a weight of 0.5.
\subsection{Labeling}
As described earlier, document \(d\) is assigned an overall Quality Score, \(Q_d\), taken as the product of scores from each feature:
\begin{equation}
Q_d = W_{desc} \times W_{age} \times W_{followers} \times W_{verified} \times W_{ppd} \times W_{image}
\end{equation}
For example, a tweet \(d\) with weights 2.0 for \(W_{desc}\), 1.5 for \(W_{followers}\), and 1.0 for all other features would have an overall Quality Score, \(
Q_d = 2.0 \times 1.0 \times 1.5 \times 1.0 \times 1.0 \times 1.0 = 3.0 \).
Cutoff values are used to determine quality labels from \(Q_d\).
We examine how various cutoff values affect performance in section \ref{scoring:sec:eval}; however, unless otherwise stated, we label tweets with \(Q_d \geq 4.0\) as High Quality, and \(Q_d \leq 0.25\) as Low Quality, as these give the best classification rates for the Events 2012 corpus.
Documents with scores between these two cutoffs are left unlabeled.
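For illustration, the complete scoring and labeling procedure described above can be sketched as follows. The weights and cutoffs are those given in the tables and text above; the function names and their structure are purely illustrative and do not correspond to any released implementation.
\begin{verbatim}
def age_weight(days):                  # W_age, from the account age table
    if days < 1:  return 0.05
    if days < 30: return 0.10
    if days < 90: return 0.25
    return 1.0

def follower_weight(n):                # W_followers, from the follower range table
    if n < 50:       return 0.5
    if n < 5000:     return 1.0
    if n < 10000:    return 1.5
    if n < 100000:   return 2.0
    if n < 1000000:  return 2.5
    return 3.0

def quality_score(desc_w, age_days, followers, verified,
                  posts_per_day, default_image):
    q = desc_w * age_weight(age_days) * follower_weight(followers)
    q *= 1.5 if verified else 1.0                      # W_verified
    if not verified:                                   # W_ppd: Verified accounts exempt
        if posts_per_day > 100:  q *= 0.25
        elif posts_per_day > 50: q *= 0.5
    q *= 0.5 if default_image else 1.0                 # W_image
    return q

def label(q_d, high=4.0, low=0.25):
    if q_d >= high: return "high quality"
    if q_d <= low:  return "low quality"
    return None                                        # unlabeled
\end{verbatim}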
| {
"alphanum_fraction": 0.7864200458,
"avg_line_length": 81.6193548387,
"ext": "tex",
"hexsha": "e8533d5675aa973ee6a69676a0d126889bc1d6ed",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "b83bc0913b6b8ad1f5202c37fa0623a784b6e09a",
"max_forks_repo_licenses": [
"Xnet",
"X11"
],
"max_forks_repo_name": "JamesMcMinn/Thesis-Revisions",
"max_forks_repo_path": "Chapters/Newsworthiness/labelling.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "b83bc0913b6b8ad1f5202c37fa0623a784b6e09a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Xnet",
"X11"
],
"max_issues_repo_name": "JamesMcMinn/Thesis-Revisions",
"max_issues_repo_path": "Chapters/Newsworthiness/labelling.tex",
"max_line_length": 725,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "b83bc0913b6b8ad1f5202c37fa0623a784b6e09a",
"max_stars_repo_licenses": [
"Xnet",
"X11"
],
"max_stars_repo_name": "JamesMcMinn/Thesis-Revisions",
"max_stars_repo_path": "Chapters/Newsworthiness/labelling.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2966,
"size": 12651
} |
%!TEX root = labo.tex
\subsubsection*{Routing protocols}
\begin{itemize}
\item \emph{Distance Vector and Link State Routing Protocols}: Go to the website \url{http://docwiki.cisco.com/wiki/Internetworking_Technology_Handbook} and read the article about dynamic routing protocols. Review your knowledge of interdomain and intradomain routing, distance vector routing, and link state routing.
\item \emph{Zebra}: Go to the website of the Zebra fork Quagga at \url{http://www.nongnu.org/quagga/} and study the information on the Quagga routing protocol software for Linux systems. Also find and read the man pages on zebra, ripd, ospfd and bgpd. Note: Quagga is a fork of the GNU Zebra project.
\item \emph{RIP}: Read the overview of the Routing Information Protocol (RIP) and study the commands to configure RIP on a Cisco router at \url{http://www.routeralley.com/guides/rip.pdf}.
\item \emph{OSPF}: Read the overview of Open Shortest Path First (OSPF) routing protocol and study the commands to configure OSPF on a Cisco router at \url{http://www.routeralley.com/guides/ospf.pdf}.
\end{itemize}
\newpage
\subsubsection*{Prelab Questions}
\begin{questions}
\q{1}{Provide the command that configures a Linux PC as an IP router (see Lab 3).}
\q{2}{What are the main differences between a distance vector routing protocol and a link state routing protocol? Give examples for each type of protocol.}
\q{3}{What are the differences between an intradomain routing protocol (also called interior gateway protocol or IGP) and an interdomain routing protocol (also called exterior gateway protocol or EGP)? Give examples for each type of protocol.}
\q{4}{Which routing protocols are supported by the software package Zebra?}
\q{5}{In the Zebra software package, the processes ripd, ospfd, and bgpd deal, respectively, with the routing protocols RIP, OSPF, and BGP. Which role does the process zebra play?}
\q{6}{Describe how a Linux user accesses the Zebra processes (zebra, ripd, ospfd, bgpd) to configure routing algorithm parameters.}
\q{7}{What is the main difference between RIP version 1 (RIPv1) and RIP version 2 (RIPv2)?}
\q{8}{Explain what it means to ``run RIP in passive mode''.}
\q{9}{Explain the meaning of ``triggered updates'' in RIP.}
\q{10}{Explain the concept of split-horizon in RIP.}
\q{11}{What is an autonomous system (AS)? Which roles do autonomous systems play in the Internet?}
\q{12}{What is the AS number of your institution? Which autonomous system has AS number 1?}
\q{13}{Explain the terms Stub AS, Multi-homed AS, and Transit AS.}
\end{questions}
| {
"alphanum_fraction": 0.7685505575,
"avg_line_length": 89.6896551724,
"ext": "tex",
"hexsha": "1d8ef0a9ccfef3615cb4620cef23b6aa743b50c4",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f900d3e74e5e225791a537c3a4e7bbc5afb1d93b",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "arminnh/lab-computer-networks",
"max_forks_repo_path": "Lab 4/prelab4.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f900d3e74e5e225791a537c3a4e7bbc5afb1d93b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "arminnh/lab-computer-networks",
"max_issues_repo_path": "Lab 4/prelab4.tex",
"max_line_length": 318,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f900d3e74e5e225791a537c3a4e7bbc5afb1d93b",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "arminnh/lab-computer-networks",
"max_stars_repo_path": "Lab 4/prelab4.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 702,
"size": 2601
} |
\documentclass[../main.tex]{subfiles}
\begin{document}
\section{Appendix K: System Lifecycle Management Policy}
To be published
\end{document} | {
"alphanum_fraction": 0.7887323944,
"avg_line_length": 28.4,
"ext": "tex",
"hexsha": "f51d8792fd2b442dee7d6200aeef68ca2af64db3",
"lang": "TeX",
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2022-03-03T16:29:52.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-02-16T19:38:27.000Z",
"max_forks_repo_head_hexsha": "f6e7374dfd1a5e6bc44adafaacc9708618139f62",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ericiussecurity/ProjectKeystone",
"max_forks_repo_path": "TeX/sections/appendixK.tex",
"max_issues_count": 2,
"max_issues_repo_head_hexsha": "f6e7374dfd1a5e6bc44adafaacc9708618139f62",
"max_issues_repo_issues_event_max_datetime": "2022-03-04T13:58:20.000Z",
"max_issues_repo_issues_event_min_datetime": "2022-03-03T17:51:32.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ericiussecurity/ProjectKeystone",
"max_issues_repo_path": "TeX/sections/appendixK.tex",
"max_line_length": 56,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "f6e7374dfd1a5e6bc44adafaacc9708618139f62",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ericiussecurity/ProjectKeystone",
"max_stars_repo_path": "TeX/sections/appendixK.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-17T15:42:31.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-02-07T17:37:06.000Z",
"num_tokens": 37,
"size": 142
} |
\documentclass[a4paper]{article}
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{libertine}
\usepackage[libertine]{newtxmath}
\usepackage{amsmath}
\usepackage{commath}
\usepackage{mathrsfs}
% \usepackage{enumitem}
\usepackage{bm}
\usepackage{authblk}
% \usepackage{esdiff}
% \usepackage{microtype}
\newcommand{\M}[1]{\bm{#1}}
\newcommand{\Mc}[1]{\mathbf{#1}}
\newcommand{\V}[1]{\mathbf{#1}}
\newcommand{\transpose}{^{\text{T}}}
\newcommand{\E}{\text{E}}
\newcommand{\fourier}{\mathcal{F}}
\newcommand{\lagrange}{\mathscr{L}}
\newcommand{\sub}[1]{_{\mathrm{#1}}}
\DeclareMathOperator{\Var}{Var}
\DeclareMathOperator{\nul}{nul}
\DeclareMathOperator{\rank}{rank}
\title{Summary of TTK4115}
\author{Morten Fyhn Amundsen}
\affil{NTNU}
\begin{document}
\maketitle
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Matrix stuff}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\paragraph{Nullity} $\nul(\M{A}) = \mbox{No. of columns of } \M{A} - \rank(\M{A})$
\paragraph{Positive definite} A symmetric $n \times n$ real matrix $\M{M}$ is \emph{positive definite} if all its eigenvalues are positive. (Or if $\V{z}\transpose \M{M}\V{z} > 0$ for every non-zero vector $\V{z}$ of $n$ real numbers.) It is positive \emph{semidefinite} if all eigenvalues are positive or zero.
\paragraph{Singularity} A square matrix is singular if it is not invertible, i.e. if its determinant is $0$.
\paragraph{Matrix exponential (diagonal)}
$$\M{A} =
\begin{bmatrix}
a_1 & \cdots & 0 \\
\vdots & \ddots & \vdots \\
0 & \cdots & a_n \\
\end{bmatrix}
\quad \implies \quad
e^{\M{A}t} =
\begin{bmatrix}
e^{a_1t} & \cdots & 0 \\
\vdots & \ddots & \vdots \\
0 & \cdots & e^{a_nt} \\
\end{bmatrix}$$
\paragraph{Matrix exponential (Cayley-Hamilton Method)}
$$e^{\M{A}t} = \sum_{k=0}^{n-1} \alpha_k \M{A}^k
\quad \text {with } \alpha_0 \cdots \alpha_{n-1} \text{ determined by} \quad
e^{\lambda_i t} = \sum_{k=0}^{n-1} \alpha_k \lambda_i^k$$
\paragraph{Matrix exponential (Laplace method)}
$$e^{\M{A}t} = \lagrange^{-1} \left\{ (s\Mc{I}-\M{A})^{-1} \right\}$$
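For example (a quick check, added here and not from the lecture notes), for a nilpotent matrix:
$$\M{A} =
\begin{bmatrix}
0 & 1 \\
0 & 0 \\
\end{bmatrix}
\quad \implies \quad
(s\Mc{I}-\M{A})^{-1} =
\begin{bmatrix}
\tfrac{1}{s} & \tfrac{1}{s^{2}} \\
0 & \tfrac{1}{s} \\
\end{bmatrix}
\quad \implies \quad
e^{\M{A}t} =
\begin{bmatrix}
1 & t \\
0 & 1 \\
\end{bmatrix}$$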
\paragraph{Matrix exponential (Jordan form)}
$$e^{\M{A}t} = \M{Q} e^{\bar{\M{A}}t} \M{Q}^{-1} \quad \text{where} \quad \bar{\M{A}} = \M{Q}^{-1}\M{AQ}$$
\paragraph{Controllability matrix} $\mathcal{C} = \begin{bmatrix}\M{B} & \M{AB} & \M{A}^{2}\M{B} & \hdots & \M{A}^{n-1}\M{B}\end{bmatrix}$
\paragraph{Observability matrix} $\mathcal{O} = \begin{bmatrix}\M{C} & \M{CA} & \M{CA}^2 & \hdots & \M{CA}^{n-1}\end{bmatrix}^{\text{T}}$
\paragraph{Minimal realisation} Given a transfer function, a state-space model that is controllable and observable, and has the same input-output behaviour as the function, is minimal.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Eigen stuff}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\paragraph{Eigenvalues} Values of $\lambda$ such that $\Delta(\lambda) = |\lambda \M{I} - \M{A}| = 0$.
\paragraph{Eigenvectors} Vectors $\V{v}$ such that $(\M{A} - \lambda \Mc{I})\V{v} = 0$.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Stability}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\paragraph{Asymptotic stability} Occurs if all poles have strictly negative real parts.
\paragraph{Instability} Occurs if one or more poles have positive real parts.
\paragraph{Marginal stability} Occurs when the real part of every pole is non-positive, at least one pole has zero real value, and there are no repeated poles on the imaginary axis.
\paragraph{BIBO stability} If bounded input $\rightarrow$ bounded output. Defined for the zero-state response (initially relaxed system). See Section \ref{sec:bibo}.
\paragraph{Lyapunov stability} If every finite initial state gives a finite response. I.e. the zero-input response.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{BIBO Stability}\label{sec:bibo}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{BIBO Stability for Continuous Systems}
A continuous system is BIBO stable \emph{iff}:
\begin{itemize}
\item (SISO) $g(t)$ is absolutely integrable in $[0, \infty)$ \quad or \quad $\int_{0}^{\infty} |g(t)| \dif t \leq M < \infty $ for some constant $M$.
\item (SISO/MIMO) Every pole of every transfer function in $\M{\hat{G}}(s)$ or $\hat{g}(s)$ has a negative real part.
\end{itemize}
\subsection{BIBO Stability for Discrete Systems}
A discrete system is BIBO stable iff:
\begin{itemize}
\item Every pole of every transfer function in $\M{\hat{G}}(z)$ or $\hat{g}(z)$ has magnitude less than $1$.
\end{itemize}
\subsection{Lyapunov Stability for Linear Systems}
An LTI system $\V{\dot{x}} = \M{A}\V{x}$ is stable if there exists a \emph{symmetric} positive definite matrix $\M{P}$ that satisfies the Lyapunov Equation
$$\M{A} \transpose \M{P} + \M{PA} = - \M{N}$$
Where $\M{N}$ is an arbitrary positive definite matrix.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Discretisation}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
$$\M{A}_d = e^{\M{A}T},\qquad \M{B}_d = \int_0^T e^{\M{A}\tau} \dif \tau \M{B}, \qquad \M{C}_d = \M{C}, \qquad \M{D}_d = \M{D}$$
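When $\M{A}$ is invertible, the integral has the standard closed form (added here for convenience):
$$\M{B}_d = \M{A}^{-1}\left(e^{\M{A}T} - \Mc{I}\right)\M{B}$$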
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Similarity transform}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
A linear change of coordinates where the original object is expressed with respect to a different basis. The representation of $\V{x}$ with respect to the basis $\{ \V{q}_1, \dots, \V{q}_n \}$ is $\V{\bar{x}}$ and with $\M{Q} = [ \V{q}_1, \dots, \V{q}_n ]$, the similarity transform is
$$\V{x} = \M{Q}\V{\bar{x}}.$$
The system originally expressed as
$$\V{\dot{x}} = \M{A}\V{x} + \M{B}\V{u}$$
$$\V{y} = \M{C}\V{x} + \M{D}\V{u}$$
is transformed to
$$\V{\dot{\bar{x}}} = \M{\bar{A}}\V{\bar{x}} + \M{\bar{B}}\V{u}$$
$$\V{y} = \M{\bar{C}}\V{\bar{x}} + \M{\bar{D}}\V{u}$$
where
$$\M{\bar{A}} = \M{Q}^{-1}\M{AQ},\qquad \M{\bar{B}} = \M{Q}^{-1}\M{B},\qquad \M{\bar{C}} = \M{CQ},\quad \M{\bar{D}} = \M{D}$$
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Jordan canonical form}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
General strategy:
\begin{enumerate}
\item Find all eigenvectors corresponding to an eigenvalue of $\M{A}$.
\item The number of L.I. eigenvectors is the number of Jordan blocks.
\item For each eigenvector $\V{q}$, solve $(\lambda \Mc{I} - \M{A}) \V{v} = \V{q}$ for the vector $\V{v}$.
\end{enumerate}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Statistics}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\paragraph{Expected value} $\E[X] = \int_{-\infty}^{\infty} x f(x) \dif t$
\paragraph{Variance} $\Var(X) = \E[(X-\E[X])^{2}] = \E[X^{2}] - (\E[X])^{2}$
\paragraph{Autocorrelation} $R_X(t_1, t_2) = \E[X(t_1)X(t_2)]$
\paragraph{Wide-sense stationary process} $X(t)$ is WSS if its mean and autocorrelation functions are time invariant: \quad $\E[X(t)] = \nu$ \quad and \quad $R_X(t_1, t_2) = f(t_2-t_1)$.
\paragraph{Spectral density function} $S_X(\jmath\:\omega) = \fourier\left\{ R_X(\tau) \right\}$
\paragraph{Gauss--Markov process} A stationary Gaussian process $X(t)$ that has an exponential autocorrelation is called a \emph{Gauss--Markov} process.
$$R_X(\tau) = \sigma^2 e^{-\beta |\tau|}$$ $$S_X(\jmath\:\omega) = \frac{2\sigma^2\beta}{\beta^2 + \omega^2} \quad\text{or}\quad S_X(s) = \frac{2\sigma^2\beta}{\beta^2 - s^2}$$
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Linear-quadratic regulator}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
A system is given as $$\V{\dot{x}} = \M{A}\V{x} + \M{B}\V{u}$$
with a state feedback $\V{u} = - \M{K}\V{x}$ chosen to minimise the cost function
$$J = \int_0^\infty \V{x}\transpose \M{Q} \V{x} + \V{u}\transpose \M{R} \V{u} \dif t$$
where $\M{Q}$ is symmetric and positive semidefinite, $\M{R}$ is symmetric and positive definite, and $\M{K} = \M{R}^{-1}\M{B}\transpose\M{P}$. The matrix $\M{P}$ is found by solving $$\M{A}\transpose \M{P} + \M{PA} - \M{PBR}^{-1}\M{B}\transpose \M{P} + \M{Q} = \M{0}$$
The relative values of the elements of $Q$ and $R$ enforce tradeoffs between the magnitude of the control action and the speed of the response. The equilibrium can be shifted from $\V{0}$ to $\V{x}\sub{eq}$ by instead using $\V{u} = \M{P}\V{x}\sub{eq} - \M{K}\V{x}$.
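A minimal numerical sketch (an added example using SciPy's Riccati solver; the system matrices are arbitrary illustrative values):
\begin{verbatim}
import numpy as np
from scipy.linalg import solve_continuous_are

A = np.array([[0.0, 1.0], [0.0, 0.0]])   # double integrator (example values)
B = np.array([[0.0], [1.0]])
Q = np.diag([1.0, 0.1])                   # positive semidefinite
R = np.array([[0.01]])                    # positive definite

P = solve_continuous_are(A, B, Q, R)      # solves A'P + PA - PBR^{-1}B'P + Q = 0
K = np.linalg.solve(R, B.T @ P)           # K = R^{-1} B' P
\end{verbatim}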
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Kalman filter}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
The system is given as:
$$\V{x}_{k+1} = \M{A}\V{x}_k + \M{B}\V{u}_k + \V{w}_k$$
$$\V{z}_k = \M{H}_k\V{x}_k + \V{v}_k$$
The Kalman measurement update equations are:
$$\M{K}_k = \M{P}_k^- \M{H}_k\transpose (\M{H}_k\M{P}_k^- \M{H}_k\transpose + \M{R}_k)^{-1}$$
$$\V{\hat{x}}_k = \V{\hat{x}}_k^- + \M{K}_k(\V{z}_k - \M{H}_k \V{\hat{x}}_k^-)$$
$$\M{P}_k = (\Mc{I} - \M{K}_k \M{H}_k) \M{P}_k^- (\Mc{I}-\M{K}_k\M{H}_k)\transpose + \M{K}_k\M{R}_k\M{K}_k\transpose$$
And the time update equations are:
$$\V{\hat{x}}_{k+1}^- = \M{A}\V{\hat{x}}_k + \M{B}\V{u}_k$$
$$\M{P}_{k+1}^- = \M{AP}_k\M{A}\transpose + \M{Q}_k$$
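A minimal NumPy sketch of one filter iteration, mirroring the equations above (added example, not from the course):
\begin{verbatim}
import numpy as np

def measurement_update(x_prior, P_prior, z, H, R):
    S = H @ P_prior @ H.T + R
    K = P_prior @ H.T @ np.linalg.inv(S)             # Kalman gain
    x_post = x_prior + K @ (z - H @ x_prior)
    I_KH = np.eye(len(x_prior)) - K @ H
    P_post = I_KH @ P_prior @ I_KH.T + K @ R @ K.T   # Joseph-form update
    return x_post, P_post

def time_update(x_post, P_post, u, A, B, Q):
    return A @ x_post + B @ u, A @ P_post @ A.T + Q
\end{verbatim}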
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Extended Kalman filter}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
The system is given as:
$$\V{x}_{k+1} = \V{f}(\V{x}_k, \V{u}_k) + \V{w}_k$$
$$\V{y}_k = \V{h}(\V{x}_k) + \V{v}_k$$
The Kalman measurement update equations are:
$$\M{K}_k = \M{P}_k^- \M{C}_k\transpose (\M{C}_k \M{P}_k^- \M{C}_k\transpose + \M{R})^{-1}$$
$$\V{\hat{x}}_k = \V{\hat{x}}_k^- + \M{K}_k \left(\V{y}_k - \V{h}(\V{\hat{x}}_k^-)\right)$$
$$\M{P}_k = (\Mc{I} - \M{K}_k\M{C}_k) \M{P}_k^- (\Mc{I} - \M{K}_k\M{C}_k)\transpose + \M{K}_k\M{R}_k\M{K}_k\transpose$$
And the time update equations are:
$$\V{\hat{x}}_{k+1}^- = \V{f}(\V{\hat{x}}_k, \V{u}_k)$$
$$\M{P}_{k+1}^- = \M{A}_k\M{P}_k\M{A}_k\transpose + \M{Q}_k$$
Where:
$$\M{A}_k = \pd{\V{f}}{\V{x}_k}\bigg|_{\V{x}_k = \V{\hat{x}}_k} \qquad\text{and}\qquad \M{C}_k = \pd{\V{h}}{\V{x}_k}\bigg|_{\V{x}_k = \V{\hat{x}}_k^-}$$
\end{document}
| {
"alphanum_fraction": 0.5513732464,
"avg_line_length": 49.6176470588,
"ext": "tex",
"hexsha": "dbbbc2eaa775913273bfd69fb9560e3a41c030db",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "8ba859de2349b93c5079ca10a4cf2ec49c1f5dc0",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "jakoblover/ntnu-course-summaries",
"max_forks_repo_path": "TTK4115 Linear system theory/TTK4115-Summary.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "8ba859de2349b93c5079ca10a4cf2ec49c1f5dc0",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "jakoblover/ntnu-course-summaries",
"max_issues_repo_path": "TTK4115 Linear system theory/TTK4115-Summary.tex",
"max_line_length": 311,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "8ba859de2349b93c5079ca10a4cf2ec49c1f5dc0",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "jakoblover/ntnu-course-summaries",
"max_stars_repo_path": "TTK4115 Linear system theory/TTK4115-Summary.tex",
"max_stars_repo_stars_event_max_datetime": "2019-04-11T02:42:40.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-05-30T09:19:22.000Z",
"num_tokens": 3657,
"size": 10122
} |
\documentclass{article}
\usepackage[utf8]{inputenc}
\usepackage{geometry}
\usepackage{tikz}
\usepackage{graphicx}
\graphicspath{{images/}}
\usepackage{footmisc}
\usepackage{xcolor}
%\pagecolor[rgb]{0.8,0.76,0.7}
\usepackage{float}
\usepackage{caption}
\usepackage{subcaption}
\captionsetup{compatibility=false}
\usepackage{hyperref}
\hypersetup{
colorlinks,
citecolor=black,
filecolor=black,
linkcolor=black,
urlcolor=blue
}
\title{Polytope}
\author{URL, Mathtician, Galoomba}
\date{February-May 2021}
\begin{document}
\maketitle
\section{Introduction}
Welcome to the \href{https://discord.gg/invite/zMRu7T4}{Polytope Discord}!
\section{What is a polytope?}
Roughly speaking, a \textbf{polytope} is an $n$-dimensional shape.
A polytope in $n$ dimensions (known hereafter as an $n$-polytope)
is made of \textbf{facets} which are $(n-1)$-polytopes.
\begin{itemize}
\item \textbf{Points} ($0$-polytopes) are the facets of \textbf{line segments} ($1$-polytopes),
\item which are the facets of \textbf{polygons} ($2$-polytopes),
\item which are the facets of \textbf{polyhedra} ($3$-polytopes),
\item which are the facets of \textbf{polychora} ($4$-polytopes), etc.
\end{itemize}
Collectively, a polytope's vertices, edges, faces,
and so on are known as its \textbf{elements}.
For a more formal definition, see Section \ref{indepth}: \nameref{indepth}.
\section{Types of polytopes}
General polytopes can be very complicated. Therefore, we only tend to study
specific categories of polytopes.
\subsection{Isogonal polytopes}
\label{isogonal}
A polytope is \textbf{isogonal} or \textbf{vertex-transitive} if any vertex can be taken to any other
vertex with a rotation or reflection such that the polytope ends up looking the same.
Let's look at a few examples.
\begin{figure}[H]
\centering
\begin{subfigure}{.33333\textwidth}
\centering
\includegraphics[width=.5\linewidth]{dodecahedron}
\caption{Dodecahedron}
\label{fig:doe}
\end{subfigure}%
\begin{subfigure}{.33333\textwidth}
\centering
\includegraphics[width=.5\linewidth]{truncated_cuboctahedron}
\caption{Truncated cuboctahedron}
\label{fig:girco}
\end{subfigure}%
\begin{subfigure}{.33333\textwidth}
\centering
\includegraphics[width=.5\linewidth]{square_pyramid}
\caption{Square pyramid}
\label{fig:squippy}
\end{subfigure}%
\caption{Three polyhedra. The first two are isogonal, the third is not.}
\label{fig:polyhedra1}
\end{figure}
\begin{itemize}
\item A dodecahedron is isogonal; any of its vertices can be taken to any other vertex with a
rotation.
\item A truncated cuboctahedron is also isogonal; some pairs of vertices require a reflection, but
that's allowed.
\item A square pyramid is not isogonal; the top vertex is not equivalent to the four base vertices.
\end{itemize}
A lot of interesting categories of polytopes are isogonal.
\subsection{Regular polytopes}
Regular polytopes are probably the most widely known category. For example, there are nine
regular polyhedra\footnote{
Readers coming from jan Misali's video on regular polyhedra may find fault with this,
but the fact of the matter is that skew polytopes are rarely mentioned or included in our lists,
and infinite polytopes are usually not included.
}:
\begin{figure}[H]
\centering
\begin{subfigure}{.2\textwidth}
\centering
\includegraphics[width=.7\linewidth]{tet}
\caption{Tetrahedron}
\label{fig:r3_tet}
\end{subfigure}%
\begin{subfigure}{.2\textwidth}
\centering
\includegraphics[width=.7\linewidth]{cube}
\caption{Cube}
\label{fig:r3_cube}
\end{subfigure}%
\begin{subfigure}{.2\textwidth}
\centering
\includegraphics[width=.7\linewidth]{oct}
\caption{Octahedron}
\label{fig:r3_oct}
\end{subfigure}%
\begin{subfigure}{.2\textwidth}
\centering
\includegraphics[width=.7\linewidth]{doe}
\caption{Dodecahedron}
\label{fig:r3_doe}
\end{subfigure}%
\begin{subfigure}{.2\textwidth}
\centering
\includegraphics[width=.7\linewidth]{ike}
\caption{Icosahedron}
\label{fig:r3_ike}
\end{subfigure}%
\begin{subfigure}{.25\textwidth}
\centering
\includegraphics[width=.56\linewidth]{gad}
\caption{Great dodecahedron}
\label{fig:r3_gad}
\end{subfigure}%
\begin{subfigure}{.25\textwidth}
\centering
\includegraphics[width=.56\linewidth]{sissid}
\caption{Small stellated\\dodecahedron}
\label{fig:r3_sissid}
\end{subfigure}%
\begin{subfigure}{.25\textwidth}
\centering
\includegraphics[width=.56\linewidth]{gike}
\caption{Great icosahedron}
\label{fig:r3_gike}
\end{subfigure}%
\begin{subfigure}{.25\textwidth}
\centering
\includegraphics[width=.56\linewidth]{gissid}
\caption{Great stellated\\dodecahedron}
\label{fig:r3_gissid}
\end{subfigure}%
\caption{The nine regular polyhedra.}
\label{fig:regulars3D}
\end{figure}
These polyhedra are all transitive on their vertices, edges, and faces. Formally, regularity
requires a stricter notion known as \textbf{flag-transitivity}, which is explained in Section
\ref{flag}, but does not make a difference in this case.
The first five of these are \textbf{convex}, and the last four are \textbf{nonconvex}. Formally,
the definition of convexity is "any line segment connecting two points on the surface lies
entirely within the shape", but intuitively it can be thought of as "without dents, holes, or
self-intersections" or as the shape a rubber band or sphere would make.
\begin{itemize}
    \item In 2D, there are infinitely many regular polytopes: the regular polygons with any
    number of sides, including star polygons such as the pentagram.
\item In 3D, there are the 9 regular polyhedra from above.
\item In 4D, there are 16 regular polychora, of which 6 are convex and 10 are nonconvex.
    \item In 5D and up, there are only 3 regular polytopes in each dimension: the members of
    the infinite families of \textbf{simplexes}, \textbf{orthoplexes}, and \textbf{hypercubes},
    analogues of the 3D tetrahedron, octahedron, and cube, respectively.
\end{itemize}
\subsubsection{Schläfli symbols}
\subsection{Uniform polytopes}
A type of polytopes we study a lot in this server are \textbf{uniform polytopes}.
Uniformity is defined recursively. In 2D, uniform polytopes are simply the regular polygons.
In higher dimensions, uniform polytopes are the vertex-transitive polytopes whose facets are
all uniform. To see what we mean, let's look at a few examples.
\begin{figure}[H]
\centering
\begin{subfigure}{.33333\textwidth}
\centering
\includegraphics[width=.5\linewidth]{tut}
\caption{Truncated tetrahedron\\(tut)}
\label{fig:tut}
\end{subfigure}%
\begin{subfigure}{.33333\textwidth}
\centering
\includegraphics[width=.5\linewidth]{did}
\caption{Dodecadodecahedron\\(did)}
\label{fig:did}
\end{subfigure}%
\begin{subfigure}{.33333\textwidth}
\centering
\includegraphics[width=.5\linewidth]{sided}
\caption{Snub icosidodecadodecahedron\\(sided)}
\label{fig:sided}
\end{subfigure}%
\caption{Three examples of uniform polyhedra. They all have regular polygonal faces
(corresponding to the 2D uniforms) and are vertex-transitive.}
\label{fig:uniforms3D}
\end{figure}
In 3D, the uniform polytopes have already been enumerated. It turns out that, aside from the
infinite families of \textbf{prisms} and \textbf{antiprisms}, there are exactly
75 uniform polyhedra.
% TODO: Link to a listing
\begin{figure}[H]
\centering
\begin{subfigure}{.5\textwidth}
\centering
\includegraphics[width=.5\linewidth]{hep}
\caption{Heptagonal prism (hep)}
\label{fig:hep}
\end{subfigure}%
\begin{subfigure}{.5\textwidth}
\centering
\includegraphics[width=.5\linewidth]{heap}
\caption{Heptagonal antiprism (heap)}
\label{fig:heap}
\end{subfigure}%
\caption{An example of a prism and an antiprism. These can be built from any regular polygon,
and made uniform in all cases.}
\label{fig:prisms}
\end{figure}
In 4D and higher up, the problem of enumerating the uniforms remains unsolved.
As of May 2021, we know of two infinite families plus 2188 uniform polychora.
In 5D and up, we haven't yet done a thorough examination, though we know of various
constructions that generate uniforms in any dimension.
\subsubsection{Armies and regiments}
Take a look at four of the regular polyhedra again:
\begin{figure}[H]
\centering
\begin{subfigure}{.25\textwidth}
\centering
\includegraphics[width=.56\linewidth]{ike}
\caption{Icosahedron}
\end{subfigure}%
\begin{subfigure}{.25\textwidth}
\centering
\includegraphics[width=.56\linewidth]{gad}
\caption{Great dodecahedron}
\end{subfigure}%
\begin{subfigure}{.25\textwidth}
\centering
\includegraphics[width=.56\linewidth]{sissid}
\caption{Small stellated\\dodecahedron}
\end{subfigure}%
\begin{subfigure}{.25\textwidth}
\centering
\includegraphics[width=.56\linewidth]{gike}
\caption{Great icosahedron}
\end{subfigure}%
\caption{The four regular polyhedra in the icosahedron army.}
\label{fig:ike_army}
\end{figure}
You might notice that all of these have 12 vertices in the same arrangement. We call a set of
polytopes with the same vertices an \textbf{army}. Armies are named after the convex member, in
this case the icosahedron.
Furthermore, the icosahedron and the great dodecahedron also share their edges, and so do the
small stellated dodecahedron and the great icosahedron. We call a set of polytopes with the same
edges a \textbf{regiment}.
Regiments are useful in studying uniform and scaliform\footnote{See Section \ref{scaliform}.}
polytopes for two reasons:
\begin{itemize}
\item Uniformity and scaliformity requires all edges to be equal length, which is
preserved in a regiment.
\item Polytopes in the same regiment have vertex figures\footnote{See Section \ref{verf}.}
in the same army, which means regiment members can be found by faceting the vertex
figure, which has one less dimension and is much simpler.
\end{itemize}
\subsubsection{Coxeter Diagrams}
\subsubsection{Multiprisms}
\subsection{CRF polytopes}
\label{crf}
A polytope is called \textbf{convex regular-faced}, or \textbf{CRF} for short, when it is convex
(without dents, holes or self-intersections) and all of its faces are regular. Let's look at a few
examples.
\begin{figure}[h]
\centering
\begin{subfigure}{.33333\textwidth}
\centering
\includegraphics[width=.5\linewidth]{Sphenomegacorona}
\caption{Sphenomegacorona}
\label{fig:polyhedra_1}
\end{subfigure}%
\begin{subfigure}{.33333\textwidth}
\centering
\includegraphics[width=.5\linewidth]{Hebesphenomegacorona}
\caption{Hebesphenomegacorona}
\label{fig:polyhedra_2}
\end{subfigure}%
\begin{subfigure}{.33333\textwidth}
\centering
\includegraphics[width=.5\linewidth]{Disphenocingulum}
\caption{Disphenocingulum}
\label{fig:polyhedra_3}
\end{subfigure}%
  \caption{Three examples of CRF polyhedra: the sphenomegacorona, hebesphenomegacorona, and disphenocingulum.}
\label{fig:crf_polyhedra}
\end{figure}
The CRF polyhedra that are not also uniform are called the \textbf{Johnson solids}, named after
Norman Johnson who enumerated all 92 of them.
In 4D and up, the count of CRF polytopes explodes. There are thought to be at least $10^{9}$
non-uniform CRF polychora, probably even more. Because of this, people usually study subsets of CRFs.
\subsection{Scaliform polytopes}
\label{scaliform}
\textbf{Scaliformity} is a less strict version of uniformity. It does not require that
all facets are uniform, only that all edges are the same length.\footnote{
It still requires isogonality of course.}
In 3D, the scaliform polytopes are the same as the uniform polytopes, because all the polygons
that can be faces of a scaliform polyhedron are regular and thus uniform.\footnote{
For a detailed explanation, see Section \ref{circum}.} But there are
polyhedra, such as some of the Johnson solids\footnote{See Section \ref{crf}.}, that can be facets
of scaliform polychora. So scaliform polytopes are distinct from uniform polytopes in 4D and up.
\begin{figure}[H]
\centering
\includegraphics[width=.3\linewidth]{tut=invtut.png}
\caption{One of the easiest to understand scaliform polytopes is the polychoron
\textit{truncated tetrahedral alterprism}. It consists of two truncated tetrahedra
in opposite orientations, their faces are connected by triangular cupolae, and
their edges are connected by tetrahedra. It is not uniform because it has non-uniform
facets (the triangular cupolae), but it is still isogonal and equilateral, thus it is
scaliform.}
\end{figure}
\section{Useful terms}
\subsection{Vertex figure}
\label{verf}
A \textbf{vertex figure}, or \textbf{verf} for short, is a polytope that shows how
the facets of another polytope are arranged around a vertex. Intuitively, it can
be understood as the shape that gets exposed when you slice off a vertex of a polytope.
\begin{figure}[H]
\centering
\includegraphics[width=.3\linewidth]{cube_verf.png}
\caption{The verf of a cube is a triangle. The edges of the triangle are
themselves verfs of the faces of the cube.}
\end{figure}
A vertex figure has 1 less dimension than the original polytope. Verfs of polyhedra are
polygons, verfs of polychora are polyhedra, etc.
The facets of a vertex figure are the vertex figures of the facets of the original polytope,
and generally the elements of a vertex figure are the vertex figures of the elements of the
original polytope.
\section{In-depth definitions}
\label{indepth}
This section explains in detail some terms and definitions. If you're just starting out, you
don't need to care about this.
\subsection{Polytope}
As with many terms used on the Polytope Discord,
the word ``polytope'' can have a few definitions which are not completely equivalent.
This section will list four properties of a polytope from least to most controversial.
The most common definition is the strictest, requiring all four.
\begin{enumerate}
\item
A polytope in $n$ dimensions (known hereafter as an $n$-polytope)
is made of \textbf{facets} which are $(n-1)$-polytopes.
\begin{itemize}
\item \textbf{Points} ($0$-polytopes) are the facets of \textbf{line segments} ($1$-polytopes),
\item which are the facets of \textbf{polygons} ($2$-polytopes),
\item which are the facets of \textbf{polyhedra} ($3$-polytopes),
\item which are the facets of \textbf{polychora} ($4$-polytopes), etc.
\end{itemize}
Note that an $n$-polytope must lie in $n$-dimensional space.
A square with a vertex jutting out of the plane is usually not a bona fide polygon
but is instead given the name ``\textbf{skew} polygon.''
If the skew square is a face of a larger 3D shape,
said 3D shape is not considered a polyhedron either,
since its faces are not all polygons.
Readers coming from jan Misali's video on regular polyhedra may find fault with this,
but the fact of the matter is that
skew polytopes are rarely mentioned or included in our lists.
For the next criterion, consider the following figures ABC and DEF.
\end{enumerate}
\begin{center}
\begin{tikzpicture}
\node (a) at (0,0) {A};
\node (b) at (0.5,0.866) {B};
\node (c) at (1,0) {C};
\draw (a) -- (b) -- (c) -- (a);
\end{tikzpicture}
\begin{tikzpicture}
\node (d) at (0,0) {D};
\node (e) at (0.5,0.866) {E};
\node (f) at (1,0) {F};
\draw (e) -- (d) -- (f);
\end{tikzpicture}
\end{center}
ABC is a triangle, a polygon with three line segments, or \textbf{edges},
as they are known when mentioned as part of a larger polytope.
It also contains three points, or \textbf{vertices}, or even \textbf{verts} for short.
(In the polytope world, abbreviations are everywhere!)
DEF, on the other hand, is not a triangle; it is missing an edge,
leaving ``open ends'' at E and F which are each connected to only one edge.
To exclude DEF and figures like it,
we require that within a polygon, every vertex be connected to exactly two edges.
This condition also excludes ``branches'' where more than two edges meet at a vertex.
Let's generalize this rule to $n$-polytopes.
Two 3D figures are shown below.
\begin{center}
\begin{tikzpicture}
\coordinate (A0) at (1,1,1);
\coordinate (A1) at (1,1,-1);
\coordinate (A2) at (1,-1,1);
\coordinate (A3) at (1,-1,-1);
\coordinate (A4) at (-1,1,1);
\coordinate (A5) at (-1,1,-1);
\coordinate (A6) at (-1,-1,1);
\coordinate (A7) at (-1,-1,-1);
\draw[dashed,fill=cyan,opacity=0.6] (A4) -- (A5) -- (A7) -- (A6);
\draw[dashed,fill=magenta,opacity=0.6] (A1) -- (A5) -- (A7) -- (A3);
\draw[dashed,fill=yellow,opacity=0.6] (A2) -- (A6) -- (A7) -- (A3);
\draw[thick,fill=red,opacity=0.6] (A0) -- (A1) -- (A3) -- (A2);
\draw[thick,fill=green,opacity=0.6] (A0) -- (A4) -- (A6) -- (A2);
\draw[thick,fill=blue,opacity=0.6] (A0) -- (A4) -- (A5) -- (A1);
\end{tikzpicture}
\begin{tikzpicture}
\coordinate (A0) at (1,1,1);
\coordinate (A1) at (1,1,-1);
\coordinate (A2) at (1,-1,1);
\coordinate (A3) at (1,-1,-1);
\coordinate (A4) at (-1,1,1);
\coordinate (A5) at (-1,1,-1);
\coordinate (A6) at (-1,-1,1);
\coordinate (A7) at (-1,-1,-1);
\draw[dashed,fill=cyan,opacity=0.6] (A4) -- (A5) -- (A7) -- (A6);
\draw[dashed,fill=magenta,opacity=0.6] (A1) -- (A5) -- (A7) -- (A3);
\draw[dashed,fill=yellow,opacity=0.6] (A2) -- (A6) -- (A7) -- (A3);
\draw[thick,fill=red,opacity=0.6] (A0) -- (A1) -- (A3) -- (A2);
\draw[thick,fill=green,opacity=0.6] (A0) -- (A4) -- (A6) -- (A2);
\draw[thick] (A4) -- (A5) -- (A1);
\draw[thick] (A5) -- (-1,0.2,-1);
\end{tikzpicture}
\end{center}
The left figure is a cube, a polyhedron with
six square \textbf{faces}, twelve edges, and eight vertices.
The right is the same, but with the top face removed.
The right figure is not a polyhedron because, like DEF, it leaves ``open ends.''
However, in this case, the open ends are not vertices but the top four edges,
which are each connected to only one face.
We require that within a polyhedron, every edge be connected to exactly two faces.\footnote{
The previous rule for polygons does not apply to the cube nor to other polyhedra;
every vertex of the cube is connected to three edges, not two.
However, the rule does apply to each of the cube's square faces.
}
Are you beginning to see a pattern?
In an $n$-polytope, removing an $(n-1)$-dimensional facet
creates ``open ends'' in the $(n-2)$-dimensional ``facets of facets,'' or \textbf{ridges}.
Ridges are the vertices of polygons, the edges of polyhedra, the faces of polychora, and so on.
Thus the next trait of a polytope is:
\begin{enumerate}
\setcounter{enumi}{1}
\item Every ridge must be connected to exactly two facets.
\end{enumerate}
Collectively, a polytope's vertices, edges, faces,
and so on are known as its \textbf{elements}.
We may keep track of which elements have which others as facets using a \textbf{Hasse diagram},
shown below for the triangle ABC.
\begin{center}
\begin{tikzpicture}
\node (abc) at (0,3) {ABC};
\node (ab) at (-1,2) {AB};
\node (ac) at (0,2) {AC};
\node (bc) at (1,2) {BC};
\node (a) at (-1,1) {A};
\node (b) at (0,1) {B};
\node (c) at (1,1) {C};
\node (o) at (0,0) {$\emptyset$};
\draw (o) -- (a) -- (ab) -- (abc) -- (ac) -- (c)
(b) -- (o) -- (c) -- (bc) -- (abc)
(a) -- (ac);
\draw[preaction={draw=white, -,line width=6pt}] (ab) -- (b) -- (bc);
\end{tikzpicture}
\end{center}
Each node of the Hasse diagram represents an element of ABC,
including both the whole triangle at the top
and the \textbf{null element} $\emptyset$ at the bottom.\footnote{
$\emptyset$, also known as the \textbf{nullitope}, is a convenient edge case.
It has no vertices or other elements and is considered to be $-1$-dimensional.
}
Whenever two nodes of the diagram are connected,
the higher node's element contains the lower node's element as a facet.
For example, the edge AC contains the vertex C,
so their nodes are connected in the diagram with AC above C.
Notice that the structure of the diagram does not depend on
where the vertices of the original figure are;
a scalene triangle would have the same Hasse diagram as the equilateral ABC.
A diagram on its own, without the locations of the vertices,
is also known as an \textbf{abstract polytope}.
For example, consider the following diagram:
\begin{center}
\begin{tikzpicture}
\node (s) at (0,3) {S};
\node (ab) at (-0.5,2) {AB};
\node (ba) at (0.5,2) {BA};
\node (a) at (-0.5,1) {A};
\node (b) at (0.5,1) {B};
\node (o) at (0,0) {$\emptyset$};
\draw (o) -- (a) -- (ab) -- (s) -- (ba) -- (b) -- (o)
(a) -- (ba);
\draw[preaction={draw=white, -,line width=6pt}] (b) -- (ab);
\end{tikzpicture}
\end{center}
It represents an abstract ``polytope'' with
one null element, two vertices, two edges, and one face (reading from the bottom up).
It satisfies the two conditions given thus far and is often called the digon (two-sided polygon).
However, its two edges AB and BA contain the same vertices
and will lie on top of each other when drawn on a sheet of paper.
For this reason, the digon is not considered a polytope.
Likewise, a quadrilateral with two vertices in the same spot,
which would look like a triangle when drawn, is also not a polytope.
This leads us to the third criterion:
\begin{enumerate}
\setcounter{enumi}{2}
\item No two elements of a polytope may coincide:
\begin{enumerate}
\item no two elements (other than vertices and $\emptyset$) may have the same facets and
\item no two vertices may have the same location.
\end{enumerate}
\end{enumerate}
Figures which pass the first and second tests but not this third,
such as the digon, are called \textbf{fissaries}.
Note that condition 3(b) is the first which cannot be tested just from the Hasse diagram.
Condition 1 is equivalent to requiring that the Hasse diagram be organized into ``layers,''
with one element on the top (the whole polytope) and another on the bottom ($\emptyset$).
Finally, perhaps the most contentious part of the definition:
\begin{enumerate}
\setcounter{enumi}{3}
\item
A polytope is \textit{connected}, i.e.
it is possible to reach any facet from any other facet
by repeatedly jumping to adjacent facets.
\footnote{
Here, ``adjacent'' means ``sharing a ridge,''
e.g. two edges of a polygon which share a point,
or two faces of a polyhedron which share an edge.
}
\end{enumerate}
Two triangles next to each other a polygon do not make!
Figures which fail to satisfy this criterion are known as \textbf{compounds}.
The question ``Are compounds polytopes?'' sparked lively discussions
the first few times it was brought up on the server
and weary sighs thereafter.
With that, the definition of a polytope is complete!
\footnote{
Perhaps the most common requirement for a polytope seen elsewhere but not here
is convexity: not having dents, holes, or self-intersections.
All of those are perfectly fine and, in fact, overwhelmingly common.
}
\subsection{Regular polytopes and flags}
\label{flag}
If you ask a server member what a regular polytope is,
they might reply ``a regular polytope is \textbf{flag-transitive}.''
This raises two questions: what is a flag, and what is ``-transitive?''
To answer the first question, reconsider the Hasse diagram of triangle ABC from earlier.
A flag of the triangle is a sequence of elements that starts with $\emptyset$ and ends with ABC,
where each entry in the sequence contains the previous one.
In other words, a flag is a path drawn through the diagram from bottom to top.
For example, $\emptyset$ $\rightarrow$ B $\rightarrow$ AB $\rightarrow$ ABC is a flag.
ABC happens to have six of them:
starting from $\emptyset$, there are three choices of vertex,
then two choices of which other vertex to include in the edge,
then only one choice of ABC as the final element.
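If it helps to see this computationally, here is a tiny illustrative sketch (in Python, not part of any tool mentioned here) that counts flags by walking the Hasse diagram of ABC from the top element down to $\emptyset$; each such path, read bottom-up, is a flag:
\begin{verbatim}
# Each element maps to the elements it contains as facets.
hasse = {
    "ABC": ["AB", "AC", "BC"],
    "AB": ["A", "B"], "AC": ["A", "C"], "BC": ["B", "C"],
    "A": ["null"], "B": ["null"], "C": ["null"],
    "null": [],
}

def flags(element):
    if not hasse[element]:
        return [[element]]
    return [[element] + rest for facet in hasse[element]
                             for rest in flags(facet)]

print(len(flags("ABC")))  # prints 6
\end{verbatim}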
Next, what is ``-transitive?''
Note that I keep the hyphen
because ``transitivity'' on its own is not a property;
something needs to come before the word.
A polytope is $x$-transitive if
every $x$ of the polytope can be moved to any other $x$
by some combination of rotation, reflection, and occasionally translation,
so that the polytope as a whole looks the same afterward.
For example, the following hexagon is vertex-transitive (or \textbf{isogonal}),
but not edge-transitive.
\begin{center}
\includegraphics[scale=0.1]{isogonal_hexagon.png}
\end{center}
Any of the six black vertices (intersection points don't count!)
can be moved to any other vertex with rotation or reflection.
However, an orange edge cannot be moved to a red one
without changing the shape, size, or orientation of the hexagon.
\subsection{Circumscribability and orbiformity}
\label{circum}
An $n$-polytope is \textbf{circumscribable} if its vertices lie on an $(n-1)$-sphere.
For a polygon, this means they lie on a circle, for a polyhedron, a sphere.
All elements of an isogonal spherical polytope are circumscribable. Take the example of
faces of a polyhedron: all the vertices of the polyhedron are equivalent and thus lie on a
sphere. A face of a polyhedron lies on a plane, and any intersection of a plane and a sphere
is a circle. So the vertices of the face lie on a circle. This works in any dimension.
A polytope is \textbf{orbiform} if it is circumscribable and all its edges are the same length.
Examples are all scaliform (including uniform) polytopes, some of the Johnson solids\footnote{
See Section \ref{crf}.} and higher-dimensional CRF polytopes, and many nonconvex regular-faced
polytopes. All orbiform polygons are regular.
All elements of a scaliform polytope are orbiform.
\end{document}
| {
"alphanum_fraction": 0.730007764,
"avg_line_length": 38.6786786787,
"ext": "tex",
"hexsha": "403ad36216472fef532db5fa21cd666f231cd016",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-02-21T03:22:19.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-02-21T03:22:19.000Z",
"max_forks_repo_head_hexsha": "c8b660bba8102d7152ea6f6c7bcea1b9a333c8ef",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "vihdzp/polytope-intro",
"max_forks_repo_path": "doc.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "c8b660bba8102d7152ea6f6c7bcea1b9a333c8ef",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "vihdzp/polytope-intro",
"max_issues_repo_path": "doc.tex",
"max_line_length": 101,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "c8b660bba8102d7152ea6f6c7bcea1b9a333c8ef",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "OfficialURL/polytope-intro",
"max_stars_repo_path": "doc.tex",
"max_stars_repo_stars_event_max_datetime": "2021-03-13T03:53:18.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-03-13T03:53:18.000Z",
"num_tokens": 7809,
"size": 25760
} |
\documentclass[12pt, openany]{book}
\title{NeurExpo User Manual}
\author{Written by Eben Kadile \\ CATNIP Laboratory}
\date{}
\usepackage{natbib}
\usepackage{graphicx}
\usepackage{makeidx}
\usepackage{ifthen}
\usepackage[english]{babel}
\usepackage[utf8]{inputenc}
\usepackage{dirtytalk}
\usepackage{fancyhdr}
\usepackage{amsmath, amssymb, amsthm}
\usepackage{geometry}
\geometry{
a4paper,
total={8.5in,9.5in},
top=1in,
left=1in,
right=1in
}
\begin{document}
\pagestyle{fancy}
\fancyhf{}
\lhead{NeurExpo \thepage}
\rhead{CATNIP Laboratory}
\maketitle
\tableofcontents
\newpage
\section{Introduction}
NeurExpo is an application developed for the purpose of visualizing data recorded from neural populations in real-time. The application is written in \texttt{typescript} and is run in a web browser to maximize portability. The package downloadable from github has both the compiled \texttt{javascript} in \texttt{client/js} and the \texttt{typescript} source in \texttt{client/ts}.
The application is designed to receive data from a pre-processing server. That is, a server which performs spike-sorting as well as some additional dimensionality reduction algorithm such as online PCA, Kalman filtering, or Variational Joint Filtering.
An example \texttt{python} server which streams random data can be found in the \texttt{server} directory of the repo. There are two types of data to be visualized: the spikes of the individual neurons being recorded and the latent trajectory of the neural population (which may be higher than 3-dimensional).
The spike trains, which take the form of sparse binary sequences, are rendered using a non-adaptive version of the \textit{gamma filter}; see section 4.1 or \cite{gammafilter}.
The latent trajectory is rendered using a method which allows the user to adjust which low-dimensional components of the data they are viewing.
Now I will give an in-depth tour of the GUI, explain how to transmit data from a server to the client, and describe the algorithms used to render each type of data.
\section{System Requirements}
The hardware requirements for the client will vary based on the dimensionality of the latent trajectory and the number of spike trains that are to be visualized. Any system with processors whose speed is at least 2 GHz and a graphics card should be absolutely fine. There is a setting which can make the graphics even less costly, so machines with integrated graphics should be able to render the visualization in real time as well.
The client system ought to have a web browser installed which is compatible with \texttt{websockets} and \texttt{WebGL} (most modern browsers satisfy this requirement).
In order to run the example \texttt{python} server, all that is required is \texttt{python} 2.7 or later and the \texttt{websockets} package.
\chapter{Initial Menu}
\begin{figure}
\includegraphics[width=\linewidth]{NeurExpo_Menu.png}
\caption{The NeurExpo Initial Menu}
\end{figure}
Upon opening \texttt{home.html}, found in the \texttt{client} folder, in your browser, you will be presented with a menu where you must input some information.
\section{IP Address}
The first piece of information is the IP address of the server which will be streaming the data. The server is assumed to be run by you or one of your collaborators, so it is up to you to obtain the IP address.
\section{Ports}
The second field is the port from which the latent trajectory will be streamed. The port from which our example \texttt{python} server streams the trajectory is \texttt{8200}. This can be changed in the \texttt{python} code, if you wish.
The sixth field is the port from which the spikes will be streamed. The port in the example server is \texttt{8400}. This can also be changed from the \texttt{python} code.
\section{Dimension}
The third field is the dimension of the latent trajectory. Once the transmission begins, the dimension will be sent to the server as a string with double quote characters at the beginning and end. This feature exists so that the server may adjust its inference algorithm to produce a trajectory of the desired dimension.
\section{Points on Trajectory}
The number of points on the trajectory determines how much history of the trajectory you will be able to view at once. A number between 100 and 500 is typically best for visualization. Entering a very large number may cause the visualization to lag.
\section{Camera Distance}
The distance from the camera to the origin can be adjusted during visualization using the scroll-wheel on your mouse. However, we must initialize this distance. The only way to appropriately initialize this distance is to know the approximate magnitude of the points which are being produced by the server's inference algorithm. Our example server produces points which typically have magnitude less than 1, so we can initialize the camera distance to 3.
\section{Number of Spike Trains}
This field simply corresponds to the number of spike train channels the server will be streaming. Once the transmission begins, this number is also sent to the server as a string with double quotes on both ends so that it can be compared with the number of spike trains on the server side. In the example server, the number of spike trains is simply the number received from the client.
\section{Time Resolution}
This field is in units of milliseconds and determines how history-sensitive the spike train rendering will be. If the number is too low, spikes will fade too quickly to be seen. If the number is too high, old spikes will linger and it will become difficult to tell which neurons are currently firing. Values will typically be between 10 and 100 milliseconds. If you expect the spike trains to be very sparse, the value should be higher; if you expect the firing rates to be high, the value should be lower. See the section on spike rendering for further details.
\section{Anti-aliasing}
Anti-aliasing refers to rendering a desired image multiple times with different offsets and then interpolating between the results so as to soften any pixelated edges that arise when the image is rendered only once. The render loop for \texttt{NeurExpo} is cheap, so most GPUs should be able to handle anti-aliasing. However, if you know you have a slow GPU, you may want to un-check \say{anti-aliasing}. This is a purely aesthetic feature; it will have no effect on the utility of the visualization.
\chapter{Rendering the Latent Trajectory}
\section{Covering the Grassmannian}
The fundamental challenge of rendering the latent trajectory is how to allow the user to view multiple 3-dimensional aspects of the high-dimensional data.
We solve this problem by allowing the user to adjust different sliders to view different low-dimensional components of the data; see Figure 3.1. More specifically, different configurations of the slider values correspond to different points on the Grassmannian $Gr(3,n)$, where $n$ is the dimension of the trajectory. If you have little interest in the mathematics of the Grassmannian, you can still easily explore the low-dimensional components of your data by simply playing with the slider values.
\begin{figure}
\includegraphics[width=\linewidth]{latentLorenz.png}
\caption{A 6-dimensional trajectory corresponding to a 3-dimensional Lorenz system. The sliders below the Pause button allow you to explore the different 3-dimensional projections of the trajectory.}
\end{figure}
The Grassmannian $Gr(k,n)$ is the topological space of k-dimensional linear subspaces of $\mathbb{R}^n$. This space has a manifold structure (indeed, it has a smooth structure as well), meaning some open sets of the Grassmannian can be parametrized by real numbers. We will focus on the Grassmannian $Gr(3,n)$, since this is what is relevant to our application.
We parametrize a relatively large open set of the $Gr(3,n)$ and then allow the user to adjust the real-valued parameters to determine which 3-plane the trajectory is projected onto.
Although some neighborhoods of the Grassmannian can be parametrized injectively, we sacrifice injectivity for the sake of covering a large portion of $Gr(3,n)$ in a computationally efficient manner. Consider the subspace $V=\operatorname{span}\{\boldsymbol{e}_1,\boldsymbol{e}_2,\boldsymbol{e}_3\}$ of $\mathbb{R}^n$ and a rank 3 linear map $T:V\rightarrow\mathbb{R}^n$. The reduced column echelon form of the matrix representation of $T$ will be of the form
$$\begin{bmatrix} 1 & 0 & 0\\ 0 & 1 & 0 \\ 0 & 0 & 1 \\ a_{1,1} & \cdots & a_{1,3} \\ & \vdots & \\ a_{n-3,1} & \cdots & a_{n-3,3} \end{bmatrix}$$
$T$ specifies a 3D subspace of $\mathbb{R}^n$, and the values $a_{i,j}$ specify $T$. The orthographic projection matrix is computed by performing Gram-Schmidt orthonormalization on the columns of the above matrix and then taking the transpose. The orthonormalization is necessary to make sure that the trajectory isn't stretched or skewed after being multiplied by the projection.
If the set of values $a_{i,j}$ is related to another set of values $a'_{i,j}$ by an elementary row operation, then these two sets specify the same 3D subspace of $\mathbb{R}^n$. This introduces an unfortunate redundancy into our parametrization. However, if all $a_{i,j}$ were able to take arbitrary real values, the parametrization would cover almost all of $Gr(3,n)$, allowing the user to view the trajectory from almost any 3D subspace.
Naturally, it is impossible for \textit{all} real numbers to be represented on a computer. Instead we use exponential functions to make the sliders cover a very wide range of real numbers. Each slider takes values between $\pm 7\times 10^{106}$.
For more information on the Grassmannian refer to \cite{grassmannian}.
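To make the construction above concrete, the following \texttt{python} sketch (illustrative only; it is not part of the NeurExpo client, and all names are invented) stacks a set of slider values $a_{i,j}$ below a $3\times3$ identity matrix, orthonormalizes the columns (using a QR factorization in place of explicit Gram-Schmidt), and takes the transpose as the projection:
\begin{verbatim}
# Illustrative sketch only (not the NeurExpo TypeScript client).
import numpy as np

def projection_from_sliders(a):
    """a: (n-3, 3) array of slider values; returns a (3, n) projection."""
    m = np.vstack([np.eye(3), np.asarray(a, dtype=float)])  # column echelon form
    q, _ = np.linalg.qr(m)    # orthonormalize the columns (Gram-Schmidt)
    return q.T                # orthographic projection onto the chosen 3-plane

a = np.random.default_rng(0).normal(size=(3, 3))  # slider values for n = 6
P = projection_from_sliders(a)
point3d = P @ np.ones(6)      # project one 6-dimensional trajectory point
\end{verbatim}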
\section{Protocol for Transmitting the Trajectory}
The bytestring in each packet for the latent trajectory websocket is a sequence of 4-byte groups. The first group is a big endian unsigned integer which indicates how many trajectory points are encoded in the packet. Each 4-byte group after that is a 32-bit big endian floating point number corresponding to a coordinate of a point on the trajectory.
Thus, the dimension of the trajectory can be used to compute how many more 4-byte groups we expect after the group indicating the number of points. For example, if the trajectory is 12-dimensional and the first 4-bytes say that there are 3 points in the packet, then we expect that there are 36 more 4-byte groups in the packet, and that each group of 12 4-byte groups specifies a point in the trajectory.
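For illustration, this packet layout can be produced and parsed with \texttt{python}'s standard \texttt{struct} module as sketched below (this is not the code of the example server; the function names are made up):
\begin{verbatim}
# Illustrative sketch of the trajectory packet layout (not the example server).
import struct

def pack_trajectory(points):
    """points: list of equal-length tuples, one tuple per trajectory point."""
    data = struct.pack(">I", len(points))          # big endian uint: point count
    for p in points:
        data += struct.pack(">%df" % len(p), *p)   # 32-bit big endian floats
    return data

def unpack_trajectory(data, dim):
    (count,) = struct.unpack_from(">I", data, 0)
    flat = struct.unpack_from(">%df" % (count * dim), data, 4)
    return [flat[i * dim:(i + 1) * dim] for i in range(count)]

pkt = pack_trajectory([tuple(range(12))] * 3)      # 3 points, 12-dimensional
assert len(pkt) == 4 + 36 * 4                      # 36 further 4-byte groups
\end{verbatim}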
\chapter{Rendering the Spike Trains}
\section{The Gamma Filter}
Suppose we have time-series data which takes the form of a sparse binary sequence called $s_t$ where $t\in\mathbb{N}$. We wish to construct a way to visualize this data such that we can observe multiple time-scales at once. Let $a$ be a real number. Then we can define the 4-layer gamma filter, $G_t^L$ by
$$G^1_{t+1}=aG^1_t+s_t$$
$$G^2_{t+1}=aG^2_t+G^1_t$$
$$G^3_{t+1}=aG^3_t+G^2_t$$
$$G^4_{t+1}=aG^4_t+G^3_t$$
Each layer follows an exponential decay until it is perturbed by the previous layer or the sequence $s_t$.
If $\Delta t$ is the amount of time between consecutive frames (about 0.15 milliseconds for most machines), then $a=2^{-\Delta t/h}$, where $h$ is the \say{time-resolution} (see section 2.7). This way, each layer follows an exponential decay with half-life $h$ when there is no input to the layer. See \cite{gammafilter} for more theoretical details on the gamma filter (note that that paper presents the gamma filter as an algorithm which \textit{learns} the optimal value for $a$, but we choose it before rendering).
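A minimal \texttt{python} sketch of this update rule (illustrative pseudocode rather than the client's \texttt{typescript}; the variable names are made up) is:
\begin{verbatim}
# Illustrative sketch of the 4-layer gamma filter update (not the client code).
def make_gamma_filter(h_ms, dt_ms):
    a = 2.0 ** (-dt_ms / h_ms)    # decay factor giving half-life h per layer
    G = [0.0, 0.0, 0.0, 0.0]      # G^1 .. G^4

    def step(s):
        """s = 1 if a spike arrived during this frame, else 0."""
        prev = [s] + G[:3]        # layer L is driven by layer L-1 (or by s_t)
        for L in range(4):
            G[L] = a * G[L] + prev[L]
        return list(G)

    return step

step = make_gamma_filter(h_ms=50.0, dt_ms=15.0)
frames = [step(1 if t == 0 else 0) for t in range(10)]  # one spike, then decay
\end{verbatim}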
Every spike train has its own gamma filter, but the value of $a$ is the same across all filters. Each filter is rendered as a set of concentric rings, with the inner disk representing $G^1$ and the outer ring representing $G^4$. See Figure 4.1.
\begin{figure}
\includegraphics[width=\linewidth]{spikes.png}
\caption{Example frame of 32 spike trains being rendered.}
\end{figure}
\section{Protocol for Transmitting the Spike Trains}
Due to the nature of the gamma filter, the only data that is transmitted over the spike train websocket is the ID of the channel on which a spike is detected. This means that the client-side scripts have no way of detecting or compensating for latency in the connection. We have not noticed any performance issues while using a wireless connection. However, if low latency is absolutely critical then I recommend using an ethernet connection instead.
Specifically, every packet in this protocol is a sequence of 8-byte groups. The first four bytes of every group form a big endian unsigned integer indicating the ID of the channel on which a spike was detected. The second four bytes form a big endian unsigned integer which indicates how many spikes were detected on the given channel since the last packet was sent. Since your server can most likely send packets faster than a neuron can fire, this number is almost always 1.
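An illustrative \texttt{python} sketch of this layout (again not the example server; the names are invented):
\begin{verbatim}
# Illustrative sketch of the spike packet layout (not the example server).
import struct

def pack_spikes(counts_by_channel):
    """counts_by_channel: dict mapping channel ID to spike count."""
    data = b""
    for channel, count in counts_by_channel.items():
        data += struct.pack(">II", channel, count)  # one 8-byte group per entry
    return data

def unpack_spikes(data):
    return [struct.unpack_from(">II", data, off)
            for off in range(0, len(data), 8)]

assert unpack_spikes(pack_spikes({3: 1, 17: 2})) == [(3, 1), (17, 2)]
\end{verbatim}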
\chapter{Using the Example Servers}
Included in the NeurExpo repo is python code for a pair of example servers. These two scripts were written to provide an accessible illustration of how to use the spike and trajectory transmission protocols.
With \texttt{python} 2.7 or later and the \texttt{websockets} package installed, simply run both of these scripts on separate command lines. You will be asked to input the desired ports, the dimension of the trajectory, and the number of spike trains. After inputting this information, the servers will wait for a request from the client. Enter the corresponding information into the client's menu, and initialize. The trajectory server will send a noisy Lorenz system over the websocket, and the spike train server will send a procedural spike train.
\chapter{Navigating the GUI}
Here we provide a simple walk-through of \texttt{NeurExpo}'s graphical user interface.
After you've entered the necessary information in the initial menu, clicking \say{Initialize} will allow the system to store the information you entered and re-write the \texttt{html} document.
At this stage you may want to check that there were no parse errors in the information that you entered in the menu. Depending on your browser, you can do this by right clicking the window, clicking \say{Inspect Element}, then clicking \say{Console}; any errors should be printed here.
If there do not appear to be any errors, clicking \say{Begin Transmission} will open two websockets to the server: one for the latent trajectory and one for the spike trains. The system will immediately send the server the trajectory dimension that it is expecting and the number of spike trains that it is expecting (it is critical that the server and client agree on dimension and number of spike channels). The client will render data as soon as it starts receiving it from the server.
The latent trajectory will be rendered on top. If the trajectory is 3D or higher, you can click and drag on the canvas to rotate the camera as well as hover your cursor on the canvas and use your scroll-wheel to zoom in and out.
The spikes will be rendered on the lower canvas. If there are a lot of them, you may have to scroll down on the page to see them all.
The \say{Pause} button will stop the render loop, but it will not close the websockets or pause the gamma filter. The \say{End Transmission} button will close the websockets.
\bibliographystyle{plain}
\bibliography{references}
\end{document}
| {
"alphanum_fraction": 0.7869696775,
"avg_line_length": 80.9010416667,
"ext": "tex",
"hexsha": "87527199b0e7bf66e8217b8ad97807b21405379f",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f2ec8f0bd8798170d5fd71da2e5a9ce2c49bda22",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "catniplab/NeurExpo",
"max_forks_repo_path": "manual/manual.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f2ec8f0bd8798170d5fd71da2e5a9ce2c49bda22",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "catniplab/NeurExpo",
"max_issues_repo_path": "manual/manual.tex",
"max_line_length": 571,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f2ec8f0bd8798170d5fd71da2e5a9ce2c49bda22",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "catniplab/NeurExpo",
"max_stars_repo_path": "manual/manual.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 3614,
"size": 15533
} |
\chapter{Mathematical Background}\label{chap:AppA}
In this part of the appendix we discuss, in a more formal manner, some of the mathematical tools used in the calculations presented in this thesis.
The part on the York decomposition is mainly inspired by \cite{Percacci2017}, whereas the conventions for the heat-kernel computations are taken from \cite{PawlowskiNPgaugeLecture} and extended for the matter part, using the conventions from \cite{CodelloPercacciRahmede2008}.
\section{York Decomposition}
In the discussion of gauge theories, it is often very useful to decompose the gauge field $A_{\mu}$ into transversal and longitudinal parts:
\begin{align}
A_{\mu} = A_{\mu}^{\mathrm{T}} + \nabla_{\mu}\phi.
\end{align}
The transversal part is characterized by the fact that $\nabla^{\mu}A_{\mu}^{\mathrm{T}} = 0$. Using this decomposition, we are able to separate the pure gauge spin-$0$ degrees of freedom from the physical ones, contained in the spin-$1$ part $A_{\mu}^{\mathrm{T}}$.\\
Assuming vanishing boundary terms, integration by parts allows us to change the integration variables in the functional integral, i.\,e.
\begin{align}
\int_x \sqrt{g} \ A_{\mu}A^{\mu} = \int_x \sqrt{g} \ A_{\mu}^{\mathrm{T}}A^{\mathrm{T}, \mu} + \int_x \sqrt{g} \ \phi\left(-\nabla^2\right)\phi.
\end{align}
Note that we have to take care of the Jacobian $J$ of this variable transformation:
\begin{align}
\left(\dd A_{\mu}\right) \longrightarrow J\left(\dd A_{\mu}^{\mathrm{T}}\right)\left(\dd\phi\right).
\end{align}
To be able to determine the Jacobian for our transformation, the integration measure needs to be normalized. A quite convenient choice is to evaluate the Gaussian integral over the different fields $\psi$ and set the result to one:
\begin{align}\label{eqn:york_measure}
\int\left(\dd\psi\right) \exp\left\{-\int\dd x \ \sqrt{g} \ \psi^2 \right\} = 1,
\end{align}
where we are assuming a Euclidean signature and a curved background metric. With this condition we find:
\begin{align}
1&=J \int\left(\dd A_{\mu}^{\mathrm{T}}\right) \operatorname{e}^{-\int \dd x \sqrt{g} \ A_{\mu}^{\mathrm{T}} A^{\mathrm{T}, \mu}}
\int(\dd\phi) \operatorname{e}^{-\int \dd x \sqrt{g} \ \phi\left(-\nabla^{2}\right) \phi} = J\left(\operatorname{det}_{\phi}^{\prime}\left(-\nabla^{2}\right)\right)^{-1/2}.
\end{align}
This allows us to determine the Jacobian $J$ as follows:
\begin{align}
J = \left(\operatorname{det}_{\phi}^{\prime}\left(-\nabla^{2}\right)\right)^{1/2}.
\end{align}
The prime denotes the fact that the zero mode has to be removed when computing the determinant in order to obtain a consistent result. Physically, this is in accordance with the fact that a constant $\phi$ does not contribute to $A_{\mu}$.\\
For our computation in chapters \ref{chap:EHT} and \ref{chap:Matter}, we used the background field method, where we assume a linear split of the \textit{full} metric $g_{\mu\nu}$ into a background metric $\bar{g}_{\mu\nu}$ and a fluctuation field $h_{\mu\nu}$. There is an analogous way of decomposing the fluctuation field in the background field formalism. First, we split $h_{\mu\nu}$ into
\begin{align}
h_{\mu\nu} = h_{\mu\nu}^{\mathrm{T}} + \frac{1}{d}\ \bar{g}_{\mu\nu}h,
\end{align}
where $h_{\mu\nu}^{\mathrm{T}}$ is traceless, i.\,e. $\bar{g}^{\mu\nu}h_{\mu\nu}^{\mathrm{T}}=0$, and $h=\bar{g}^{\mu\nu}h_{\mu\nu}$. The traceless part can be further decomposed in flat space using the irreducible representations of the Lorentz group with spins 0, 1 and 2, respectively, but in our case a more sophisticated approach, the so-called \textit{York decomposition}, is chosen:
\begin{align}
h_{\mu\nu} = h_{\mu\nu}^{\text{TT}} + \bar{\nabla}_{\mu}\xi_{\nu} + \bar{\nabla}_{\nu}\xi_{\mu} + \left(\bar{\nabla}_{\mu}\bar{\nabla}_{\nu} - \frac{1}{d} \ \bar{g}_{\mu\nu}\bar{\nabla}^2\right)\sigma + \frac{1}{d} \ \bar{g}_{\mu\nu}h.
\end{align}
Here, $ h_{\mu\nu}^{\text{TT}}$ is a transverse-traceless, spin-2 degree of freedom, $\xi_{\mu}$ is transverse and carries a spin-1 d.\,o.\,f. and $\sigma$ and $h$ possess spin-0. As before, we want to find the Jacobian $J$ for this variable transformation:
\begin{align}
\left(\dd h_{\mu\nu}\right) \longrightarrow J \left(\dd h_{\mu\nu}^{\mathrm{TT}}\right) \left(\dd\xi_{\mu}\right)\left(\dd\sigma\right)\left(\dd h\right).
\end{align}
This is again possible after specifying a suitable normalization of the functional measure as
\begin{align}
\int (\dd h_{\mu\nu}) \exp\left\{-\mathcal{G}(h, h)\right\} = 1,
\end{align}
where $\mathcal{G}$ is an inner product in the space of symmetric two-tensors, defined as
\begin{equation}
\begin{aligned}
\mathcal{G}(h, h)&= \int_x \sqrt{\bar{g}} \ \left(h_{\mu \nu} h^{\mu \nu}+\frac{a}{2} h^{2}\right) \\[10pt]
&= \int_x \sqrt{\bar{g}} \ \left[h^{\mathrm{TT}}_{\mu \nu} h^{\mathrm{TT}, \mu \nu}+2 \xi_{\mu}\left(-\bar{\nabla}^{2}-\frac{\bar{R}}{d}\right) \xi^{\mu}\right. \\
&+\left.\frac{d-1}{d} \sigma\left(-\bar{\nabla}^{2}\right)\left(-\bar{\nabla}^{2}-\frac{\bar{R}}{d-1}\right) \sigma+\left(\frac{1}{d}+\frac{a}{2}\right) h^{2} \right]
\end{aligned}
\end{equation}
in the case of an Einstein type background metric\footnote{A metric is of Einstein type if $R_{\mu\nu}$ is a constant multiple of $g_{\mu\nu}$, i.\,e. $R_{\mu\nu} = \frac{1}{d} \mathcal{R} g_{\mu\nu}$.}. This yields
\begin{align}
J=\left(\operatorname{det}_{\xi}\left(-\bar{\nabla}^{2}-\frac{\bar{R}}{d}\right)\right)^{1 / 2}\left(\operatorname{det}_{\sigma}^{\prime}\left(-\bar{\nabla}^{2}\right)\right)^{1 / 2}\left(\operatorname{det}_{\sigma}\left(-\bar{\nabla}^{2}-\frac{\bar{R}}{d-1}\right)\right)^{1 / 2}.
\end{align}
Note that the prime has the same meaning and physical interpretation as in the previous case: if $\sigma$ is constant, it does not contribute to $h_{\mu\nu}$. \\
In both cases, i.\,e. for the decomposition of the general gauge field and for the York decomposition of the fluctuation field, appropriate rescalings of the fields $\phi$, $\xi_{\mu}$ and $\sigma$, respectively, help us to cancel the non-trivial Jacobians and ensure that all modes have the same mass dimension. For the sake of completeness, we present the rescaled versions of the fields:
\begin{align}
\hat{\phi} &= \sqrt{-\nabla^2}\ \phi \\[10pt]
\hat{\xi}_{\mu} &= \sqrt{-\bar{\nabla}^{2}-\frac{\bar{R}}{d}}\ \xi_{\mu} \\[10pt]
\hat{\sigma} &= \sqrt{-\bar{\nabla}^{2}} \sqrt{-\bar{\nabla}^{2}-\frac{\bar{R}}{d-1}}\ \sigma.
\end{align}
The resulting graviton two-point function, after decomposition of the fluctuation field, has the following structure:
\begin{equation} \Gamma^{(2)}_{hh} =
\begin{pmatrix}
\Gamma^{(2)}_{h^{\mathrm{TT}}h^{\mathrm{TT}}} & 0 & 0 & 0 \\[10pt]
0 & \Gamma^{(2)}_{\xi\xi} & 0 & 0 \\[10pt]
0 & 0 & \Gamma^{(2)}_{h^{\mathrm{Tr}}h^{\mathrm{Tr}}} & \Gamma^{(2)}_{h^{\mathrm{Tr}}\sigma} \\[10pt]
0 & 0 & \Gamma^{(2)}_{\sigma h^{\mathrm{Tr}}} & \Gamma^{(2)}_{\sigma\sigma} \\
\end{pmatrix}.
\end{equation}
This concludes our discussion of the York decomposition as a useful tool for simplifying calculations in the background field method.
\newpage
\section{Heat-Kernel Techniques}\label{sec:heat-kernel}
\vspace{-0.2cm}
We use heat-kernel techniques to evaluate the r.\,h.\,s. of the flow equation (\ref{eqn:Wetterich}), where we need to compute the functional trace over functions depending on the Laplacian on a curved background. In general, the method can be understood as a curvature expansion on a flat background. \\
The general formula to compute such traces is given by
\begin{align}
\operatorname{Tr} f(\Delta)= N \ \int\kern-1.3em\sum_{\ell} \rho(\ell) f(\lambda(\ell)),
\label{eqn:heat-kernel}
\end{align}
with some normalization $N$, the spectral values $\lambda(\ell)$ and their corresponding multiplicities $\rho(\ell)$. \\
On flat backgrounds, the computation of (\ref{eqn:heat-kernel}) is simply a standard momentum integral. On curved backgrounds, for example a four-sphere $\mathbb{S}^4$ with constant background curvature $r = \frac{\bar{\mathcal{R}}}{k^2} > 0$, the spectrum of the Laplacian is discrete and we need to sum over all spectral values. \\
For our example of $\mathbb{S}^4$, we have
\begin{align}
\lambda(\ell) = \frac{\ell(3+\ell)}{12}r \qquad \text{and} \qquad \rho(\ell) = \frac{(2\ell + 3)(\ell+2)!}{6\ell!}.
\end{align}
The normalization is then given by the inverse volume of the four-sphere, $ \left(V_{\mathbb{S}^4}\right)^{-1} = \frac{k^4r^2}{384\pi^2}$. This leads us to the formula for our computation of the r.\,h.\,s. of the flow equation on a background with constant positive curvature:
\begin{align}
\operatorname{Tr} f(\Delta)=\frac{k^{4} r^{2}}{384 \pi^{2}} \sum_{\ell=0}^{\infty} \frac{(2 \ell+3)(\ell+2) !}{6 \ell !} f\left(\frac{\ell(3+\ell)}{12} r\right).
\end{align}
This is called a spectral sum. For large curvatures $r$ the convergence of the series is rather fast, whereas in the limit $r\rightarrow 0$ one finds exponentially slow convergence.\\
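For illustration, the truncated spectral sum can be evaluated numerically, e.\,g. with the short \texttt{python} sketch below (the test function $f$ is arbitrary and is not one of the regulator shape functions used in this thesis):
\begin{verbatim}
# Illustrative numerical evaluation of the truncated spectral sum on S^4.
from math import exp, factorial, pi

def spectral_sum(f, r, k=1.0, l_max=200):
    total = 0.0
    for l in range(l_max + 1):
        rho = (2 * l + 3) * factorial(l + 2) / (6 * factorial(l))  # multiplicity
        lam = l * (3 + l) / 12 * r                                 # spectral value
        total += rho * f(lam)
    return k**4 * r**2 / (384 * pi**2) * total

value = spectral_sum(lambda x: exp(-x), r=1.0)   # converges quickly for r ~ 1
\end{verbatim}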
The master equation for heat-kernel computations reads
\begin{align}
\operatorname{Tr} f(\Delta)=\frac{1}{(4 \pi)^{\frac{d}{2}}}\left[\mathbf{B}_{0}(\Delta) Q_{2}[f(\Delta)]+\mathbf{B}_{2}(\Delta) Q_{1}[f(\Delta)]\right]+\mathcal{O}\left(\mathcal{R}^{2}\right),
\label{eqn:master-eqn}
\end{align}
with the heat-kernel coefficients
\begin{align}
\mathbf{B}_{n}(\Delta)=\int_x \sqrt{g} \ \operatorname{Tr} \mathbf{b}_{n}(\Delta)
\end{align}
and
\begin{align}
Q_{n}[f(x)]=\frac{1}{\Gamma(n)} \int \dd x \ x^{n-1} f(x).\label{eqn:Qfunc}
\end{align}
\newpage
For computations on $\mathbb{S}^4$, the values of the heat-kernel coefficients $\mathbf{B}_n(\Delta)$ are presented in the following table:
\begin{table}[H]
\centering
\setlength{\tabcolsep}{5mm}
\setlength\extrarowheight{2mm}
\begin{tabular}{c | c c c}
& TT & TV & S\\ \hline
$\operatorname{Tr} \mathbf{b}_{0}$ & 5 & 3 & 1\\
$\operatorname{Tr} \mathbf{b}_{2}$ & $-\frac{5}{6}\mathcal{R}$ & $\frac{1}{4}\mathcal{R}$& $\frac{1}{6}\mathcal{R}$\\
\end{tabular}
\caption{Heat-kernel coefficients for transverse-traceless tensors (TT), transverse vectors (TV) and scalars (S) for computations on $\mathbb{S}^4$.}
\end{table}
The proof of the master equation (\ref{eqn:master-eqn}) is based on the Laplace transform
\begin{align}
f(\Delta) = \int_0^{\infty} \dd s \ \operatorname{e}^{-s\Delta}\tilde{f}(s).
\end{align}
We insert this definition of the Laplace transform into equation (\ref{eqn:heat-kernel}) and find
\begin{align}
\operatorname{Tr} f(\Delta)=\int_{0}^{\infty} \dd s \ \tilde{f}(s) \operatorname{Tr} \operatorname{e}^{-s \Delta}.
\label{eqn:hk2}
\end{align}
The trace on the r.\,h.\,s. is explicitly the trace of the heat-kernel. We expand this term as follows:
\begin{align}
\operatorname{Tr} \operatorname{e}^{-s \Delta}=\frac{1}{(4 \pi)^{\frac{d}{2}}} \sum_{n=0}^{\infty} s^{\frac{n-d}{2}} \mathbf{B}_{n}(\Delta).
\end{align}
This is where the heat-kernel coefficients $\mathbf{B}_n$ become important. We proceed by inserting this expanded version of the heat-kernel trace into equation (\ref{eqn:hk2}) and find:
\begin{equation}
\begin{aligned}
\operatorname{Tr} f(\Delta) &=\frac{1}{(4 \pi)^{\frac{d}{2}}} \sum_{n=0}^{\infty} \mathbf{B}_{n}(\Delta) \int_{0}^{\infty} \dd s \ s^{\frac{n-d}{2}} \tilde{f}(s) \\[10pt]
&=\frac{1}{(4 \pi)^{\frac{d}{2}}} \sum_{n=0}^{\infty} \frac{1}{\Gamma\left(\frac{d-n}{2}\right)} \mathbf{B}_{n}(\Delta) \int_{0}^{\infty} \dd t \ t^{\frac{d-n}{2}-1} f(t) \\[10pt]
&=\frac{1}{(4 \pi)^{\frac{d}{2}}} \sum_{n=0}^{\infty} \mathbf{B}_{n}(\Delta) Q_{\frac{d-n}{2}}[f(t)].
\end{aligned}
\end{equation}
This completes the derivation of the master equation (\ref{eqn:master-eqn}) for heat-kernel computations. Note that we used the definition of the $Q$-functionals, given in equation (\ref{eqn:Qfunc}), and the relation $\int_{s} s^{-x} \tilde{f}(s)=\frac{1}{\Gamma(x)} \int_{z} z^{x-1} f(z)$.
When investigating matter fields, such as in chapter \ref{chap:Matter}, we often encounter kinetic operators of the form $\tilde{\Delta} = -\nabla^2\cdot\mathbbm{1} + \mathbf{E}$, where $\mathbf{E}$ is a linear map acting on the spacetime and the internal indices of the fields. In this notation, $\mathbbm{1}$ has to be understood as the identity in the respective field space. \\
If $\left[\Delta, \mathbf{E}\right] = 0$\footnote{In the case of $\left[\Delta, \mathbf{E}\right] \neq 0$, there would be additional terms including (higher order) commutators of $\Delta$ and $\mathbf{E}$ due to the Baker-Campbell-Hausdorff formula.}, we can relate the coefficients of the modified Laplacian $\tilde{\Delta}$ and those of the initially considered operator $-\nabla^2$ via
\begin{align}
\operatorname{Tr} \operatorname{e}^{-s\left(-\nabla^{2}+\mathbf{E}\right)}=\frac{1}{(4 \pi)^{\frac{d}{2}}} \sum_{k, \ell=0}^{\infty} \frac{(-1)^{\ell}}{\ell !} \int_x \sqrt{g} \ \operatorname{Tr} \mathbf{b}_{k}(\Delta) \mathbf{E}^{\ell} s^{\frac{k-d}{2}+\ell}.
\end{align}
This results in the following modified values for the coefficients we are interested in:
\begin{equation}
\begin{aligned}
\mathbf{b}_0 &= \mathbbm{1} \\[10pt]
\mathbf{b}_2 &= \frac{\mathcal{R}}{6}\cdot\mathbbm{1} - \mathbf{E}.
\label{eqn:coefficients}
\end{aligned}
\end{equation}
For further study and a more general treatment of the modified Laplacians, including higher-order coefficients, we refer to \cite{CodelloPercacciRahmede2008, Percacci2017}. | {
"alphanum_fraction": 0.685434425,
"avg_line_length": 82.0490797546,
"ext": "tex",
"hexsha": "01ecc199a0d5e84992a05554846f2ed4f5299e02",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-07-25T05:06:03.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-07-25T05:06:03.000Z",
"max_forks_repo_head_hexsha": "d930ee60ab526835c904252e68272408f3d6a16f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "mathieukaltschmidt/BSc-Thesis",
"max_forks_repo_path": "Thesis/appendices/A_maths.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d930ee60ab526835c904252e68272408f3d6a16f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "mathieukaltschmidt/BSc-Thesis",
"max_issues_repo_path": "Thesis/appendices/A_maths.tex",
"max_line_length": 392,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "d930ee60ab526835c904252e68272408f3d6a16f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "mathieukaltschmidt/BSc-Thesis",
"max_stars_repo_path": "Thesis/appendices/A_maths.tex",
"max_stars_repo_stars_event_max_datetime": "2020-07-22T15:05:57.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-07-22T15:05:57.000Z",
"num_tokens": 4674,
"size": 13374
} |
\documentclass{article}
\usepackage[utf8]{inputenc}
\usepackage[cm]{fullpage}
%\usepackage[spanish,mexico]{babel}
\usepackage{siunitx}
\sisetup{
output-complex-root = \ensuremath{\mathrm{j}},
complex-root-position = before-number
}
\usepackage[inline]{enumitem}
\usepackage{graphicx}
%\usepackage{wrapfig}
\usepackage{hyperref}
\usepackage{datatool}
\usepackage{tabularx}
\usepackage[table]{xcolor}
\usepackage{multirow}
\usepackage{hhline}
\definecolor{Gray}{gray}{0.85}
\definecolor{LightCyan}{rgb}{0.88,1,1}
\newcommand{\Subject}{Circuit analysis II}
\newcommand{\Group}{5A}
\newcommand{\Carrera}{Electrical engineering}
\newcommand{\ExamType}{Homework 3: Nodal and Mesh analysis}
\newcommand{\Date}{4/10/2016}
\newcommand{\MaximumMarks}{10}
\newcommand{\PName}{Dr. Suresh Kumar Gadi}
\makeatletter
% Blank/missing fields commands
% \skipblank adds \\ to filled field; * version adds \space instead of newline
\newcommand\skipblank{\@ifstar\@spskip\@nlskip}
\newcommand\@nlskip[1]{\ifthenelse{\DTLiseq{#1}{}}{\relax}{#1\\}}
\newcommand\@spskip[1]{\ifthenelse{\DTLiseq{#1}{}}{\relax}{#1\space}}
% \checkblank replaces blank fields with ***
\newcommand\checkblank[1]{\ifthenelse{\DTLiseq{#1}{}}{***}{#1}}
\makeatother
\begin{document}
\linespread{2.5}
\DTLloaddb{addresses}{db/questions.csv} % database of per-student question parameters
\DTLforeach*{addresses}{%
\VarOOA =VarOOA,
\VarOOB =VarOOB,
\VarOOC =VarOOC,
\VarOOD =VarOOD,
\VarOOE =VarOOE,
\VarOOF =VarOOF,
\VarOOG =VarOOG,
\VarOOH =VarOOH,
\VarOOI =VarOOI,
\VarOOJ =VarOOJ,
\VarOOK =VarOOK,
\VarOOL =VarOOL,
\VarOOM =VarOOM,
\VarOON =VarOON,
\VarOOO =VarOOO,
\VarOOP =VarOOP,
\VarOOQ =VarOOQ,
\VarOOR =VarOOR,
\VarOOS =VarOOS,
\VarOOT =VarOOT,
\VarOOU =VarOOU,
\VarOOV =VarOOV,
\VarOOW =VarOOW,
\VarOOX =VarOOX,
\VarOOY =VarOOY,
\VarOOZ =VarOOZ,
\VarOOa =VarOOa,
\VarOOb =VarOOb,
\VarOOc =VarOOc,
\VarOOd =VarOOd,
\VarOOe =VarOOe,
\VarOOf =VarOOf,
\VarOOg =VarOOg,
\VarOOh =VarOOh,
\VarOOi =VarOOi,
\VarOOj =VarOOj,
\VarOOk =VarOOk,
\VarOOl =VarOOl,
\VarOOm =VarOOm,
\VarOOn =VarOOn,
\VarOOo =VarOOo,
\VarOOp =VarOOp,
\VarOOq =VarOOq,
\VarOOr =VarOOr,
\VarOOs =VarOOs,
\VarOOt =VarOOt,
\VarOOu =VarOOu,
\VarOOv =VarOOv,
\VarOOw =VarOOw,
\VarOOx =VarOOx,
\VarOOy =VarOOy,
\VarOOz =VarOOz,
\SName =Name,
\No =No}
{%
\setcounter{page}{1}
%\thispagestyle{empty}
\begin{center}
\begin{minipage}{.15\textwidth}
\begin{flushleft}
\includegraphics[width=\textwidth]{images/uadec-original}
\end{flushleft}
\end{minipage}
\begin{minipage}{.84\textwidth}
\begin{flushright}
{\Huge \textbf{Universidad Autónoma de Coahuila}}\\[2mm]
{\huge Facultad de Ingeniería Mecánica y Eléctrica}\\[2mm]
{\LARGE Unidad Torreón}
\end{flushright}
\end{minipage}
\end{center}
%\\[0.5cm]
\begin{center}
\setlength\doublerulesep{2pt}\doublerulesepcolor{LightCyan}
\begin{tabularx}{\textwidth}{ ||>{\columncolor{Gray}}l|X||>{\columncolor{Gray}}l|r|| }
\hhline{|t==:t:==t|}
Subject & \Subject & Group & \Group \\ \hhline{|:==::==:|}
Degree & \Carrera & Due for & \Date \\ \hhline{|:==::==:|}
Exam / Homework & \ExamType & Registration \# & \textbf{\textit{\No}} \\ \hhline{|:==::==:|}
Professor's name & \PName & Marks Obtained & \underline{\hspace{1cm}} $\Big /10$ \\ \hhline{|:==:b:==:|}
Student's name & \multicolumn{3}{X||}{\textbf{\textit{\MakeUppercase{\SName}}}} \\ \hhline{|b====b|}
\end{tabularx}
\end{center}
\section*{Instructions}
\begin{enumerate}
\item The student should submit the homework on or before the due date. (LATE SUBMISSION = 0 MARKS)
\item Answers should be handwritten on A4 or letter-size bond paper. (Otherwise, 20\% of the marks obtained will be deducted.)
\item The student should print his/her corresponding question paper and staple it to his/her answer sheets. (Otherwise, 20\% of the marks obtained will be deducted.)
\item In the calculations, the student should maintain a precision of at least 3 decimal places with correct rounding. (Otherwise, 20\% of the marks obtained will be deducted.)
\end{enumerate}
\section*{Questions}
\begin{enumerate}
\item Let $Z_1=\SI{{\VarOOA}+j{\VarOOB}}{\ohm}$ and $Z_2=\SI{{\VarOOC}+j{\VarOOD}}{\ohm}$. Calculate the equivalent impedance between the terminals $A$ and $B$ of the circuit shown in Figure 1. (2 points)\\
\begin{minipage}{.20\textwidth}
\centering
\includegraphics[height=2cm]{images/Drawing1}\\
\textbf{Figure 1}
\end{minipage}
\begin{minipage}{.30\textwidth}
\centering
\includegraphics[height=5cm]{images/Drawing2}\\
\textbf{Figure 2}
\end{minipage}
\begin{minipage}{.5\textwidth}
\centering
\includegraphics[height=3.5cm]{images/Drawing3}\\
\textbf{Figure 3}
\end{minipage}
\item Let $Z_1=\SI{{\VarOOE}+j{\VarOOF}}{\ohm}$, $Z_2=\SI{{\VarOOG}+j{\VarOOH}}{\ohm}$, $Z_3=\SI{{\VarOOI}+j{\VarOOJ}}{\ohm}$ and $V=\SI{{\VarOOK}+j{\VarOOL}}{\volt}$. Use mesh analysis to find the current flowing through each element of the circuit shown in Figure 2. (4 points)\\
\item Let $Z_1=\SI{{\VarOOM}+j{\VarOON}}{\ohm}$, $Z_2=\SI{{\VarOOO}+j{\VarOOP}}{\ohm}$ and $V=\SI{{\VarOOQ}+j{\VarOOR}}{\volt}$. Use nodal analysis to find the current flowing through each element of the circuit shown in Figure 3. (4 points)
\end{enumerate}
\clearpage
}
\end{document}
| {
"alphanum_fraction": 0.6597569693,
"avg_line_length": 33.1124260355,
"ext": "tex",
"hexsha": "097fffae438f9733fc151f7468b40d6a76044c8b",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f8d9ad0571e600a7cf10ffb04bcae37a9a3ac6f1",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "skgadi/courses",
"max_forks_repo_path": "2016-07-12/FIME-5A/Homework003/questions.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f8d9ad0571e600a7cf10ffb04bcae37a9a3ac6f1",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "skgadi/courses",
"max_issues_repo_path": "2016-07-12/FIME-5A/Homework003/questions.tex",
"max_line_length": 291,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f8d9ad0571e600a7cf10ffb04bcae37a9a3ac6f1",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "skgadi/courses",
"max_stars_repo_path": "2016-07-12/FIME-5A/Homework003/questions.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2268,
"size": 5596
} |
\section{Quaternion integration\label{quatProofs}}
Let us first consider a geometric view on quaternions, taken from~\cite{Shoemake:85}. Treat
the four components of a quaternion as Cartesian coordinates of a four-dimensional vector
space. The set of unit quaternions is then the surface of a unit hypersphere (also called a
\emph{glome}~\cite{MathWorld:4D}) in this vector space. Each point on this hypersphere
corresponds to a particular rotation. It also turns out that each pair of
opposite points on this sphere represents exactly the same rotation; hence all possible
rotations are contained in one hemisphere, no matter where the sphere is cut in half.
\subsection{Conservation of magnitude\label{quatIntegrationMagnitude}}
Define the quaternion dot product, in analogy to the 4D vector dot product, to be
\begin{equation}
\q{p}\cdot\q{q} =
(p_w + p_x\qi + p_y\qj + p_z\qk)\cdot (q_w + q_x\qi + q_y\qj + q_z\qk) =
p_w q_w + p_x q_x + p_y q_y + p_z q_z
\end{equation}
The dot product is commutative, contrary to the quaternion juxtaposition product.
The instantaneous rate of change is given~\cite{BaraffWitkin:97,Eberly:04,Saunders:PhD} to be
\begin{eqnarray*}
\dot{\q{q}} & = & \frac{1}{2}\tilde{\ve{\omega}}\q{q} =
\frac{1}{2}(\omega_1\qi + \omega_2\qj + \omega_3\qk)
(q_w + q_x\qi + q_y\qj + q_z\qk) \\*
& = & \frac{1}{2} ( - \omega_1 q_x - \omega_2 q_y - \omega_3 q_z ) +
\frac{\qi}{2} ( \omega_1 q_w + \omega_2 q_z - \omega_3 q_y ) + \\*
&& \frac{\qj}{2} (-\omega_1 q_z + \omega_2 q_w + \omega_3 q_x ) +
\frac{\qk}{2} ( \omega_1 q_y - \omega_2 q_x + \omega_3 q_w )
\end{eqnarray*}
We now treat \q{q} and $\dot{\q{q}}$ as 4D vectors and calculate
their dot product:
\begin{eqnarray*}
\q{q}\cdot\dot{\q{q}} & = & \frac{1}{2} (
- q_w \omega_1 q_x - q_w \omega_2 q_y - q_w \omega_3 q_z
+ q_x \omega_1 q_w + q_x \omega_2 q_z - q_x \omega_3 q_y \\*
&& - q_y \omega_1 q_z + q_y \omega_2 q_w + q_y \omega_3 q_x
+ q_z \omega_1 q_y - q_z \omega_2 q_x + q_z \omega_3 q_w ) \\*
& = & 0.
\end{eqnarray*}
The rate of change is orthogonal to \q{q}, and therefore it is always
a tangent to the sphere, touching it at the point corresponding to \q{q}. The set of all possible
values of $\dot{\q{q}}$ is thus a hyperplane (a three-dimensional subspace) tangential to the
sphere at the point \q{q} in 4D space.
We can determine the magnitude $\norm{\dot{\q{q}}}$ from the sum of squares of the
components given above and find it to be
$\norm{\dot{\q{q}}} = \frac{1}{2}\norm{\ve{\omega}}\,\norm{\q{q}}$. Since we always
require $\q{q}$ to be a unit quaternion, we can reduce this to
\begin{equation}
\label{quatRateOfChangeMagnitude}
\norm{\dot{\q{q}}} = \frac{1}{2}\norm{\ve{\omega}}.
\end{equation}
Now let us determine what happens if we calculate $\q{q} + h\dot{\q{q}}$ for some finite $h$.
Note that this operation is required by all common numerical solvers of differential equations.
Consider the magnitude of the result:
\begin{eqnarray*}
\norm{\q{q} + h\dot{\q{q}}}^2 & = & (\q{q} + h\dot{\q{q}})\cdot(\q{q} + h\dot{\q{q}}) \\
&=& \q{q}\cdot\q{q} + 2h\q{q}\cdot\dot{\q{q}} + h^2\dot{\q{q}}\cdot\dot{\q{q}} \\
&=& 1 + 0 + \frac{h^2}{4}\norm{\ve{\omega}}^2 \\
&>& 1 \quad\quad\mbox{whenever}\quad \norm{\ve{\omega}} > 0.
\end{eqnarray*}
Hence, if the body in question is rotating, it is not possible for a standard numerical ODE solver
to preserve a quaternion's property of unit magnitude.
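A quick numerical check (with arbitrarily chosen values for $\ve{\omega}$ and $h$) illustrates this growth of the magnitude:
\begin{verbatim}
# Numerical check of the norm growth after one Euler step (illustrative values).
import numpy as np

q = np.array([1.0, 0.0, 0.0, 0.0])      # unit quaternion (w, x, y, z)
omega = np.array([2.0, 0.0, 0.0])       # angular velocity in rad per unit time
h = 0.1

qdot = 0.5 * np.array([0.0, *omega])    # (1/2) * omega~ * q, with q the identity
print(np.linalg.norm(q + h * qdot))     # 1.00499..., i.e. sqrt(1 + h^2|w|^2/4)
\end{verbatim}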
\subsection{Normalization is not enough\label{quatNormalization}}
One might think that given the derivative of a quaternion \q{q} (equation~\ref{quatRateOfChange},
page~\pageref{quatRateOfChange}), one can find $\q{q}(t + h)$ for some time step $h$ within
the accuracy of the ODE solver employed ($O(h^5)$ error for fourth-order Runge-Kutta).
Unfortunately this is not the case. This shall be demonstrated using Euler's method; it should,
however, be pointed out that more sophisticated methods like RK4 are also affected. Consider the
value of \q{q} at the next time step, $\q{q}(t + h) = \q{q}(t) + h \dot{\q{q}}(t)$. For any
non-zero $h$ and $\dot{\q{q}}$ this point will always lie outside the unit quaternion sphere due
to the orthogonality of \q{q} and $\dot{\q{q}}$. This is usually compensated for by renormalizing
the quaternion after the ODE solving step. Geometrically, this renormalization can be understood
as drawing a straight line through the origin and the point $\q{q}(t) + h \dot{\q{q}}(t)$,
intersecting this line with the unit sphere and replacing $\q{q}(t + h)$ by this point of
intersection (see figure~\ref{quatNormalizationFigure}).
\begin{figure}
\psfrag{frag:q}{\q{q}}
\psfrag{frag:qdot}{$\dot{\q{q}}$}
\psfrag{frag:qplusqdot}{$\q{q} + h\dot{\q{q}}$}
\centerline{\includegraphics[width=6cm]{figures/quaternion1}}
\caption{Normalizing a quaternion after performing an ODE solving step.
\label{quatNormalizationFigure}}
\end{figure}
Following the tangent to the sphere is a reasonable approximation to following its curve if the
magnitude of $h \dot{\q{q}}(t)$ is small compared to the radius of curvature of the sphere.
For large time steps or large magnitudes of \ve{\omega}, however, this gets increasingly
erroneous. Consider the limiting case, a body rotating infinitely fast
($\norm{\ve{\omega}} \rightarrow \infty$): after renormalisation, $\q{q}$ will have moved merely
a quarter of the way around the unit sphere, which equates to concatenating the rotation of
quaternion \q{q} with some rotation by $180^\circ$. This is a strictly finite amount of rotation
per time step, while it would actually have been correct to perform an infinite number of
revolutions around the quaternion sphere.
If a polynomial approximation method like RK4 had been used instead of Euler's method, a parametric
polynomial space curve would have been fitted to the surface of the sphere instead of the straight
line. Note however that the Taylor series of the $\sin$ and $\cos$ functions are non-terminating,
and that it is therefore not possible for a finite polynomial curve to lie exactly in the surface
of a sphere. These ODE solvers will therefore suffer the same problems, albeit less pronounced.
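The size of this error is easily illustrated numerically. The following sketch (with made-up values) compares the rotation angle obtained from a single Euler step followed by renormalization with the exact angle $\norm{\ve{\omega}} h$ for a body spinning about the $x$ axis:
\begin{verbatim}
# Illustrative comparison: Euler step + renormalization versus the exact angle.
import numpy as np

def euler_normalized_angle(wh):
    """Angle after one Euler step with |omega|*h = wh, then normalization."""
    q = np.array([1.0, wh / 2.0, 0.0, 0.0])   # q(0) + h*qdot(0) for q(0) = 1
    q /= np.linalg.norm(q)
    return 2.0 * np.arctan2(q[1], q[0])       # angle encoded by the quaternion

for wh in (0.1, 1.0, 10.0, 100.0):
    print(wh, euler_normalized_angle(wh))     # saturates below pi; exact is wh
\end{verbatim}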
\subsection{Corrected quaternion integration\label{quatIntegrationDerivation}}
Assume that the body we are simulating is rotating at a constant angular velocity.
(This assumption is later weakened by the use of a more sophisticated ODE solver,
but for now we will stick with Euler's method.) Furthermore assume without loss of
generality that the body is rotating clockwise about its $x$ axis, which corresponds
to the world's $x$ axis, and that at time $t=0$ the body's frame and the world frame
coincide. Then the orientation of the body (the quaternion describing the linear
transformation from the body's frame of reference to the world frame) is given as a
function of time by
\begin{equation}
\label{quatDerivationExact}
\q{q}(t) = \cos\left(\frac{\norm{\ve{\omega}}t}{2}\right) +
\sin\left(\frac{\norm{\ve{\omega}}t}{2}\right)\qi
\end{equation}
(cf.\ equation~\ref{quatRotation}, figure~\ref{quatIntFig1}) and its angular velocity is
\begin{equation}
\ve{\omega} = (\omega_1, \omega_2, \omega_3)^T = (\norm{\ve{\omega}}, 0, 0)^T
\end{equation}
for some arbitrary $\norm{\ve{\omega}}$, measured in radians per unit time.
\begin{figure}
\psfrag{frag:omegat}{$\frac{\norm{\ve{\omega}} t}{2}$}
\psfrag{frag:qdotoft}{$\dot{\q{q}}(t)$}
\psfrag{frag:real}{Re}
\psfrag{frag:iimag}{$\mathsf{i}$-Im}
\centerline{\includegraphics[width=6cm]{figures/quaternion2}}
\caption{Assumed situation for the derivation in
section~\ref{quatIntegrationDerivation}.\label{quatIntFig1}}
\end{figure}
Now assume w.l.o.g.\ that we take a time step from $t = 0$ to $t = h$.
Then we require that the result returned by Euler's method for $\q{q}(h)$
after renormalization be equal to its exact value in equation~\ref{quatDerivationExact}:
\begin{equation}
\label{quatDerivationSetup}
\cos\left(\frac{\norm{\ve{\omega}} h}{2}\right) +
\sin\left(\frac{\norm{\ve{\omega}} h}{2}\right)\qi =
\frac{\q{q}(0) + h \dot{\q{q}}(0)}
{\norm{\q{q}(0) + h \dot{\q{q}}(0)}}
\end{equation}
We know from examining the 4D geometry that the value assigned to $\dot{\q{q}}$
in equation~\ref{quatRateOfChange} has the correct direction and merely needs to be
corrected in magnitude. In other words, we are searching for a scalar function
$f(h, \norm{\ve{\omega}})$ which will allow $\dot{\q{q}}$ to satisfy
equation~\ref{quatDerivationSetup}:
\begin{equation}
\dot{\q{q}}_h(t) = f\tilde{\ve{\omega}}(t)\q{q}(t)
\end{equation}
Observe that under the above assumptions $\q{q}(0) = 1$, and thus
$\dot{\q{q}}_h(0) = f \norm{\ve{\omega}} \qi$. Substituting this
into equation~\ref{quatDerivationSetup} and considering only the real part:
\begin{eqnarray*}
&& \cos\left(\frac{\norm{\ve{\omega}} h}{2}\right) =
\left[1 + \left( f \norm{\ve{\omega}} h \right)^2 \right]^{-\frac{1}{2}} \\
&\Leftrightarrow&
\left( f \norm{\ve{\omega}} h \right)^2 =
\frac{1}{\cos^2\left(\frac{\norm{\ve{\omega}} h}{2}\right)} - 1 \\
&\Leftrightarrow&
f(h, \norm{\ve{\omega}}) =
\frac{1}{\norm{\ve{\omega}} h} \sqrt{\frac{
1 - \cos^2\left(\frac{\norm{\ve{\omega}} h}{2}\right)}{
\cos^2\left(\frac{\norm{\ve{\omega}} h}{2}\right)}} =
\frac{1}{\norm{\ve{\omega}} h}
\tan\left(\frac{\norm{\ve{\omega}} h}{2}\right)
\end{eqnarray*}
To check, we substitute this result into the $\qi$-imaginary part of
equation~\ref{quatDerivationSetup}:
\begin{eqnarray*}
\sin\left(\frac{\norm{\ve{\omega}} h}{2}\right) & = &
\tan\left(\frac{\norm{\ve{\omega}} h}{2}\right)
\left[ 1 + \tan^2\left(\frac{\norm{\ve{\omega}} h}{2}\right)
\right]^{-\frac{1}{2}} \\
&=& \tan\left(\frac{\norm{\ve{\omega}} h}{2}\right)
\left[ \frac{\cos^2\left(\frac{\norm{\ve{\omega}} h}{2}\right) +
\sin^2\left(\frac{\norm{\ve{\omega}} h}{2}\right) }{
\cos^2\left(\frac{\norm{\ve{\omega}} h}{2}\right) }
\right]^{-\frac{1}{2}} \\
&=& \tan\left(\frac{\norm{\ve{\omega}} h}{2}\right)
\cos\left(\frac{\norm{\ve{\omega}} h}{2}\right) \\
&=& \sin\left(\frac{\norm{\ve{\omega}} h}{2}\right)
\end{eqnarray*}
Thus we establish the validity of this expression for $f$. Observe that by using
L'Hospital's rule, we can find the value of $f$ for an infinitesimally small time step:
$$
\lim_{h \to 0} f = \lim_{h \to 0} \frac{ \frac{\norm{\ve{\omega}}}{2}
\cos^{-2}\left(\frac{\norm{\ve{\omega}} h}{2}\right) }{ \norm{\ve{\omega}} } =
\frac{1}{2}
$$
i.e.\ we obtain the original equation~\ref{quatRateOfChange} for the
instantaneous rate of change.
Now let $\Delta \q{q} = h \dot{\q{q}} =
\frac{h}{2} \tilde{\ve{\omega}} \q{q}$.
From equation~\ref{quatRateOfChangeMagnitude} we find that
$\norm{\Delta\q{q}} = \frac{\norm{\ve{\omega}} h}{2}$.
Hence we can simplify the expression for the quaternion correcting factor by expressing it
in terms of $\Delta \q{q}$ as follows:
$$
h f \tilde{\ve{\omega}}\q{q} = \frac{h}{\norm{\ve{\omega}} h}
\tan\left(\frac{\norm{\ve{\omega}} h}{2}\right) \tilde{\ve{\omega}} \q{q} =
\tan\left(\norm{\Delta\q{q}}\right) \frac{\Delta\q{q}}{\norm{\Delta\q{q}}}
$$
This expression now has a clear geometric interpretation with respect to the 4D geometry
(see figure~\ref{quatIntFig2}):
$\norm{\Delta\q{q}}$ is measured in radians, and it corresponds to the \emph{correct}
angle between the old and the new vector $\q{q}$. Since $\q{q}$ and
$\dot{\q{q}}$ are orthogonal, we have a right-angled triangle between the origin,
the old and the new points $\q{q}$, and hence we can use the $\tan$ function to
evaluate the required length of the side in direction $\Delta\q{q}$.
\begin{figure}
\psfrag{frag:real}{Re}
\psfrag{frag:iimag}{$\mathsf{i}$-Im}
\psfrag{frag:q}{\q{q}}
\psfrag{frag:deltaq}{$\Delta\q{q}$}
\psfrag{frag:quergs1}{$\q{q}+\tan(\norm{\Delta\q{q}})\frac{\Delta\q{q}}{\norm{\Delta\q{q}}}$}
\psfrag{frag:quergs2}{$\mathrm{Quergs}(\q{q},\,\Delta\q{q})$}
\centerline{\includegraphics[width=7.7cm]{figures/quaternion3}}
\caption{Illustration of the operation of Quergs.\label{quatIntFig2}}
\end{figure}
Finally we can combine this correction and the subsequent quaternion normalisation into
a single function, which I call Quergs (for \emph{Qu}at\emph{er}nion inte\emph{g}ration
\emph{s}tep)\footnote{This naming follows the spirit of Shoemake~\cite{Shoemake:85}, whose
``Slerp'' function is an `acronym' of \emph{S}pherical \emph{l}inear int\emph{erp}olation.}:
\begin{eqnarray*}
\q{q}(t+h) = \mathrm{Quergs}(\q{q}(t), \Delta\q{q}) &=&
\frac{\q{q}(t) + \tan\left(\norm{\Delta\q{q}}\right)
\frac{\Delta\q{q}}{\norm{\Delta\q{q}}}}{
\norm{\q{q}(t) + \tan\left(\norm{\Delta\q{q}}\right)
\frac{\Delta\q{q}}{\norm{\Delta\q{q}}}}} \\
&=& \frac{\q{q}(t) + \tan\left(\norm{\Delta\q{q}}\right)
\frac{\Delta\q{q}}{\norm{\Delta\q{q}}}}{
\sqrt{1 + \tan^2\left(\norm{\Delta\q{q}}\right)}} \\
&=& \left[\q{q}(t) + \tan\left(\norm{\Delta\q{q}}\right)
\frac{\Delta\q{q}}{\norm{\Delta\q{q}}}\right]
\cos\left(\norm{\Delta\q{q}}\right)
\end{eqnarray*}
The last expression is simplest (and again allows geometric interpretation), but probably
the first of the three expressions is more useful for numerical evaluation, since it involves
only one trigonometric function and minimizes numerical errors.
When implementing this formula, care must be taken around the discontinuities of the $\tan$
function, where numerical instability may occur. These discontinuities are reached whenever a
body performs an odd multiple of half revolutions during a single time step.
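For concreteness, a direct \texttt{python}/NumPy transcription of the first of the three expressions above might look as follows (an illustrative sketch under the conventions of this section, not the implementation used in this project):
\begin{verbatim}
# Illustrative sketch of a Quergs step; quaternions are stored as (w, x, y, z).
import numpy as np

def quat_mult(p, q):
    pw, px, py, pz = p
    qw, qx, qy, qz = q
    return np.array([pw*qw - px*qx - py*qy - pz*qz,
                     pw*qx + px*qw + py*qz - pz*qy,
                     pw*qy - px*qz + py*qw + pz*qx,
                     pw*qz + px*qy - py*qx + pz*qw])

def quergs(q, omega, h):
    dq = 0.5 * h * quat_mult(np.array([0.0, *omega]), q)  # delta q = h * qdot
    n = np.linalg.norm(dq)
    if n < 1e-12:                      # no rotation during this step
        return q / np.linalg.norm(q)
    q_new = q + np.tan(n) * dq / n     # beware steps near odd multiples of pi/2
    return q_new / np.linalg.norm(q_new)

q0 = np.array([1.0, 0.0, 0.0, 0.0])
print(quergs(q0, np.array([3.0, 0.0, 0.0]), 1.0))  # ~ (cos 1.5, sin 1.5, 0, 0)
\end{verbatim}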
| {
"alphanum_fraction": 0.6896729523,
"avg_line_length": 52.0153256705,
"ext": "tex",
"hexsha": "23c2f5fa563b5273dfe89b9fa1479b0294dcad72",
"lang": "TeX",
"max_forks_count": 3,
"max_forks_repo_forks_event_max_datetime": "2019-05-08T05:38:45.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-04-17T14:39:12.000Z",
"max_forks_repo_head_hexsha": "546b78cec5cf3a83986a94086b97f4236b76df2a",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ept/maniation",
"max_forks_repo_path": "report/quatProofs.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "546b78cec5cf3a83986a94086b97f4236b76df2a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ept/maniation",
"max_issues_repo_path": "report/quatProofs.tex",
"max_line_length": 99,
"max_stars_count": 9,
"max_stars_repo_head_hexsha": "546b78cec5cf3a83986a94086b97f4236b76df2a",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ept/maniation",
"max_stars_repo_path": "report/quatProofs.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-25T00:40:52.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-05-09T00:54:43.000Z",
"num_tokens": 4506,
"size": 13576
} |
\section{Experiments}
\label{sec:appendix-experiments}
We complement Section \ref{sec:experiments} with additional experimental results.
In particular, we provide additional qualitative results to better judge the visual
quality of individual superpixel algorithms. Furthermore, we explicitly present \ASA and \UEL
on the \BSDS and \NYU datasets as well as \Rec, \UE and \EV on the \SBD, \SUNRGBD and \Fash datasets.
\input{appendix/experiments-qualitative}
\input{appendix/experiments-quantitative}
\input{appendix/experiments-runtime}
| {
"alphanum_fraction": 0.8089053803,
"avg_line_length": 44.9166666667,
"ext": "tex",
"hexsha": "3ae766dad7281703996e62b3208686dc8d2aa71c",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "83e0db95cff91fee26ea04d5ecdb221d441e940b",
"max_forks_repo_licenses": [
"Unlicense"
],
"max_forks_repo_name": "davidstutz/cviu2018-superpixels",
"max_forks_repo_path": "paper/appendix/experiments.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "83e0db95cff91fee26ea04d5ecdb221d441e940b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Unlicense"
],
"max_issues_repo_name": "davidstutz/cviu2018-superpixels",
"max_issues_repo_path": "paper/appendix/experiments.tex",
"max_line_length": 101,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "83e0db95cff91fee26ea04d5ecdb221d441e940b",
"max_stars_repo_licenses": [
"Unlicense"
],
"max_stars_repo_name": "davidstutz/cviu2018-superpixels",
"max_stars_repo_path": "paper/appendix/experiments.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 133,
"size": 539
} |
\subsection{Indirect branch}
| {
"alphanum_fraction": 0.7741935484,
"avg_line_length": 7.75,
"ext": "tex",
"hexsha": "0c4d6304ecd3cd6d334c9e899f8eb09e217756d1",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "adamdboult/nodeHomePage",
"max_forks_repo_path": "src/pug/theory/computer/machineCode/02-02-Indirect_branch.tex",
"max_issues_count": 6,
"max_issues_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93",
"max_issues_repo_issues_event_max_datetime": "2022-01-01T22:16:09.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-03-03T12:36:56.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "adamdboult/nodeHomePage",
"max_issues_repo_path": "src/pug/theory/computer/machineCode/02-02-Indirect_branch.tex",
"max_line_length": 28,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "adamdboult/nodeHomePage",
"max_stars_repo_path": "src/pug/theory/computer/machineCode/02-02-Indirect_branch.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 8,
"size": 31
} |
\section{Debugging}
\label{Debugging}
Our experience has been that ease of debugging has often been made
subservient to bragging rights on performance. The ability to debug code
interactively was one of the central goals of the \Q\ project. In this section,
we describe common use cases we found ourselves facing and how these are
addressed.
\be
\item Using an Integrated Development Environment.
Since \Q\ is implemented as a Lua package, the CLion IDE from JetBrains
with the Lua plugin allows interactive debugging sessions.
\item Sharing sessions. When working in teams, it is often useful to invite
colleagues to ``look over one's shoulder'' and even make changes
directly. To do so, we have embedded \Q\ in an HTTP server and have provided a command
line interface (CLI). When the CLI starts up, one provides the server and port
to which it should connect. As long as the appropriate ports are open, two
developers could issue commands to the server, the commands being serialized at the
server.
\item Save and Restore. Data scientists often need to multiplex between projects
and most development projects are not small enough to be solved in a single
session. The functions {\tt save} and {\tt restore} are handy in this case. {\tt
Save}
causes the evaluation of any partial vectors and writes out meta data about
these to a file. {\tt Restore} reads from that file and brings back to life the
vectors and any meta-data associated with them.
\item Printing variables. Most debuggers allow one to print the value of a
variable. ``print'' is less useful for vectors
which could easily have a billion values. What one typically needs to do
while debugging is to query some {\it property} of the vector. For example, is it
sorted, how many unique values does it have, what is its distribution,
\ldots These are enabled in \Q\ by invoking {\tt save} from the IDE and
{\tt restore} from another session, where these operations can be performed and
then intermediate results (like count of unique values) discarded.
\item Modifying state. In addition to inspecting variables, it is often useful
to modify them so as to continue the debugging session without having to
start over. For example, say that we hit a breakpoint and discover
that we had forgotten to sort a vector or load a table. We
(i) {\tt Save} from the IDE
(ii) {\tt Restore} in a parallel session
(iii) perform the modifications in that session
(iv) {\tt Save} and quit the session
(v) lastly, {\tt Restore} in the IDE.
\ee
| {
"alphanum_fraction": 0.7749106788,
"avg_line_length": 51.4081632653,
"ext": "tex",
"hexsha": "7293986e28f3ac3c46ba4e2ff5bb392b562604cf",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2015-05-14T22:34:13.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-05-14T22:34:13.000Z",
"max_forks_repo_head_hexsha": "2fb8a2b3636dd11e2dfeae2a6477bd130316da47",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "subramon/qlu",
"max_forks_repo_path": "DOC/Q_PAPER/debugging.tex",
"max_issues_count": 7,
"max_issues_repo_head_hexsha": "2fb8a2b3636dd11e2dfeae2a6477bd130316da47",
"max_issues_repo_issues_event_max_datetime": "2020-09-26T23:47:22.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-07-29T16:48:25.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "subramon/qlu",
"max_issues_repo_path": "DOC/Q_PAPER/debugging.tex",
"max_line_length": 84,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "2fb8a2b3636dd11e2dfeae2a6477bd130316da47",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "subramon/qlu",
"max_stars_repo_path": "DOC/Q_PAPER/debugging.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 582,
"size": 2519
} |
%!TEX root = ../thesis.tex
\chapter{Acknowledgements}
\label{ch:acknowledgements}
\documentclass[twoside,symmetric,notoc]{tufte-book}
\usepackage[normalem]{ulem}
% Margins
\geometry{
left=1in, % left margin
bottom=1in,
textwidth=25pc, % main text block
marginparsep=1.5pc, % gutter between main text block and margin notes
marginparwidth=13pc % width of margin notes
}
% Character spacing and hyphenating
\usepackage{microtype}
\usepackage[none]{hyphenat}
\sloppy
% Links
\PassOptionsToPackage{hyphens}{url}
\usepackage{url}
\usepackage{hyperref}
\usepackage[dvipsnames]{xcolor}
\newcommand\myshade{85}
\colorlet{myurlcolor}{MidnightBlue}
\hypersetup{
colorlinks=true,
linkcolor=myurlcolor!\myshade!black,
urlcolor=myurlcolor!\myshade!black,
}
% Figures
\usepackage{graphicx}
\graphicspath{{graphics/}}
\setkeys{Gin}{width=\linewidth,totalheight=\textheight,keepaspectratio}
\usepackage{mwe}
\usepackage{subfig}
% Tables
\usepackage{color, colortbl}
\definecolor{Gray}{gray}{0.9}
\usepackage{booktabs}
% Math
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{wasysym}
% Code
\usepackage{minted}
\usemintedstyle{vs}
% Verbatim
\usepackage{fancyvrb}
\fvset{fontsize=\normalsize}
% Commands for i.e. and e.g.
\usepackage{xspace}
\newcommand{\hairsp}{\hspace{1pt}}
\newcommand{\ie}{\textit{i.\hairsp{}e.,}\hspace{3pt}}
\newcommand{\eg}{\textit{e.\hairsp{}g.,}\hspace{3pt}}
\newcommand{\etal}{\textit{et al.}\xspace}
% Macros for typesetting the documentation
\newcommand{\hangleft}[1]{\makebox[0pt][r]{#1}}
\newcommand{\hquad}{\hskip0.5em\relax}% half quad space
\newcommand{\na}{\quad--}% used in tables for N/A cells
\providecommand{\XeLaTeX}{X\lower.5ex\hbox{\kern-0.15em\reflectbox{E}}\kern-0.1em\LaTeX}
\newcommand{\tXeLaTeX}{\XeLaTeX\index{XeLaTeX@\protect\XeLaTeX}}
% \index{\texttt{\textbackslash xyz}@\hangleft{\texttt{\textbackslash}}\texttt{xyz}}
\newcommand{\tuftebs}{\symbol{'134}}% a backslash in tt type in OT1/T1
\newcommand{\doccmdnoindex}[2][]{\texttt{\tuftebs#2}}% command name -- adds backslash automatically (and doesn't add cmd to the index)
\newcommand{\doccmddef}[2][]{%
\hlred{\texttt{\tuftebs#2}}\label{cmd:#2}%
\ifthenelse{\isempty{#1}}%
{% add the command to the index
\index{#2 command@\protect\hangleft{\texttt{\tuftebs}}\texttt{#2}}% command name
}%
{% add the command and package to the index
\index{#2 command@\protect\hangleft{\texttt{\tuftebs}}\texttt{#2} (\texttt{#1} package)}% command name
\index{#1 package@\texttt{#1} package}\index{packages!#1@\texttt{#1}}% package name
}%
}% command name -- adds backslash automatically
\newcommand{\doccmd}[2][]{%
\texttt{\tuftebs#2}%
\ifthenelse{\isempty{#1}}%
{% add the command to the index
\index{#2 command@\protect\hangleft{\texttt{\tuftebs}}\texttt{#2}}% command name
}%
{% add the command and package to the index
\index{#2 command@\protect\hangleft{\texttt{\tuftebs}}\texttt{#2} (\texttt{#1} package)}% command name
\index{#1 package@\texttt{#1} package}\index{packages!#1@\texttt{#1}}% package name
}%
}% command name -- adds backslash automatically
\newcommand{\docopt}[1]{\ensuremath{\langle}\textrm{\textit{#1}}\ensuremath{\rangle}}% optional command argument
\newcommand{\docarg}[1]{\textrm{\textit{#1}}}% (required) command argument
\newenvironment{docspec}{\begin{quotation}\ttfamily\parskip0pt\parindent0pt\ignorespaces}{\end{quotation}}% command specification environment
\newcommand{\docenv}[1]{\texttt{#1}\index{#1 environment@\texttt{#1} environment}\index{environments!#1@\texttt{#1}}}% environment name
\newcommand{\docenvdef}[1]{\hlred{\texttt{#1}}\label{env:#1}\index{#1 environment@\texttt{#1} environment}\index{environments!#1@\texttt{#1}}}% environment name
\newcommand{\docpkg}[1]{\texttt{#1}\index{#1 package@\texttt{#1} package}\index{packages!#1@\texttt{#1}}}% package name
\newcommand{\doccls}[1]{\texttt{#1}}% document class name
\newcommand{\docclsopt}[1]{\texttt{#1}\index{#1 class option@\texttt{#1} class option}\index{class options!#1@\texttt{#1}}}% document class option name
\newcommand{\docclsoptdef}[1]{\hlred{\texttt{#1}}\label{clsopt:#1}\index{#1 class option@\texttt{#1} class option}\index{class options!#1@\texttt{#1}}}% document class option name defined
\newcommand{\docmsg}[2]{\bigskip\begin{fullwidth}\noindent\ttfamily#1\end{fullwidth}\medskip\par\noindent#2}
\newcommand{\docfilehook}[2]{\texttt{#1}\index{file hooks!#2}\index{#1@\texttt{#1}}}
\newcommand{\doccounter}[1]{\texttt{#1}\index{#1 counter@\texttt{#1} counter}}
% Document parameters
\titleformat{\chapter}[hang]
{\normalfont\huge}{ \thechapter}{1em}{}
\titleformat*{\section}{\itshape\LARGE\bfseries}
\titleformat*{\subsection}{\itshape\Large\bfseries}
\makeatletter
\g@addto@macro{\UrlBreaks}{\UrlOrds}
\makeatother
% List of figures
\makeatletter
\renewcommand*\l@figure{\@dottedtocline{1}{1.5em}{2.3em}}
\makeatother
% List of tables
\makeatletter
\renewcommand*\l@table{\@dottedtocline{1}{1.5em}{2.3em}}
\makeatother
% Abstract
\def\abstractpage{\clearpage
\newgeometry{left=1in,right=1in}
\begin{center}
University of Washington \\
\vspace{0.2in}
\textbf{Abstract} \\
\vspace{0.2in}
The Use of Automatic Identification System Data to Determine Appropriate Stand-On Vessel Maneuvers \\
\vspace{0.2in}
Maureen K. Rowell \\
\vspace{0.20in}
Co-Chairs of the Supervisory Committee:\\
Anne Goodchild, Ed McCormack\\
Civil Engineering\\
\end{center}
\doublespacing
}
\def\endabstractpage{
\restoregeometry
\newpage
}
% Appendix
\usepackage[most]{tcolorbox}
% References
%\usepackage{natbib}
% Glossary
\usepackage[acronym,nomain,nonumberlist]{glossaries}
\makeglossaries
\newacronym{arpa}{ARPA}{Automatic Radar Plotting Aid}
\newacronym{ais}{AIS}{Automatic Identification System}
\newacronym{cog}{COG}{Course over ground}
\newacronym{colregs}{COLREGS}{International Regulations for Preventing Collisions at Sea}
\newacronym{cpa}{CPA}{Closest Point of Approach}
\newacronym{ecdis}{ECDIS}{Electronic Chart Display and Information System}
\newacronym{imo}{IMO}{International Maritime Organization}
\newacronym{ism}{ISM}{International Safety Management}
\newacronym{maib}{MAIB}{Marine Accident Investigation Branch, U.K.}
\newacronym{met}{MET}{Marine Education and Training}
\newacronym{mmsi}{MMSI}{Maritime Mobile Service Identity}
\newacronym{ntsb}{NTSB}{National Transportation Safety Board}
\newacronym{opa}{OPA}{Oil Pollution Act}
\newacronym{pawss}{PAWSS}{Ports and Waterways Safety System}
\newacronym{pwsa}{PWSA}{Ports and Waterways Safety Act}
\newacronym{sms}{SMS}{Safety Management System}
\newacronym{sog}{SOG}{Speed over ground}
\newacronym{solas}{SOLAS}{Safety of Life at Sea}
\newacronym{tss}{TSS}{Traffic Separation Scheme}
\newacronym{unclos}{UNCLOS}{United Nations Convention for the Law of the Sea}
\newacronym{uscg}{USCG}{United States Coast Guard}
\newacronym{vts}{VTS}{Vessel Traffic Service}
\newglossaryentry{give-way vessel}
{
name=give way,
description={The vessel must stay out of the way of the stand-on vessel and avoid crossing ahead of its bow}
}
\newglossaryentry{stand-on vessel}
{
name=stand-on vessel,
description={The vessel is to keep its course and speed}
}
\newglossaryentry{ownship}
{
name=ownship,
  description={The vessel relative to which the attributes and positions of other vessels are described}
}
\newglossaryentry{course over ground}
{
name=course over ground,
description={The actual direction of progress of a vessel, between two points, with respect to the surface of the earth}
}
\newglossaryentry{speed over ground}
{
name=speed over ground,
description={The speed of the vessel relative to the surface of the earth}
}
\newglossaryentry{heading}
{
name=heading,
description={The compass direction in which the ship's bow is pointed}
}
\newglossaryentry{bearing}
{
name=bearing,
description={The angle between a ray in the direction of north, whose origin is Point A, and Ray AB, the ray whose origin is Point A and which contains Point B}
}
\setcounter{tocdepth}{1}
\setcounter{secnumdepth}{2}
\setcounter{chapter}{0}
\usepackage[titletoc]{appendix}
% =============================================================================
% TITLE PAGE
% =============================================================================
\title{How Close is Too Close?}
\author{Maureen K. Rowell}
\publisher{University of Washington, Civil Engineering}
% =============================================================================
% DOCUMENT
% =============================================================================
\begin{document}
\pagenumbering{roman}
% =============================================================================
% COPY RIGHT
% =============================================================================
% copyright page
\pagestyle{empty}
\newpage
\begin{fullwidth}
\begin{center}
\setlength{\parindent}{0pt}
\setlength{\parskip}{\baselineskip}
Copyright \copyright\ \the\year\ \\
\thanklessauthor
\end{center}
\end{fullwidth}
% =============================================================================
% ABSTRACT
% =============================================================================
% abstract page
\begin{abstractpage}
\noindent The violation of the International Regulations for Preventing Collisions at Sea (\textsc{colregs}) is a precursor to the majority of collisions. These violations may indicate a poor safety culture within shipping, or they may indicate the failure of the \textsc{colregs} to capture the modern ordinary practice of seamen. The \textsc{colregs} are a mix of \textit{rules} and \textit{regulations}. Regulations are a form of explicit, externally applied control, while the text of a rule is ambiguous and requires observing the system it refers to in order to interpret its meaning. In order to observe how Puget Sound mariners interpret the rules of the \textsc{colregs} and whether they violate its regulations, their behavior is observed through the use of automatic identification system data. Give-way vessel behavior is used to quantify the ambiguous terms in the rules, and violations of the regulations are used to discover informal rules. The informal rules discovered include ferries crossing traffic separation schemes at non-90-degree angles relative to the traffic separation scheme and passing starboard-to-starboard in head-on encounters. This dissertation contributes to the reproducibility of research by confirming that the findings in the literature agree with what is discovered in the Puget Sound and by discovering local informal rules through the transparent use of public data.
%\end{doublespacing}
%\end{tcolorbox}
\end{abstractpage}
% =============================================================================
% FRONT MATTER
% =============================================================================
\tableofcontents
\listoffigures
\listoftables
% glossary
\glsaddall
\printglossary[type=\acronymtype,title=Glossary]
\mainmatter
\pagenumbering{arabic}
% =============================================================================
% INTRODUCTION
% =============================================================================
\chapter{Motivation}
\label{motivation}
\par{% global economy
Today's world is highly interconnected and reliant on many actors. This interconnectedness can be a benefit, linking natural resources, commodities, capital, information, and labor from around the world to utilize the globe's resources efficiently. Global trade expands the market for the goods a nation produces through exportation and allows a portion of domestic demand to be better met by a foreign nation through importation.\cite{OECD_2011} This economic globalization relies heavily on international shipping, with over 90\% of global trade transported by ship.\cite{MKC} The task of transporting the majority of the world's goods falls on the global merchant fleet, which in 2018 consisted of 94,171 vessels amounting to over 1.9 billion deadweight tonnage (DWT).\cite{UN_2018}
}
\par{% colregs
The objective of the merchant fleet is to safely, reliably, and cost-effectively transport cargo and passengers.\cite{Pinder} While all three conditions are required by the market, the safe operation of a vessel is an international legal requirement dictated in part by the International Regulations for Preventing Collisions at Sea (\textsc{colregs}).\cite{colregs} Collisions are one of the most frequent types of marine casualty, in part, because their avoidance requires coordinated action by more than one vessel.\cite{EMSA} The \textsc{colregs} were established by the International Maritime Organization (IMO) to act as the maritime ``rules of the road'', instructing mariners on how to navigate their vessels so as to avoid collisions.\cite{Benjamin} The \textsc{colregs} consist of 38 rules with rules 4 through 19 applying to the steering and sailing of vessels.\sidenote{The majority of the remainder apply to lights, sounds, and signals.}\cite{USCG} Three important examples are:
\begin{description}
\item[Rule 5 - Lookout] ``Every vessel shall at all times maintain a proper look-out by sight and hearing as well as by all available means appropriate in the prevailing circumstances and conditions so as to make a full appraisal of the situation and of the risk of collision.''
\item[Rule 8a and 8b - Action to Avoid Collision] ``(a) Any action taken to avoid collision shall be taken in accordance with the Rules of this Part and shall, if the circumstances of the case admit, be positive, made in ample time and with due regard to the observance of good seamanship. (b) Any alteration of course and/or speed to avoid collision shall, if the circumstances of the case admit, be large enough to be readily apparent to another vessel observing visually or by radar; a succession of small alterations of course and/or speed should be avoided.''
\item[Rule 17a and 17b - Action by Stand-on Vessel] ``(a) (i) Where one of two vessels is to keep out of the way, the other shall keep her course and speed. (ii) The latter vessel may however take action to avoid collision by her maneuver alone, as soon as it becomes apparent to her that the vessel required to keep out of the way is not taking appropriate action in compliance with these Rules. (b) When, from any cause, the vessel required to keep her course and speed finds herself so close that collision cannot be avoided by the action of the give-way vessel alone, she shall take such action as will best aid to avoid collision.''\cite{USCG}
\end{description}
}
\par{% danger of congested waterways
By definition, a collision must occur between two or more vessels.\cite{MAIB} One obvious contributor to the risk of collision, therefore, is the presence of other vessels. In order to on/off-load cargo, commercial vessels must call at ports and operate in restricted waterways in which they encounter fishing fleets, cruise ships, warships, and recreational vessels, as well as fixed navigational hazards, \eg{rocks, bridges, underwater wrecks}. In 2017, over 40\% of worldwide casualties occurred in port areas while another 29\% occurred in coastal waters.\cite[-0.4in]{EMSA} The presence of many vessels, with different capabilities and purposes, in a relatively small area increases the navigational hazard and decreases the time and space available to make evasive maneuvers.\cite[-0.2in]{Aydogdu}\cite{Mou}
}
\par{% available means
Rule 5 of the \textsc{colregs} states that the navigator must make use of ``all available means'' to avoid collisions. Before the development of modern technology, the navigator's only available means were his eyes and ears to detect hazards and dead reckoning to determine collision probability.\cite{Gurney} Today, the navigator has several additional technologies, such as Automatic Identification System (AIS), radar, automatic radar plotting aid (ARPA), auto pilot, echo sounder, and the electronic chart display and information system (ECDIS). Rule 5 requires more than the presence of technology, though; the navigator must \textit{make use} of all of the information that the technology provides.
}
\par{% information overload
With the advances in on-board technology, one might expect navigation-related incidents to be rare. In 2017, there were 94 total losses --- those in which the cost of repairs exceeds the value of the vessel and cargo --- which continued a 10-year downward trend.\cite[-1.2in]{Allianz_2018} Casualties of all severities, however, were much higher at 3,301.\cite[-0.7in]{EMSA} Human error is consistently credited as the main root cause for the majority (between 75 and 96 percent) of marine casualties.\cite[-0.2in]{Veysey} In 2014 there were three collisions in Singapore within a two week period. The Maritime and Port Authority of Singapore (MPA) found that the bridge teams, which included the captain and the pilot, lacked situational awareness in spite of advisories and warnings from the Port Operations Control Centre. The bridge teams were also not using the available AIS, radar, ARPA, and ECDIS to avoid the collisions in violation of Rule 5 of the \textsc{colregs}.\cite{Schuler} As more and more systems are introduced to the bridge, misinterpretation of complex data and over-reliance on automation are potential reasons mariners cannot make effective use of available technology (more details in Chapter \ref{ch:awareness}).\cite{Allianz_2015}
}
\par{% danger of large vessels
The amount of information is not the only thing in the maritime industry that is always increasing. The concept of economies of scale applies to shipping, and the size of commercial vessels is continually growing to move more cargo in fewer voyages with greater fuel efficiency.\cite{Henrich} The manning of the vessels, however, has not increased proportionately; in fact, the manning has decreased due to advances in on-board technology and an effort to reduce costs.\cite{Pike} Reduced manning puts a greater workload on all crew members and reduces time for training. In addition to the greater demand on the crew, larger vessels physically take longer to complete an evasive maneuver thereby increasing the probability of a collision if a navigational hazard is not detected early enough.\cite{Zhuo}
}
\par{% consequence of large vessel loss to shipowner
If a large vessel is damaged, the ocean carrier's capacity is reduced drastically in comparison to a smaller vessel, and there is potentially a greater loss of cargo.\cite{Hemly} Allianz marine-risk consultant Captain Rahul Khanna stated in 2015 that the prospect of an incident involving a container vessel similar in severity to the Costa Concordia incident is a major concern. The Costa Concordia salvage operation will cost \$2 billion in total;\cite{Thompson} this is in addition to the loss of 32 lives, the operational loss of the vessel itself, the compensation to be paid to passengers, and the overall negative impact on the cruise industry.\cite{BBC} Commenting on the possibility of a similar incident involving a container ship, salvage operators have estimated ``that it could take two years just to remove the containers from such a large vessel, assuming it were possible at all.''\cite{Millman} In Allianz's 2015 Safety Review, Captain Khanna warned that if vessel size keeps increasing, ``then risk management needs to go back to the drawing board.''\cite{Allianz_2015}
}
\par{% consequence of collision to society
The costs of a casualty extend beyond the shipowners involved to the greater shipping industry, port states, and private individuals. The shipping industry stakeholders include crew who could be seriously injured or killed, other shipowners whose insurance premiums rise, cargo owners who suffer property damage, and the industry as a whole, which is subject to bad public relations and a potential increase in regulation. Port states may have to pay for the search and rescue efforts in the event of a casualty, pollution prevention measures, pollution cleanup in the event of an oil or chemical spill, the disruptions due to hazards in the nation's navigable waterways, delays to the domestic supply chain, and the cost of increased inspections and detentions in response to the casualty. Private individuals may suffer if the price of their goods increases or their livelihood (\eg{tourism, fishing}) or property is damaged.\cite{SSY}
}
\par{% appropriate anti-collision action
Given that the consequences of a casualty can be large, the \textsc{colregs} state that collisions must be avoided at all costs. The first step in avoiding collision is the development of a voyage plan. To protect life and the environment, the IMO's Safety of Life at Sea (SOLAS) Regulation 34 requires that:\cite{solas}
\vspace{-1ex}
\begin{quotation}
\begin{enumerate}
\item Prior to proceeding to sea, the master shall ensure that the intended voyage has been planned using the appropriate nautical charts and nautical publications for the area concerned, taking into account the guidelines and recommendations developed by the Organization.
\item The voyage plan shall identify a route which:
\begin{enumerate}
\item takes into account any relevant ships' routing systems
\item ensures sufficient sea room for the safe passage of the ship throughout the voyage
\item anticipates all known navigational hazards and adverse weather conditions; and
\item takes into account the marine environmental protection measures that apply, and avoids, as far as possible, actions and activities which could cause damage to the environment.
\end{enumerate}
\end{enumerate}
\end{quotation}
% navigators don't want to deviate from voyage plan
The voyage plan for commercial vessels is designed to be economical, as well as safe. In the ideal world, navigators would guide their vessels along their planned routes with no deviations.\cite{Tsou} When two vessels encounter each other, however, a deviation may be necessary to avoid collision. Once a navigator has decided there is a risk of collision with another vessel, he must decide what action to take.
}
\par{% give way v stand on
The \textsc{colregs} assign responsibilities to each vessel when they are in an overtaking, head-on, and crossing situation. The stand-on vessel has the responsibility to continue with its current speed and course. The other vessel, the give-way vessel, should take action to avoid a collision. The give-way vessel would like to make the smallest deviation necessary to prevent collision, but its action must be, according to Rule 8, made early and large enough to be apparent to the stand-on vessel.\cite{USCG} Taking action as the stand-on vessel is only permitted when it becomes apparent that the give-way vessel is not taking appropriate action.
}
\par{% minimal interference
The \textsc{colregs} assert minimal authority over the give-way vessel's choice of evasive maneuver. The navigator is free to choose whether to alter course, speed, or both and to what degree; when to begin the maneuver; the minimum acceptable passing distance; and when to return to the original course and speed. Two restrictions on this discretion are (1) in a head-on encounter, where both ships are to alter course to starboard for a port-to-port passing, and (2) in a crossing encounter, where the give-way vessel is to avoid passing ahead of the stand-on vessel.\cite[-1in]{Plant}
}
\begin{marginfigure}[-2\baselineskip]
\includegraphics[width=0.85\textwidth]{colregpic.jpg}
\caption[When both vessels are similar, deciding which vessel is the stand-on vessel is simply based on the geometry of the encounter]{When both vessels are similar, deciding which vessel is the stand-on vessel is simply based on the geometry of the encounter. Reprinted from \textit{COLREGS: The "rules of the road" for sailors}, by Great Lakes Sailing. Retrieved 18 July 2019 from \url{https://www.great-lakes-sailing.com/colregs.html}}
\label{fig:colregpic}
\end{marginfigure}
\par{% rigid give way stand on designation
The discretion granted by the \textsc{colregs} when deciding an appropriate collision-avoidance maneuver gives way to rigidity when deciding which vessel is to give way and which is to stand on. This determination is based on the geometry of the encounter (see Figure \ref{fig:colregpic}):
\begin{itemize}
\item the overtaking vessel gives way to the vessel being overtaken;
\item the vessel that has the other on her own starboard side gives way in a crossing encounter;
\item and both vessels give way to each other in a head-on encounter.
\end{itemize} The geometry-based algorithm does not allow the speed and maneuverability of the vessels to enter into the decision.\cite{Plant}\sidenote{An exception is a ``vessel restricted in her ability to maneuver''.} The reason geometry alone is used to determine the give-way vessel is to preserve simplicity. In the 1970s, vessels operated at similar speeds with similar maneuverability in relatively uncongested waterways. In this case, the simplest differentiating characteristic of vessels was their relative position and course.
}
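\par{% geometry-only assignment, illustrative sketch
To make the geometry-only character of this assignment concrete, the sketch below expresses it in a few lines of Python. It is an illustration rather than the analysis code of this dissertation: it assumes that each vessel's bearing relative to the other's heading is already known, it omits the check that the overtaking vessel is actually closing, and the tolerance used for ``nearly reciprocal'' head-on courses is an illustrative choice, since the \textsc{colregs} do not fix a numeric value.
}
\begin{minted}{python}
def _abaft_beam(bearing: float) -> bool:
    """True when the bearing falls in the Rule 13 overtaking sector,
    i.e. more than 22.5 degrees abaft the beam."""
    return 112.5 < bearing % 360.0 < 247.5


def _nearly_ahead(bearing: float, tolerance: float) -> bool:
    """True when the bearing lies within `tolerance` degrees of dead ahead."""
    b = bearing % 360.0
    return b < tolerance or b > 360.0 - tolerance


def give_way_vessel(bearing_of_b_from_a: float,
                    bearing_of_a_from_b: float,
                    head_on_tolerance: float = 6.0) -> str:
    """Assign give-way responsibility from geometry alone (illustrative).

    Bearings are measured clockwise from each vessel's own heading, in
    degrees on [0, 360). The head-on tolerance is a placeholder; Rule 14
    speaks only of reciprocal or nearly reciprocal courses.
    """
    if _abaft_beam(bearing_of_b_from_a):    # A sees B well astern: B is overtaking
        return "B gives way (overtaking, Rule 13)"
    if _abaft_beam(bearing_of_a_from_b):    # B sees A well astern: A is overtaking
        return "A gives way (overtaking, Rule 13)"
    if (_nearly_ahead(bearing_of_b_from_a, head_on_tolerance)
            and _nearly_ahead(bearing_of_a_from_b, head_on_tolerance)):
        return "both give way (head-on, Rule 14)"
    # Crossing: the vessel that has the other on her own starboard side keeps clear.
    if bearing_of_b_from_a % 360.0 < 180.0:  # B lies to starboard of A
        return "A gives way (crossing, Rule 15)"
    return "B gives way (crossing, Rule 15)"
\end{minted}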
\par{% rule v regulation
The strict assignment of the give-way vessel can be termed a \textit{regulation} and the indistinct direction to give way, a \textit{rule}. A regulation is a form of explicit, externally applied control; its text completely defines its interpretation. The text of a rule is ambiguous and requires observing the system it refers to in order to interpret its meaning.\cite{Taylor_1988} For example, the \textsc{colregs} speak only to two-vessel interactions and do not account for the presence of other vessels. The \textsc{colregs} expect a vessel to sequentially avoid collisions with other vessels in order of their imminence. However, an action in accordance with the \textsc{colregs} that avoids a collision with one vessel may create or increase a collision risk with a second vessel.\cite{Stitt}\cite{Gottfried} In this case, the navigator must rely on an interpretation of the rules that is consistent with what other mariners would expect of him. At the 1977 Conference on Mathematical Aspects of Marine Traffic, Captain J. E. Bury remarked:
\begin{quotation}
The navigation of a ship is an intensely personal affair, personal to the Master in particular and to a lesser extent his officers. Not only are all the ships different but the men in charge of them are all different; different in background, experience, attitude and temperament, yet out of all this pot-pourri of variables in innate and acquired skills there is distilled what is known as the `ordinary practice of seamen.'\cite{Bury}
\end{quotation}
The ordinary practice of seamen can be thought of as the agreed upon interpretation of the ambiguous rules --- the patterns of behavior --- that, when necessary, supersedes the regulations. The \textsc{colregs} themselves encourage compliance with the ``social norms'' in Rule 2 which explains that adhering to the \textsc{colregs} will not exonerate a vessel who fails to comply with the ordinary practice of seamen.\cite{USCG} What appears to be a deviation from the \textsc{colregs}, may in fact be ``the use of informal, group rules, which are seen as violations by those on the outside, but as skilled adaptations by those on the inside.''\cite{Hale} A good example of an informal rule supplanting the \textsc{colregs} is that of the Dover Strait ferries.
}
\par{% chauvin and Lardjane
Analysis of 62 crossing interactions between cargo vessels and ferries in the Dover Strait showed that when the give-way vessel is a ferry it makes an evasive maneuver 94\% of the time; on the other hand when the give-way vessel is a cargo vessel it makes an evasive maneuver only 67\% of the time. The probability that the give-way cargo vessel will take the first action increases as the cargo vessel's speed increases. This relates to the maneuverability of the vessel; at slower speeds, reduced water pressure on the rudder makes it harder for large cargo vessels to maneuver. The formal regulation in a crossing situation is that ``the vessel which has the other one on her own starboard side shall keep out of the way'';\cite{USCG} the informal rule uncovered in this study is that the faster vessel shall keep out of the way regardless of the position of the vessels.\cite{Chauvin}
}
\par{% bad violations -> safety culture
While the Dover Strait informal rule violates the \textsc{colregs}, it is predictable and adhered to with the intention of maintaining both safety and efficiency. Without this predictability, the vessels' actions would be considered dangerous. One violation of both formal and informal rules that Chauvin and Lardjane found was that some give-way ferries in the Dover Strait turn to port and pass ahead, rather than to starboard and pass astern, to save time. Other reasons why the give-way vessel may violate the \textsc{colregs} in a potentially unsafe way are that the vessel has not detected the stand-on ship, has judged that no collision risk exists, believes itself to be the stand-on vessel, has decided to delay the maneuver, or cannot safely make the maneuver. The root cause of these violations can be attributed, in the majority of cases, to ignorance of the danger that is present but rarely comes to pass. Because casualties are so infrequent, unsafe behavior often goes unpunished and mariners become blind to the risk.\cite{Wagenaar} Unsafe violations of the \textsc{colregs}, whether deliberate or due to an insufficient understanding, rarely lead to a collision, yet failure to observe the \textsc{colregs} is the main cause of collisions. While it is easy to focus the blame and remediation on the individuals involved in casualties, the frequency and nature of human errors will be influenced by the ship's safety culture, where ``consistent procedures represent patterns that reflect the importance and prioritization of safety over competing goals.''\cite{Hetherington} Competing goals might include maintaining the schedule over maintaining safe navigation (see more detail in Chapter \ref{ch:regulation}). In the words of Captain Andrew Kinsey, mariners ``are under pressure, take a shortcut once that may not be the safest way to go, and get away with it. This then becomes the norm under stressed conditions.''\cite{Allianz_2018}
}
\par{% colregs norms
Collision avoidance is a cooperative task and requires a common understanding of the \textsc{colregs}. In order for other ships and shore-side monitors to identify unsafe situations, all participants need to know what is ``normal'' and what is ``abnormal'' or unsafe behavior with regards to collision avoidance. Additionally, technology that is programmed to aid the navigator must be aware of the informal rules and present pertinent information to the user. Written rules will never be comprehensive and will always require interpretation when applied to specific situations. External research and intervention must be aware of the state of practice of the \textsc{colregs} if their conclusions and recommendations are to be beneficial.
}
% ------------------------------------------------------------------------------------
\section{Research Questions}
\par{% infer rules
The goal of this dissertation is to discern the rules of collision avoidance, specifically Rules 8, 10, 13, 14, 15, and 17 as interpreted by Puget Sound\sidenote{Puget Sound will be used throughout this dissertation to refer to the Puget Sound and Strait of Juan de Fuca} mariners. Rules 10, 13, 14, and 15 will be analyzed for strict compliance, while the ambiguous statements in Rules 8 and 17 will be quantified.
\begin{description}
\item[Rule 8 - Action to Avoid Collision] \hfill \\
\begin{enumerate}[a.]
\item Any action taken to avoid collision shall be taken in accordance with the Rules of this Part and shall, if the circumstances of the case admit, be positive, made in \textbf{ample time} and with due regard to the observance of good seamanship.
\item Any alteration of course and/or speed to avoid collision shall, if the circumstances of the case admit, be \textbf{large enough to be readily apparent to another vessel observing visually or by radar}; a succession of small alterations of course and/or speed should be avoided.
\item Action taken to avoid collision with another vessel shall be such as to result in \textbf{passing at a safe distance}. The effectiveness of the action shall be carefully checked until the other vessel is finally past and clear.
\end{enumerate}
\newpage
\item[Rule 10 - Traffic Separation Schemes] \hfill \\
\begin{enumerate}[a.]
\item This Rule applies to traffic separation schemes adopted by the Organization and does not relieve any vessel of her obligation under any other rule.
\item A vessel using a traffic separation scheme shall:
\begin{enumerate}[(i)]
\item proceed in the appropriate traffic lane in the general direction of traffic flow for that lane;
\item so far as practicable keep clear of a traffic separation line or separation zone;
\item normally join or leave a traffic lane at the termination of the lane, but when joining or leaving from either side shall do so at as small an angle to the general direction of traffic flow as practicable.
\end{enumerate}
\item A vessel shall, so far as practicable, avoid crossing traffic lanes but if obliged to do so \textbf{shall cross on a heading as nearly as practicable at right angles to the general direction of traffic flow}.
\end{enumerate}
\item[Rule 13 - Overtaking] \hfill \\
\vspace{0.1in}
Any vessel overtaking any other shall keep out of the way of the vessel being overtaken.
\item[Rule 14 - Head-On Situation] \hfill \\
\vspace{0.1in}
When two power-driven vessels are meeting on reciprocal or nearly reciprocal courses so as to involve risk of collision each shall alter her course to starboard so that each shall pass on the port side of the other.
\item[Rule 15 - Crossing Situation] \hfill \\
\vspace{0.1in}
When two power-driven vessels are crossing so as to involve risk of collision, the vessel which has the other on her own starboard side shall keep out of the way and shall, if the circumstances of the case admit, avoid crossing ahead of the other vessel.
\item[Rule 17 - Action by Stand-on Vessel] \hfill \\
\vspace{0.1in}
Where one of two vessels is to keep out of the way the other shall keep her course and speed. The latter vessel may however take action to avoid collision by her maneuver alone, \textbf{as soon as it becomes apparent to her} that the vessel required to keep out of the way is not taking appropriate action in compliance with these Rules.
\end{description}
}
\subsection*{Problem 1 - How do mariners in the Puget Sound interpret the \textsc{colregs}?}
\par{If, during a collision-avoiding interaction, the stand-on vessel is able to maintain its course and speed, I will consider the \textsc{colregs} to have been followed. The give-way vessels' maneuvers will then be used to determine the course and/or speed change the stand-on vessel should expect from the give-way vessel. What the stand-on vessel deems to be ``too close'' will be identified by its decision to take an evasive maneuver of its own.
}
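\par{% illustrative detection of the first maneuver in an AIS track
A minimal sketch of how such a maneuver might be read off an AIS track is given below. It is illustrative only: the course and speed thresholds are hypothetical placeholders rather than values taken from this dissertation's analysis, and a real implementation would also need to smooth noise in the reported course and speed.
}
\begin{minted}{python}
from dataclasses import dataclass
from typing import List, Optional


@dataclass
class AisReport:
    timestamp: float   # seconds since the start of the encounter
    cog: float         # course over ground, degrees
    sog: float         # speed over ground, knots


def first_maneuver(track: List[AisReport],
                   course_threshold_deg: float = 10.0,  # hypothetical threshold
                   speed_threshold_kn: float = 1.0      # hypothetical threshold
                   ) -> Optional[AisReport]:
    """Return the first report whose course or speed departs from the
    pre-encounter baseline by more than the thresholds, or None."""
    baseline = track[0]
    for report in track[1:]:
        # Signed course difference wrapped to (-180, 180] before taking magnitude.
        course_change = abs((report.cog - baseline.cog + 180.0) % 360.0 - 180.0)
        speed_change = abs(report.sog - baseline.sog)
        if course_change > course_threshold_deg or speed_change > speed_threshold_kn:
            return report
    return None
\end{minted}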
\subsection*{Problem 2 - Are informal rules being followed?}
\par{If, during collision-avoiding interactions, an action is repeatedly taken in violation of the \textsc{colregs}, such as the stand-on vessel maneuvering first or a starboard-to-starboard passing, I will consider it an informal rule. I will investigate the features of the interactions in search of the conditions under which the rule applies.
}
\subsection*{Problem 3 - What is the nature and frequency of violations?}
\par{If, during a collision-avoiding interaction, an action deviates from both the \textsc{colregs} and the informal rules, I will consider it a safety violation. The severity and frequency of violations can serve as a proxy for the safety of maritime traffic and the safety culture of the vessels involved.
}
\section{Contribution}
\par{% Reproducability
In 2019, the Arizona State University Spatial Analysis Research Center held its first ``Replicability and Reproducibility in Geospatial Research'' workshop with the following justification:
\begin{quotation}
As research grows more complex and increasingly reliant on data and software, concerns about replicability will grow rather than diminish. For example, different software packages may produce different results even when the same technique of spatial analysis is applied to the same data or analysis results cannot be reproduced by the same software due to the lack of proper metadata or provenance documenting the spatial processing and parameters used.\cite{ASU}
\end{quotation}
This dissertation contributes to the reproducibility of research by confirming the findings in the literature and discovering local informal rules. Significant differences from the reviewed literature are this dissertation's geographical location, time period, and data source. None of the studies in the literature review took place in the United States or, more specifically, the Puget Sound. Due to the unique features of the Puget Sound area, including its ferry traffic and traffic separation schemes, this research is able to speak to the universality of the patterns of behavior regarding evasive maneuvers, passing distances, and informal rules. Many of the studies in the literature use radar surveys and/or simulations to generate data. Reproducing these studies' results using AIS data provided by the United States Coast Guard demonstrates the appropriateness of using AIS data in maritime safety research. The transparent and well-documented use of public data in this dissertation will allow future researchers to reproduce this analysis and identify the effects of the data source, geographic location, and/or methodology.
}
% -------------------------------------------------------------------------------------
\section{Road Map}
\par{%
\begin{description}
\item[Chapter 2] This chapter introduces the shipping industry's long-standing opposition to regulation. The change in navigational patterns and cost of collision between the age of sail and that of the steamship was so great that agreed upon collision rules were necessary. Additional regulation was firmly discouraged until the tragic sinking of the \textit{Titanic} sparked a new age in maritime safety. A large body of international law was created, but its efficacy was limited by its enforcement. Though a ship is under the jurisdiction of the country in which it is registered, ships traverse international seas and the territorial waters of other countries. Those countries visited by enormous ships laden with dangerous cargo may assert their jurisdiction with regard to protecting the environment. Despite both flag and port State regulations, shipping remains prone to accidents. The modern regulatory framework imposes additional self-regulation requirements. The extent to which a shipping company self-regulates is a function of its safety culture. A poor safety culture will lead to unsafe behavior in all aspects including adherence to the \textsc{colregs}.
\item[Chapter 3] This chapter discusses the introduction of automation technology into shipping both on-board and on-shore in an attempt to reduce human error in navigation. The majority of human error relates to poor situational awareness. Technology designed to increase the amount and rate of situational information provided to the mariners is in danger of surpassing human limitations. Technology-assisted collisions are a persistent problem in aviation and maritime navigation due to poor training, over-reliance on technology, and an increase, rather than decrease, in workload. Shore-side support suffers from the same problems, and their authority to direct traffic often goes unused. Human error is unavoidable, and automated systems will always require human supervision, trouble-shooting, and backup. Poor design of technology along with poor management explains why technology may not always increase safety.
\item[Chapter 4] This chapter connects the concept of safety culture to the traffic conflict technique, which asserts that numerous traffic conflicts precede rare collisions. Traffic conflicts are identified through traffic rule violations and the presence of evasive maneuvers, while their severity is measured by vessel proximity. Vessel proximity can be measured using ship domain or, more simply, the closest point of approach (a minimal sketch of this computation follows the road map). A mariner will take evasive action if he believes his ship domain will otherwise be violated. The shape of the ship domain conveys information about mariners' preferences. For instance, if the ship domain is larger on the starboard side than the port side, the mariner feels safer passing port-to-port; passing starboard-to-starboard is in violation of the \textsc{colregs} and would increase liability in case of collision. Traffic rule violations may be due to an informal rule or unsafe behavior depending on the frequency of the violation. An example informal rule is for the faster or more maneuverable vessel to give way regardless of the \textsc{colregs}.
\item[Chapter 5] This chapter explains the data cleaning process to take MarineCadastre data and process it into a query-able database format.
\item[Chapter 6] This chapter contains the results of the dissertation. Stand-on ferries were found to maneuver around cargo vessels through small changes in course and heading. Ferries were found to cross traffic separation schemes at non-90-degree angles relative to the TSS when crossing at a right angle would take them off their route. Ferries were also found to pass starboard-to-starboard more frequently than other vessel types.
\end{description}
}
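\par{% closest point of approach, reference sketch
For reference, the closest point of approach mentioned in the summary of Chapter 4 can be computed from two vessels' positions and velocities under the usual assumption that each holds her course and speed. The sketch below is illustrative only; it treats the positions as lying in a flat local plane with consistent units, which is an approximation adequate over the short ranges of an encounter.
}
\begin{minted}{python}
import math


def closest_point_of_approach(pos_a, vel_a, pos_b, vel_b):
    """Time to and distance at the closest point of approach (CPA),
    assuming both vessels hold their current course and speed.

    pos_* : (x, y) positions in a local planar frame (e.g., metres)
    vel_* : (vx, vy) velocities in the same frame (e.g., metres per second)
    Returns (t_cpa, d_cpa); t_cpa is clamped to zero when the vessels
    are already at or past their closest approach.
    """
    rx, ry = pos_b[0] - pos_a[0], pos_b[1] - pos_a[1]   # relative position
    vx, vy = vel_b[0] - vel_a[0], vel_b[1] - vel_a[1]   # relative velocity
    speed_sq = vx * vx + vy * vy
    if speed_sq == 0.0:               # identical velocities: the range never changes
        return 0.0, math.hypot(rx, ry)
    t_cpa = max(0.0, -(rx * vx + ry * vy) / speed_sq)
    d_cpa = math.hypot(rx + vx * t_cpa, ry + vy * t_cpa)
    return t_cpa, d_cpa
\end{minted}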
% ====================================================================
% INTRODUCTION TO SHIPPING REGULATION AND SAFETY
% ====================================================================
\chapter{Ship Safety Regulation}
\label{ch:regulation}
\par{% America and UK competed against each other - government stayed out of regulation
When America gained its independence in the late 1700s, Britain gained a new shipping competitor. Each nation was fighting to win its share of the world's trade and each was heavily subsidizing its merchant marine.\cite{Gibson} The shipowners of the day happily accepted the large government subsidies but viewed safety standards as a hindrance to free trade:
\begin{quotation}
The dogma of absolute freedom of competition reigned supreme. It was possible to build a ship more or less whatever way one liked, equip it with whatever instruments one liked, operate it according to whatever standards one liked, and sail it whatever way one liked on any seas.\cite{Boisson}
\end{quotation} It was assumed that the shipowner --- who, on top of the subsidy, committed his personal funds --- would have a vested interest in the safety of the ship. Each government, not wanting to impede its fleet in any way, for the most part happily stayed out of the way, believing that the market would make shipping safe.
}
\par{% marine insurance was thought to ensure safety
The risk involved in shipping was well-known with marine insurance being the oldest recorded form of indemnity.\cite{Noussia} Marine insurance in the 1700s involved an underwriter who assessed the risk of a voyage and then assumed a portion of that risk in return for a set premium paid by the ship or cargo owner. In order to make an informed assessment of the risk, and thereby set the correct premium, the underwriter surveyed the vessel and crew under consideration; in this way, the market established the first form of safety inspections.\cite{Bennet} The insured, however, had an interest in deceiving the underwriter both before and after the policy was written. One owner went so far as to insure his cargo multiple times for an inflated value, secretly unload the cargo, intentionally sink the vessel, and then collect on his insurance policy.\cite{Kingston} Marine insurance, therefore, turned out not to be the avenue through which marine safety could be ensured; it was, in fact, detrimental to safety as it removed the shipowner's vested interest in his vessel.
}
\par{% Britain saw too many wrecks and investigated, still no much regulation
Without government oversight or a market incentive for safe shipping, British citizens demanded an investigation into the causes of Britain's increasing number of shipwrecks,\cite{1838} which in 1820 alone resulted in 20,000 deaths. The Select Committee on the Causes of Shipwrecks carried out an investigation in 1836 and found the causes of marine casualties to be ``defective construction, inadequate equipment, imperfect state of repair, improper and excessive loading, incompetence of masters, drunkenness among officers and crew, and marine insurance which inclined shipowners to disregard safety.''\cite{Boisson} The investigation recommended that a system be established to examine and certify officers. Lobbying by shipowners and a general dislike of government interference in private business kept any legislation from being passed.
}
\begin{marginfigure}
\centering
\includegraphics[width=0.95\textwidth]{trinity1.jpg}
\caption[If both vessels alter course to starboard, the Trinity House rule does not avoid a collision.]{If both vessels alter course to starboard, the Trinity House rule does not avoid a collision. Reprinted from Kemp. (1976). pp. 344.}
\label{fig:trinity1}
\end{marginfigure}
\begin{marginfigure}
\centering
\includegraphics{trinity2.jpg}
\caption[If both vessels do not deem a collision risk to exist and take action, the Trinity House rule causes a collision.]{If both vessels do not deem a collision risk to exist and take action, the Trinity House rule causes a collision. Reprinted from Kemp. (1976). pp. 344.}
\label{fig:trinity2}
\end{marginfigure}
\par{% the start of passenger travel
Due to the dismal safety record of ships at the time, traveling by ship as a passenger was avoided in the early 1800s. It was, however, necessary for immigration to the United States from Europe. Initially, immigrants were transported on sailing vessels that were primarily carrying cargo and mail. With immigration increasing and the invention of the steamship, Britain began building passenger ships in the 1830s. The first steamship to offer regular transatlantic service was the \textit{Great Western} operated by the civil engineer Isambard Kingdom Brunel's Great Western Line.\cite{LeMay} Several other companies began running the transatlantic route in the 1840s and '50s including the Cunard Line, White Star Line, and Collins Line.\cite{Young} The lines competed for business and for the honor of holding the Blue Riband, the award given to the vessel that held the record for the fastest westward Atlantic crossing.\cite{Ujifua}
}
\par{% steam engines created need for collision regulations
The arrival of the steamship fundamentally changed navigational patterns, so much so that the introduction of formal rules was needed to maintain order. In the age of sail, the common rule was that a vessel on a port tack\sidenote{A sailing ship is on a port tack when the wind is coming toward the ship from the port side.} gave way to the vessel on a starboard tack by altering course to starboard and passing astern of the other vessel.\cite{Plant} In 1840, the London Trinity House Corporation established a new rule that required two steamships, when on a collision course, to \textit{both} alter course to starboard. This rule was not law but was considered good seamanship by courts deciding collision cases. A major issue with the Trinity House Rule was that it did not ensure \textit{positive} action, meaning that, by following the rule, the distance between the vessels would not necessarily grow (see Figure~\ref{fig:trinity1}). In some cases, following the rule could lead to a collision that would otherwise not have occurred (see Figure~\ref{fig:trinity2}). To correct this dangerous inadequacy, the U.K. Board of Trade drafted a completely new set of rules, known as the Articles, in 1860. The Trinity House rule requiring both steamships to take action was supplanted by the requirement for a stand-on vessel to maintain her speed and course.\cite{Llana}
}
\par{% COLREGS
The Articles were based on the principles of good seamanship and were the foundation for the Collision Regulations, which became international law in 1889 and remained virtually unchanged until 1972.\cite{Kemp}\cite{Mansell} Between 1889 and 1972, the size, speed, and number of vessels increased, and the nature of vessel interactions became more complex.\cite{revision} The ordinary practice of seamen naturally evolved to keep pace with the changing conditions. The formal rules, however, fell behind the times and were criticized for being out-of-date, inapplicable to multi-vessel encounters, and lacking in clear direction.\cite{Plant}\cite{Garcaa-Fraas} In 1972, the Convention on the International Regulations for Preventing Collisions at Sea (\textsc{colregs}) was held and a new set of rules was adopted. The 1972 \textsc{colregs} are the rules in force around the world today.
}
\par{% no more regulation was allowed
Collision regulations affected a limited, relatively inconsequential, realm of shipping. The rules, ideally meant to prevent collisions, were mainly used to apportion liability once a collision had occurred. If no collision resulted from a rule violation, there was no party to claim damages and, therefore, no penalty was imposed on the offending vessel. Additionally, the cost of following collision regulations was negligible compared to the cost of complying with ship construction regulations. Therefore, shipowners appeased the regulators and welcomed anti-collision rules; any further interference by the government, however, was strongly opposed. U.S. attempts to improve steamboat safety and U.K. efforts to prevent the overloading of vessels each took decades to succeed and demonstrate the resistance within the shipping industry to any form of regulation.
}
\par{% steam engine
In the U.S., steam engines were used long before appropriate safety procedures could be determined, resulting in a barrage of fires and explosions.\cite{Selcer} To attract the masses, steamboat operators on the Mississippi and Ohio Rivers would routinely race each other while carrying too many passengers.\cite{Collier} The steamboat \textit{Sultana} was built to accommodate 376 passengers, but on 27 April 1865, was carrying 2,427 when she exploded and sank, killing over 1,700.\cite[-0.3in]{Huffman} The response to the worst maritime disaster in U.S. history was the 1871 establishment of America's first regulatory agency, the Steamboat Inspection Service.\cite{Voulgaris}
}
\par{% even basic regulation was struggle
In the U.K., understanding the danger of overloading a ship, Samuel Plimsoll fought to require that a ship be marked with a load line beginning in the 1860s.\cite{Hemenway} He was met with great resistance, as, in the words of maritime historian Lincoln Paine, ``greed had replaced disease as the greatest threat to passengers and crew.''\cite{Paine} While the law requiring a visible load line was passed in 1876, shipowners fought against the regulation\cite{Mansell} for another 25 years.\cite{Arnott} This load line, now called the Plimsoll line, became an international requirement in 1930 and is still visible on vessels today.
}
%---------------------------------------------------------------------
\section{The Birth of International Regulation}
\par{% competition led to bigger vessels
The most successful passenger line in the late 1800s and early 1900s was the Cunard Line, which, despite winning the Blue Riband twenty times, claimed that racing across the ocean conflicted with its safety policy. The Cunard Line was synonymous with safety, and its operations, wartime excepted, never resulted in a single loss of life.\cite{Patten}\cite{Sutton} Despite its public dismissal of the award, Cunard --- at the urging and with the financial support of the British government --- immediately began construction on two new ships after the North German Lloyd Line took the Blue Riband away from Cunard in 1898.\cite{Young}\cite{Wiltbank} In 1906, Cunard launched the \textit{Lusitania} and the \textit{Mauretania}, which were the world's largest ships and reclaimed the Blue Riband in 1907 and 1909, respectively.\cite{Paine} Not to be left behind, the White Star Line began construction on its \textit{Olympic}-class vessels, including the \textit{Olympic}, \textit{Titanic}, and \textit{Britannic}, in 1908.
}
\par{% titanic disaster
Advertised as the unsinkable ship, the \textit{Titanic} left Southampton on her maiden voyage destined for New York on 10 April 1912.\cite{Flayhart} The official safety policy of the White Star Line stated:
\begin{quotation}
You [the Captain] are to dismiss all idea of competitive passages with other vessels and to concentrate your attention upon a cautious, prudent and ever watchful system of navigation, which shall lose time or suffer any other temporary inconvenience rather than incur the slightest risk which can be avoided.\cite{GBC}
\end{quotation}
On April 14, after several warnings about icebergs from passing ships, the \textit{Titanic} hit an iceberg at full speed and within three hours had sunk to the bottom of the Atlantic resulting in over 1,500 deaths.\cite{Cox} Given the known icy conditions, the immediate cause was that the \textit{Titanic} was sailing too fast and did not maintain a proper lookout. The disaster was exacerbated by flaws in construction, deficiency of lifesaving equipment, and lack of communication regulations.\cite{Senate}
}
\par{% titanic disaster
The \textit{Titanic} was carrying only enough lifeboats to accommodate about 50\% of its passengers.\cite{Peltz} Due to the high transatlantic traffic in the 1910s, it was expected that, in the event of an emergency, lifeboats would be used to ferry passengers from the distressed vessel to another nearby ocean liner, which would use its own lifeboats as well.\cite{Cox} In the case of the \textit{Titanic} there was in fact another vessel nearby, the \textit{Californian}. The \textit{Californian}, which was in a position to potentially save all distressed passengers and crew, rendered no assistance to the \textit{Titanic}. After the radio operator aboard the \textit{Californian} had informed the \textit{Titanic} that she had stopped her engines due to ice, he turned off his radio and went to bed at 23:35 ship time. At 00:55 the crew of the \textit{Californian} saw flares in the sky but did not deem them to be distress signals. As water flowed over the top of the \textit{Titanic}'s insufficiently high watertight bulkheads, no one on the \textit{Californian} checked the wireless radio over which the \textit{Titanic} had been issuing a distress call for over two hours.\cite{Flayhart}
}
\par{% solas
The sinking of the \textit{Titanic} was the pivotal moment in ship safety. It was the beginning of shipping's reactive approach to safety, in which, after a major incident, an international convention is convened and prescriptive regulations specific to the incident are doled out.\cite{Haapasaari} In the case of the \textit{Titanic}, the reactive convention was the International Convention for the Safety of Life at Sea (SOLAS), convened by the U.K. in 1913 and adopted in 1914.\cite{Phillips} The 1914 SOLAS convention was the first international meeting regarding maritime safety standards in history and included technical regulations such as a minimum number of lifeboats, an increase in watertight divisions, and 24-hour wireless radio watches.
}
\par{% economic advantage
The beginning of World War I unfortunately overshadowed the proceedings, and the 1914 convention never entered into force. After the war, individual countries attempted to implement portions of the convention through domestic legislation. For example, the U.K. attempted to increase the required number of watertight divisions in all British-built vessels; this was a broader application of the SOLAS convention's recommendation, which applied only to passenger ships. British shipowners strongly objected, claiming that the increase in cost would halt the construction of British cargo vessels. If safety standards were to be supported by shipowners, all ships in international trade needed to be subject to the same regulations. Otherwise, one nation's fleet, consisting of substandard vessels, would be at a competitive advantage. In 1927, the U.K. invited the maritime nations to a second SOLAS convention to be held in 1929 for the purpose of adopting an updated version of the 1914 convention that would be ratified by all nations, creating, for the first time, a uniform body of international maritime law.\cite{Rock}
}
\par{% limited success
The success of the 1929 SOLAS convention was limited, however, due to resistance from the industry. The President of the National Council of American Shipbuilders and delegate to the 1929 convention, Henry G. Smith, reported upon the completion of the convention:\sidenote{Rock. (1929). pp. 121.}
\begin{quotation}
The commercial interests involved in the operation of ships and indirectly in the building of ships had to face this [safety] problem from a different standpoint than those who were considering the question more from an academic standpoint. The question of cost is involved. The American merchant marine, particularly in the foreign trade, is at a distinct disadvantage at the present time because of the higher cost of ships in the United States.
\end{quotation}
The U.S. Congress, believing the 1929 convention was too stringent, did not ratify the treaty until 1936 following public outrage at the 1934 sinking of the \textit{Morro Castle} off the New Jersey coast which left 126 --- mostly women and children --- dead.\cite{Schumacher} With the Great Depression and World War II serving as distractions, the next SOLAS convention was not held until 1948.
}
\par{% establishment of IMO
After its creation in 1945, the United Nations saw the need for an international organization to facilitate the timely adoption of maritime standards and held a maritime conference in 1948 to prepare a convention on the Intergovernmental Maritime Consultative Organization (IMCO). The UN Maritime Conference required that 21 nations become party to the IMCO convention before the organization would be established.\cite{Mitroussi} Therefore, the maritime conference also convened a SOLAS convention to be held in 1948 to promote and hasten the adoption of the IMCO convention. Fears that the IMCO represented only the interests of the dominant maritime nations led to a 10-year delay in its founding. Several nations became party to the convention with the stipulation that they would withdraw if the IMCO attempted to influence ``purely commercial or economic'' matters beyond technical safety.\cite{IMO}
}
\par{% solas 1974
Upon its creation, the IMCO was given responsibility for keeping the SOLAS convention up-to-date and for convening any other necessary conventions. Its first action was to convene a SOLAS convention in 1960 to update the 1948 convention. The 1960 SOLAS convention allowed for the adoption of amendments given positive action by two-thirds of the contracting parties.\cite{Gutierrez} In reality, however, this procedure was highly ineffective with none of the proposed amendments to the 1960 SOLAS convention receiving enough support for adoption. The result was that safety regulations significantly lagged behind changes in technology and operations. When a new SOLAS convention was convened in 1974, the IMCO decided to adopt the ``tacit acceptance procedure'' in which an amendment is adopted unless one-third of contracting parties take negative action within two years.\cite{Lansford} The 1974 SOLAS convention has since been amended nearly 30 times and remains in force today.\cite{Zacharias}
}
%-----------------------------------------------------------------------------------
\section{Issues of Compliance}
\par{% make regulations and shift focus to compliance
The initial purpose of the IMCO was to create a body of international maritime law. From its founding in 1958 through the 1970s, it did so through the adoption of nearly 30 conventions and hundreds of voluntary codes and guidelines that set standards for, \textit{inter alia}, safety of life at sea, load lines, collision avoidance, maritime communications, and pollution at sea,\cite[-0.3in]{Srivastava} to the point that the IMCO ``governs just about every facet of the industry.''\cite{IMO_2016} Once the IMCO had succeeded in getting regulations on the books, it shifted its focus in the 1980s toward correct implementation of and compliance with those regulations.\cite{Mitroussi} The IMCO was renamed the International Maritime Organization (IMO) in 1982 to remove the impression that the conventions were of a consultative, rather than official, nature.
}
\subsection{Flag State Control}
\par{% flag states
As a UN agency, the IMO has no enforcement power; the nations that become party to an IMO convention implement the regulations through domestic law. The original idea was that the nations that have vessels registered under their flag --- flag States --- were to assume the majority of the responsibility for the implementation and enforcement of IMO regulations.\cite{Joyner} The 1982 United Nations Convention on the Law of the Sea (UNCLOS) concluded that:\cite{UNCLOS}
\begin{quotation}
The absence of any authority over ships sailing the high seas would lead to chaos. One of the essential adjuncts to the principle of the freedom of the seas is that a ship must fly the flag of a single State and that it is subject to the jurisdiction of that State.
\end{quotation}
Consequently all vessels conducting commercial activity on the high seas must be registered to exactly one State; this is referred to as a ship's flag. The flag State sets the conditions that vessels under its flag must abide by. All States, even landlocked ones, are allowed to register ships. Shipowners, to a large extent, are free to choose which flag they register under.\cite{Mansell_class}
}
\par{% traditional flag states and closed registries
Traditional maritime powers such as the U.S. operate what are called closed registries, meaning only vessels with legitimate ties to the nation can register under its flag. Closed registries enforce international regulations, impose additional restrictions, and tax the earnings of the vessels under their flags.\cite{Mukherjee} The U.S. registry requires that the shipowner be an American citizen and that the vessel be built in the U.S. and crewed primarily by American citizens.\cite{Papavizas} The result is high costs relating to the seaworthiness of the ship, the working conditions of crew, safety on-board, prevention of marine pollution, and taxes, all of which reduce the competitiveness of U.S.-flagged vessels.\cite{1960} To cut costs, shipowners turn to open registries, a practice known as ``flagging out.''\cite{Yannopoulos}
}
\par{% FOC
Open registries, also called flags of convenience, do not require a legitimate link between the vessel and country of registry and allow shipowners to avoid the high costs of closed registries. An open registry accepts foreign-owned ships, does not tax the ships' revenues, charges low registration fees, does not set construction or manning requirements, and has no administrative means to enforce regulations. Less developed nations, such as Panama and Liberia, use their registries as a major source of income and try to make their flags attractive to shipowners; income from Liberia's ship registry may account for up to 70\% of its official revenue.\cite{Wiswall}\cite{Sharife} In 2018, 29\% of world tonnage was registered under Panama and Liberia,\cite{UN_2018} while only 0.1\% was under the U.S. flag.\cite{DOT} Several studies investigating the relationship between flag and safety have found that vessels under a flag of convenience have a substantially higher rate of casualty than those under a traditional flag.\cite{Boisson}
}
\par{% role of classification societies
Regardless of the registry, all flag States are required to inspect their vessels with regards to construction, navigation and radio equipment, security, and labor laws.\cite{Lagoni} The 1974 SOLAS and 1982 UNCLOS conventions allow a flag State to delegate this function to non-State entities,\cite{solas} generally a classification society. Because (1) it is virtually required for a vessel to be classed in order to be financed and insured and (2) classification societies have the expertise and world-wide reach that State administrations lack, flag States have begun to rely upon classification societies to carry out statutory surveys to ensure that vessels are in compliance with international regulations.\cite{Hosanee}
}
\par{% origin of classification societies
The first classification society, Lloyd's Register of Shipping, was established in 1764 as a way for insurers to gather reliable information regarding shipowners and vessels before underwriting a policy. The classification society would publish a register of all vessels and their rating, based on risk factors, to a limited group of insurance underwriters and brokers. At first, the rating was arrived at through discussions at Lloyd's Coffee House but quickly achieved more sophistication through physical surveys of the vessels.\cite{Boisson_1994} By the end of the 1800s, marine insurers in France, Germany, Italy, Japan, Norway, and the U.S. had established their own classification societies. Since the registers were not shared with shipowners and the subscription fees were paid by underwriters, the classification societies had the appearance of an objective independent party.
}
\par{% shipowner pays for certificate
Shipowners were unhappy with the secretive nature of the rating system. Seeing a new market, classification societies began offering shipowners a certification that, after a detailed survey, would be granted to a vessel and valid for a number of years contingent on satisfactory periodic surveys. The classification society deemed a vessel as either in class or out of class according to the society's rules; if deemed in class, a vessel was eligible for a class-suitable insurance policy. The class certification was a commercial product, and shipowners were free to choose the society offering the best price.\cite{Boisson} The market pressure combined with the fact that the classification societies accept no liability for the safety of the vessels they certify\sidenote{Lagoni, N. I. (2007).} has led the maritime industry to question whether classification societies can effectively enforce safety regulations or if they are contributing to the race to the regulatory bottom.\cite{Talley}
}
\par{
The ability of classification societies to carry out their duties was seriously questioned in 1999 when the Maltese-flagged tanker \textit{Erika} sank off the coast of France, spilling in a single incident the total amount of oil spilled worldwide the previous year. The \textit{Erika} was classed by the Italian classification society RINA, which was --- and is --- a member of the group of leading classification societies, the International Association of Classification Societies (IACS). Inspections carried out in 1998 and 1999 showed severe corrosion, but RINA, nevertheless, granted the \textit{Erika} a certificate of seaworthiness. Later in 1999, when the \textit{Erika} broke in half, RINA came under intense scrutiny and was initially charged criminally. Generally, classification societies include an exemption clause in their contract with a shipowner that exempts them from any liability in the case of a casualty. This was the case for RINA and the \textit{Erika}, but the French authorities found that the contract was nullified by RINA's recklessness and held the classification society civilly liable under French law.\cite{Gahlen} A similar incident occurred in 2002 when the tanker \textit{Prestige}, flagged in the Bahamas and classed by the American Bureau of Shipping (ABS), sank off the coast of Spain. Spain attempted to sue ABS in the U.S. under the International Convention on Civil Liability for Oil Pollution Damage (CLC) but the U.S. is not a signatory to the CLC.\cite{Kearney}
}
\par{The different levels of resources and expertise across flag States and the market forces acting on classification societies have led to varying degrees of implementation and enforcement of IMO regulations around the world.\cite{Mansell_class} The \textit{Erika} and \textit{Prestige} disasters show the variable success a nation affected by an oil spill can achieve in recouping damages after the fact. The next section describes the oil spill that triggered the implementation of a new regime of regulatory enforcement for pollution prevention. Keep in mind, however, that the \textit{Erika} and \textit{Prestige} disasters occurred within this new regime.
}
\subsection{Port State Control}
\par{% torrey canyon
In March 1967, the captain of the fully laden Liberian-flagged supertanker \textit{Torrey Canyon} decided to sail through the dangerous Isles of Scilly. He did so in order to reduce his transit time to Milford Haven, Wales, by two hours. The vessel could only enter the harbor at Milford Haven when the tide was high enough to accommodate its large draft;\cite[-1in]{Liberia} if it missed its scheduled arrival, the ship would have to wait five days for the next favorable tide.\cite[-0.7in]{Hall} The \textit{Torrey Canyon}'s approach to the Isles of Scilly was unknowingly altered overnight by strong currents. Once aware, the crew attempted to correct their course but were impeded by fishing vessels in the area and the autopilot, which had accidentally been left on and was overriding all manual input.\sidenote{Republic of Liberia. (1967).} The \textit{Torrey Canyon} grounded on Pollard's Rock on the Seven Stones reef, spilling nearly 32 million gallons of crude oil in the world's first major oil spill.\cite{Cartner}
}
\par{% oil changed flag to port
The \textit{Torrey Canyon} oil spill illustrated the socialization of risk --- that when ships become huge and cargo becomes dangerous, the risk is transferred from the country the vessel registers in to the countries the vessel operates in.\cite{Crittenden} After the incident, the IMO convened the International Convention for the Prevention of Pollution from Ships (MARPOL) that resulted in MARPOL 73/78 entering into force in 1983.\cite{Ozcayir} The countries at risk, termed port States, did not trust that the new regulations would be effectively implemented and enforced by flag States or, by extension, classification societies. Feeling the pain of recent oil spills, fourteen European countries signed the Memorandum of Understanding (MoU) on Port State Control in Paris in 1982.\cite{Ehlers}
}
\par{% port state control description
Port State control (PSC), authorized by UNCLOS 1982 Article 25(2), consists of boarding and inspecting visiting foreign vessels and detaining them until any identified deficiencies are fixed.\cite{Carioua} A single nation cannot effectively implement PSC, however, as the substandard vessel would just divert operations to other nearby port States that were not implementing inspections (``ports of convenience'').\cite{Palma} Therefore, regional PSC regimes are necessary, with each nation agreeing to inspect a percentage of vessels visiting its ports and to share the results of the inspections with the other nations. The Paris MoU, now consisting of 27 countries, requires its members to inspect 25\% of the vessels that visit their ports. Using the Paris MoU as a model, other regional PSC agreements have been reached around the world.\cite{Hare} The Tokyo agreement requires a 50\% inspection rate;\cite{Yang} countries in poorer areas of the world do not have the resources to support such a high rate and inspect only 10\% of vessels.\cite{MOU_indian}
}
\par{% no requirement to exert PSC
By holding foreign vessels to the standards of the port State, the effect of lax flag State implementation is somewhat ameliorated. Exerting PSC is considered the last safety net and should not be relied on in place of flag State control.\sidenote{IMO. (2016). Port State Control. Retrieved July 16, 2016, from \url{http://www.imo.org/en/OurWork/MSAS/Pages/PortStateControl.aspx}} The \textit{responsibilities} of the flag State are required by IMO convention; the same IMO convention grants \textit{rights}, not responsibilities, to the port State. Therefore, there is no international requirement for a port State to take on any responsibility for the safety of foreign vessels visiting its ports.\cite{Konig}
}
\par{% psc not effective
If a vessel is detained by a port State, the penalty falls only on the shipowner with the cargo owners, broker, shipper, insurer, and flag State facing no fees and/or sanctions. Therefore, a PSC detention does not incentivize others in the industry to eliminate substandard shipping.\cite{SSY} Major costs to the shipowner include the loss of revenue while the vessel is detained and the high cost of urgent repairs that may be even higher if undertaken in an expensive country such as the U.S. where labor costs are high.\cite{Rajadurai} So why does a shipowner risk a PSC detention and its singularly focused penalties? The answer is that continually operating in compliance with regulations costs much more than low-probability detentions.\cite{OECD_2002}
}
\par{% still cause problems with compliant shipowners
The majority of shipowners aim for compliance with domestic and international regulations, viewing safety as a long-term benefit to business. Substandard shipowners purposely choose flags of convenience, restrict their operations to poor areas of the world, and apply market pressure on classification societies, all to avoid compliance with international safety and pollution prevention regulations. While substandard vessels account for a small proportion of vessels operating today, they exert a strong downward force on the overall safety of the maritime industry.\cite{Boisson}
}
%-----------------------------------------------------------------------------------
\section{Toward Self-Regulation}
\par{% cost saving to be substandard
Because substandard ships save a significant amount of money by avoiding regulations, they are able to offer lower rates.\cite{OECD_2002} The Organization for Economic Co-operation and Development (OECD) conducted a study in 1994 to determine the amount of savings a substandard vessel is able to achieve and found that:\cite{OECD_1996}
\begin{itemize}
\item Compared to a vessel minimally complying with regulations, the savings were over 15\% of daily operating costs.
\item Compared to a vessel operating within standard practice (slightly above minimum compliance), the savings were over 25\% of daily operating costs.
\item Compared to a vessel operating above and beyond compliance, the savings were over 60\% of daily operating costs.
\end{itemize}
The economic advantage gained by substandard vessels drives law-abiding shipowners to operate at minimum compliance in order to remain competitive. While international safety regulations ``represent the highest practicable standards that can reasonably be imposed on the shipping industry'', they should be viewed as the bare minimum required of reputable operators.\cite[-0.6in]{Reynolds} The result is that minimally compliant ships operate at the boundary of safety with little to prevent what Rasmussen calls the ``drift to failure.''\cite[-0.14in]{Rasmussen}
}
\par{% safety culture
The threat of drifting to failure should not be underestimated, as Charles Perrow --- who pioneered the concept of normal accidents --- describes shipping as an error-inducing system, in that:\cite[-0.2in]{Perrow}
\begin{quotation}
Ships operate where most of nature and most of man conspire to ravage them. The navigation rules have developed to aid the courts in finding fault rather than aiding the ships in avoiding accidents; production pressures are often extreme; the working conditions are debilitating\textellipsis the equipment is complex and barely maintained; captains refuse to establish radio contact with a foreign vessel that is about to hit them because they are foreign\textellipsis and there is an authoritarian organization structure that belies the interdependency and complexity of the operators and the system.
\end{quotation}
Whether a shipowner settles for minimum compliance or continually combats the potential for error determines his company's safety culture.
}
\par{
The term ``safety culture'' was coined in the 1980s after the Chernobyl accident and can be thought of as ``the way we do things around here.''\cite{CBI} A more elaborate definition is given by the Advisory Committee on the Safety of Nuclear Installations:\cite{HSC}
\begin{quotation}
The safety culture of an organization is the product of individual and group values, attitudes, perceptions, competencies, and patterns of behavior that determine the commitment to, and the style and proficiency of, an organization's health and safety management.
\end{quotation}
Shipping companies can be grouped into three categories of safety cultures: avoidance culture, compliance culture, and safety culture. Companies with an avoidance culture actively search for ways to avoid regulations; they operate substandard ships and use market forces to evade high standards. Companies with a compliance culture meet only the minimum requirements and do so at the lowest cost possible. Those with a safety culture seek to enhance safety and view it as an investment in the company.\cite[-0.75in]{Bhattacharya} Sociology researcher Westrum called these three cultures pathological, bureaucratic, and generative and described the varying ways they treat safety information in Table \ref{tab:culture}.\cite[-0.7in]{Elliott}\cite[-0.15in]{Westrum}
%\begin{fullwidth}
%\begin{center}
\begin{table}
\begin{tabular}{p{3cm}p{3.2cm}p{3cm}}
\hline
\textbf{Pathological} & \textbf{Bureaucratic} & \textbf{Generative} \\
\hline
\hline
Don't want to know & May not find out & Actively seek it \\
\hline
Messengers are ``shot'' & Messengers are listened to if they arrive & Messengers are trained and rewarded \\
\hline
Responsibility is avoided & Responsibility is compartmentalized & Responsibility is shared \\
\hline
Failure is punished or hidden & Failures lead to local repairs & Failures lead to far-reaching reforms \\
\hline
New ideas are discouraged & New ideas often present problems & New ideas are welcomed \\
\hline
\end{tabular}
\vspace{0.1in}
\caption{Safety information is either rejected, tolerated, or welcomed. }
\label{tab:culture}
\end{table}
}
%\end{center}
%\end{fullwidth}
\par{% herald accident
The need for a safety culture in shipping became apparent after the 1987 \textit{Herald of Free Enterprise} capsizing. On March 6 of that year, the car and passenger ferry was readying to leave port in Zeebrugge, Belgium to cross the English Channel. The vessel made four crossings a day and was behind schedule on this particular crossing. The ship was a roll-on/roll-off vessel whose bow doors opened to allow vehicles to drive on and off the ship's car decks. The Zeebrugge dock, however, was not designed for the \textit{Herald}'s class of vessel. To accommodate loading, the \textit{Herald} took on ballast water to lower herself and align with Zeebrugge's loading ramp. Once the vessel was fully loaded, she departed Belgium to return to England.
}
\par{% herald accident
As the ship left the dock, the bow doors remained open. The crew member who was responsible for closing the doors had fallen asleep on break and did not hear the order to prepare for departure. The captain, unaware that the bow doors were open, accelerated to the maximum allowed speed in order to make up time and remain on schedule. With the vessel sitting lower in the water due to the ballast taken on at loading, the open bow doors allowing water onto the car decks, and the high speed of the vessel, only one kilometer out from port, the \textit{Herald} capsized in 90 seconds, killing 193 people.\cite{MAIB_investigation}
}
\par{% human error and organizational error
Rather than rushing to blame the \textit{Herald}'s crew, investigators examined the latent conditions that at best failed to prevent and at worst encouraged the crew's errors. The British Department of Transport concluded:\sidenote{U.K. MAIB. (1987). pp. 14.}
\begin{quotation}
A full investigation into the circumstances of the accident led inexorably to the conclusion that the underlying or cardinal faults lay higher up in the organization. The Board of Directors did not appreciate their responsibility for the safe management of their ships \textellipsis From top to bottom the body corporate was infected with the disease of sloppiness.
\end{quotation}
The immediate cause of the disaster was human error. Human error, however, is a consequence of the safety culture within which one works.\cite{Reason_1997} In the 1980s, the IMO began to recognize the need to expand beyond purely technical regulations and to promote improved operating practices.
}
\subsection{The Role of the Shipowner and the Seafarer}
\par{% management responsible
In the same way that it is easy to blame a ship's crew for a disaster, it is easy to blame regulators for a company's lack of compliance. James Reason, a giant in the field of human error, argues that while regulators are accused of ``lax oversight and overly collusive relationships'' with the companies they regulate, they:\sidenote{Reason. (1997). pp. 174.}
\newpage
\begin{quotation}
attempt to penetrate the boundaries of the regulated organizations by requesting certain kinds of information and by making periodic site visits. But these kinds of strategies can only provide isolated glimpses of the organization's activities. Size, complexity, the peculiarities of organizational jargon, the rapid development of technology, and, on occasions, deliberate obfuscation all combine to make it difficult for the regulator to gain a comprehensive and in-depth view of the way in which an organization really conducts its business.
\end{quotation}
So rather than attempt to improve operating practices through prescriptive rules, the IMO broke from tradition and decided to set safety \textit{goals} and leave the precise steps to be taken to reach the goals up to the individual companies.
}
\par{% ISM code
In an effort to integrate on-board and shore-side management, the IMO adopted resolution A.647(16), Guidelines on Management for the Safe Operation of Ships and for Pollution Prevention, better known as the International Safety Management (ISM) Code, in 1989. Initially, the ISM Code was voluntary but became mandatory after it was incorporated into the SOLAS convention by amendment in 1994. The code requires that a shipping company create a safety management system (SMS) for each vessel it operates that includes the policies and procedures the crew and shore-side management should follow in order to comply with international regulations. The SMS must be certified by the flag State of the vessel or a delegated classification society and is then valid for five years.\cite{Boisson}\cite{Anderson}
}
\par{% legal accountability
The code requires the company to appoint a Designated Person Ashore (DPA) who monitors the safety of the vessel and has direct and full access to senior management. The DPA creates an accountability link between the vessel and the management so that if the company is implicated in a fatality or files an insurance claim, the possibility of proving negligence is improved and the criminal and civil consequences are more severe.\cite{Boisson} With the heightened threat of legal liability, the DPA generally requires a large amount of reporting to be completed on-board in order to create a paper trail. Paperwork takes a substantial amount of time away from actual work, including mentoring, and is seen as a burden by seafarers. Knudsen found one captain who ``admitted filling in forms in accordance with the rules regardless of the facts'' and another who reported that, rather than using the SMS checklists during a shift, ``you do the work, and then you fill in for maybe three days all at once.''\cite{Knudsen}
}
\par{% safety production tradeoff
It is tempting to blame the crew for not completing the forms as intended, but this behavior is a consequence of the safety culture created, in large part, by management. While the DPA requires large amounts of SMS paperwork, the company is also demanding a high level of production from the crew. McLain and Jarrell put this conflict in terms of safety-production compatibility, which is the ``perception that a specific amount of effort results in the achievement of multiple goals and a decision to sacrifice some goals to achieve others will not be needed.'' When the company requires increased safety but does not provide additional resources, it is assuming that there is extra capacity and that the same level of production can be maintained.\cite{McLain} The minimum number of crew required to man a vessel is determined by the flag State, and, because manning accounts for up to 50\% of operating costs, shipowners flag out to open registries which have accepted dangerously low minimum manning levels.\cite{Balyk}
}
\par{% tradeoffs
Seafarers, overworked due to cuts in manning, do not have the capacity to take on additional safety tasks, and, therefore, time pressures and a high workload are often the cause of safety violations.\cite{Lawton} The safety-production \textit{incompatibility} requires the crew to continually make trade-offs between safety and completing work tasks. The presence of two conflicting goals and the necessary sacrifice of one for the benefit of the other is called the efficiency-thoroughness trade-off (ETTO).\cite{Hollnagel} After analyzing the \textit{Titanic} and the \textit{Costa Concordia}, Schroder-Hinrichs \etal found that ``the dilemma facing sharp end operators is that they are supposed to be efficient rather than thorough except in cases where the outcome shows that they should have been thorough rather than efficient.''\cite{Schroder-Hinrichs}
}
\par{% routine violations
The ISM Code is often seen by seafarers as legal cover for management rather than a real contribution to safety. Therefore, the worker may perceive the risk involved in a violation of the SMS as small and acceptable to achieve the desired level of production.\cite{Mascini} As deviations become the norm, sacrifices of safety for production often go unrecognized, which leads to a higher level of risk being present than the worker would knowingly accept.\cite{Woods} Normalized routine violations are what fuel the company's drift to failure.
}
\par{% global market and abuse
For their part, seafarers have very little power to effect change in the industry. Open registries allow shipowners to hire crew from around the world, with the cheapest labor coming from developing nations. Many shipowners, even from traditional maritime nations, rely on third-party ship management companies to handle the day-to-day operations of their vessels, including hiring the crew. Third-party ship management companies, which have no financial interest in the ship, may then delegate hiring to recruitment agencies in the labor-supplying nations.\cite[-0.25in]{Walters} In the early 2000s, 22\% of seafarers came from the Philippines while only 0.5\% came from the U.S. The developing nations often do not have the resources and infrastructure to provide high-quality maritime education and training (MET).\cite[-0.3in]{Sampson} Mariners are generally recruited on short-term employment contracts not lasting more than a year and face the threat of unemployment if they speak out against working conditions.\sidenote{Walters and Bailey. (2013).} The International Commission on Shipping reported in 2000 that recruitment agencies were abusing seafarers in a number of ways, including sharing black-lists of those who had appealed to the International Transport Workers' Federation (ITF) --- the seafarers' union.\cite{ICOS}
}
\par{% dissatisfaction
Due to the stressful conditions aboard a vessel, the industry sees turnover rates as high as 35\% and will have an estimated shortfall of over 90,000 seafarers by 2020. This contributes to both the lack of trained seafarers and the missing sense of camaraderie that can increase job satisfaction and performance. The high turnover disincentivizes shipping companies from investing in MET while studies have shown that investment in training and promotion can increase retention.\cite{Yuena} Rotation in crew also results in the loss of ``company knowledge'' and hinders implementation of the company's safety management system.\cite{Bhattacharya}
}
\subsection{Safety Culture and COLREGS Compliance}
\par{% why more accidents don't happen
With the difficulty of enforcing external regulation and the economic disincentive of self-regulation, one might expect to hear about accidents daily. While not widely publicized, there are, in fact, quite a lot of casualties; in 2017 alone, there were 3,301.\cite{EMSA} The public, however, only hears about major casualties and those with a large loss of life, such as the South Korean ferry \textit{Sewol}, which capsized in 2014 killing over 300 people, mostly schoolchildren.\cite{Kim} A disaster of that magnitude requires a series of errors to occur in what Reason termed the Swiss cheese model of accidents. This model describes each step in a process as having the potential for error like a piece of Swiss cheese with holes in it; only when the holes in each layer align will a disaster occur. Each time a vessel commits a safety violation, such as a \textsc{colregs} violation, it is putting more holes in the Swiss cheese and increasing the probability of a large casualty.\cite{Reason}
}
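\par{% illustrative arithmetic for the swiss cheese model
As a rough illustration of the model (and only an illustration, since it assumes the defensive layers fail independently), the probability that the holes in all $n$ layers line up at once is the product of the individual failure probabilities, $P(\mathrm{accident}) = \prod_{i=1}^{n} p_i$. Four layers that each fail 10\% of the time yield a probability of $10^{-4}$; routinely violating one of them, for instance by keeping an inadequate lookout, effectively removes a layer and raises that figure tenfold.
}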
\par{% 300-29-1 ratio
The occurrence of a major accident, when all the holes in the Swiss cheese line up, is relatively rare. The conditions and/or behavior that precipitated the accident, however, are not. H.W. Heinrich conducted a study of industrial accidents and found that, on average, ``in a unit group of 330 accidents of the same kind and involving the same person, 300 result in no injuries, 29 in minor injuries, and 1 in a major lost-time injury.''\cite{Heinrich} This 300-29-1 ratio of accidents suggests that for every \textsc{colregs} violation that results in a collision, there are many more that do not.\cite{Jones}
}
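\par{% heinrich ratio as a worked example
Read as a worked example (Heinrich's figures are averages from shore-side industry, not maritime data), the ratio implies that a given unsafe act has roughly a $1/330 \approx 0.3\%$ chance of ending in a major injury, so several hundred uneventful repetitions can be expected for every serious outcome. Near misses are therefore far more plentiful, and far more available for study, than casualties.
}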
\par{% what turns a near miss into an accident
Motivated to find the precursors of accidents following the \textit{Columbia} disaster, the National Academy of Engineering produced a report on the characteristics of near misses finding that a near miss is kept from becoming an accident if either an exacerbating factor is missing or a mitigating factor is present.\cite{Phimister} The majority of maritime collisions involve a violation of the \textsc{colregs}.\cite{Statheros} A violation of the \textsc{colregs} can contribute to a casualty as the exacerbating factor to a repeated unsafe act or as the repeated unsafe act itself, needing only an additional exacerbating factor or the failure of a mitigating factor.
}
\par{% why arent near misses reported
Recognizing the learning opportunity near misses provide, the ISM Code requires vessels to report near misses to management so that the company can learn from them and correct operations before an accident occurs. It has been reported, however, that near miss reporting is one of the largest failures of the code.\cite{Lappalainen} The lack of near miss reporting is again a product of a poor safety culture; either the crew fears blame and punishment from management or they do not view a near miss as an event worthy of reporting but rather as an unavoidable occurrence.\cite{Cambria} One ECDIS\sidenote{Electronic Chart Display and Information System} and VDR\sidenote{Voyage Data Recorder} provider, Totem Plus, includes an E-navigation Data Auditing (EDA) tool and, in a pilot project, found that a single car-carrier was involved in 18 near-misses in a single month.\cite{Riveria}
}
\par{% navy collisions
The importance of learning from near misses is exemplified by the high-profile June 17, 2017 collision between the U.S. Navy destroyer \textit{USS Fitzgerald} and the container ship \textit{MV ACX Crystal} that killed 7 sailors.\cite{Faturechi} The immediate cause was human error born of what an investigation into the incident called ``a culture of complacency.'' Due to a lack of training, the navigation radar had been incorrectly tuned so that nearby ships did not appear on the display and auto-tracking of vessels had been turned off. In addition, the AIS display was only available on a laptop that could not be moved and that constantly crashed.\cite{Ziezulewicz} On the night of the collision, neither vessel was aware of the other until a collision was unavoidable --- in fact, it is believed the \textit{Crystal} was sailing on autopilot and was not aware of the \textit{Fitzgerald} until after the collision.\cite{Martime_Executive} The \textit{Fitzgerald} was not following the area's normal traffic patterns, was not broadcasting on AIS, and attempted no radio contact or sound signals.
}
\par{% near misses
The theory of industrial accidents --- the 300-29-1 pyramid --- would suggest that with the \textit{Fitzgerald} in such a poor state of readiness, a number of near misses and eventually a collision could be expected. In fact, only five weeks prior to the June 17 collision, the \textit{Fitzgerald} was involved in two back-to-back near misses. On the night of May 10, the officer of the deck (OOD)\sidenote{The OOD is the officer in control of the ship in the absence of the captain.} narrowly evaded colliding with a fishing vessel by altering course. Upon deciding a collision risk existed, the OOD failed to sound a warning, and once the danger had passed, she failed to inform the captain of the incident; this was the same OOD that was on duty during the June 17 collision. The following night, May 11, a different OOD had to again evade a fishing vessel but had chosen to alter speed, sound an alarm, and inform the captain. These near-misses were not discussed with the crew, let alone the Navy at large, and the underlying causes were not immediately investigated.\cite{Miller}
}
\par{% systemic issues
Investigating the cause of these near misses may have provided the information necessary to avoid the \textit{Fitzgerald}'s collision and the \textit{USS John S. McCain}'s collision three months later, sparing 17 lives. Additionally, such an investigation would have uncovered evidence of fleet-wide issues, such as poor manning, training, and maintenance. Deming's 85-15 rule asserts that only 15\% of the problems the \textit{Fitzgerald} encountered could be blamed on the crew, while the remaining 85\% were systemic issues and the responsibility of management:
\begin{quote}
The supposition is prevalent throughout the world that there would be no problems in production or service if only our production workers would do their jobs in the way we taught. Pleasant dreams. The workers are handicapped by the system and the system belongs to the management.\cite{Manuele}
\end{quote}
Investigations into how a U.S. Navy warship could have such a poor state of readiness soon exposed that the entire surface fleet lacked a safety culture and that senior Navy officials were the driving force. A 2010 report produced for the Fleet Forces commander at the time sounded an early warning:
\begin{quote}
The material readiness of the surface force is well below acceptable levels to support reliable, sustained operations at sea and preserve ships to their full service life expectancy...the totality of changes in manpower and manning; training; material readiness; and chain of command oversight caused unintended consequences that have been detrimental to the overall readiness of the surface force...There is limited evidence to identify any changes that were made with surface force readiness as the top priority --- efficiency was sought over effectiveness.\cite{Balisle}
\end{quote}
These concerns were repeatedly raised by a three-star admiral, Thomas Copeman, who was then asked to retire early by the Chief of Naval Operations. In 2016, the Undersecretary of the Navy expressed concerns over readiness to her boss, Secretary of the Navy Ray Mabus, but was warned not to talk to Congress.\cite{Faturechi}
}
\par{
The \textit{Fitzgerald}'s collision and the safety culture that allowed it to happen demonstrate again the safety-production incompatibility. The Navy, with limited funds, is expected to meet the high operational tempo demanded by the United States' foreign policy while maintaining high levels of training, maintenance, and sailor well-being. The \textit{Fitzgerald} demonstrates that the occurrence of near misses may not only indicate a bad safety culture with regards to navigational safety, but potentially a systemic and dangerous disregard for the perils of seafaring and the constant effort required to overcome them.
}
% ===============================================================================
% SITUATIONAL AWARENESS
% ===============================================================================
\chapter{Situational Awareness}
\label{ch:awareness}
\par{% turn to tech to help
In a majority of marine casualties, human error is a hole in the Swiss cheese of safety. Over 70\% of that human error is related to the lack of situational awareness,\cite{Grech} which is defined as:
\begin{quotation}
the \textit{perception} of the elements in the environment within a volume of time and space, the \textit{comprehension} of their meaning, and the \textit{projection} of their status in the near future [emphasis added].\cite{Endsley}
\end{quotation}
Mariners must be spatially aware of their dynamic location and relationship to the external environment while achieving some goal(s) under time pressure. Navigation in coastal waters is especially demanding since it corresponds with increases in (1) the volume of data the mariner must perceive and comprehend and (2) the number of collision risks the mariners must evaluate and mitigate. The information requirements may include:\cite{Cummings}
\begin{multicols}{2}
\begin{itemize}
\item Current speed and heading
\item Current and expected depth along projected path
\item Current and expected visibility along projected path
\item Current and expected weather and currents
\item Visual navigation lanes
\item Hazardous/restricted areas
\item Planned course
\item Start and final destination or goal location on map
\item Areas where collision is possible or uncertain with obstacles, \eg{shoals, reefs}
\item Location of all surrounding contacts
\item Each contact's bearing, speed, and whether the range is opening/closing
\item Contact path: past, present, and future
\item Contact location on path
\item Marking to distinguish contacts with Automatic Identification System (AIS) data
\item When and where ownship is on a projected collision course with a contact
\end{itemize}
\end{multicols}
}
\newpage
\par{% not aware of other vessel
A Marine Accident Investigation Branch (MAIB) analysis of accidents from 1994 to 2003 found that 19\% of vessels were unaware of the other vessel and another 24\% only became aware of the other vessel after collision was unavoidable. This lack of awareness implies the bridge was not maintaining a proper lookout and was improperly using radar in violation of Rule 5 of the \textsc{colregs}. A main issue in both collisions and groundings was fatigue of the sole watchkeeper aboard vessels that were in compliance with safe manning regulations.\cite{MAIB_2004} The use of automation (\eg{radar, automatic radar plotting aid, automatic identification system, electronic chart display and information system, autopilot}) to decrease workload is often used as justification for such low manning levels.
}
\par{% out of the loop
However, rather than decrease the workload, automation introduces the new tasks of constant supervision and occasional intervention by a human that is increasingly left ``out-of-the-loop.'' The discrepancy between a human's understanding of system state and the actual system state is a cause of poor decision-making and will expand as changes and camouflaged failures in the system go unnoticed by the monitor.\cite{Woods_1988} The recognition that automation may reduce situational awareness originated in aviation during the development of the ``glass cockpit''. When advanced automation was introduced into the cockpit, there were several controlled flight into terrain accidents, where pilots who had lost situational awareness flew their planes into the ground not realizing they were doing so.\cite{Ishibash} The maritime navigation bridge is similarly susceptible to technology-assisted casualties. Using technology to fix one pathway to error while simultaneously introducing another is an example of how shipping is a complex and error-inducing system. Since automation is often used as justification for reduced manning, it is vital that, as the bridge becomes more and more automated, both designers and management pay special attention to training, worker fatigue, ambiguous collision regulations, and human limitations.
}
\section{Radar}
\par{% detection
A vital aspect of maintaining situational awareness is detecting and tracking other vessels in one's vicinity. In good weather with clear visibility, this can be accomplished through visual lookout.\cite{Oudet} Once visibility is reduced, however, sight becomes insufficient to detect vessels.\cite[.1in]{1966} Before electronic aids to navigation were implemented, operating in fog required vessels to slow to a safe speed and even to a standstill in some cases as determined by the International Rule 16(b), 33 U.S.C. \textsection 1077(b) (1976):
\newpage
\begin{quotation}
A power-driven vessel hearing, apparently forward of her beam, the fog-signal of a vessel the position of which is not ascertained shall so far as the circumstances of the case admit, stop her engines, and then navigate with caution until danger of collision is over.
\end{quotation}
On the North American trade route, which experiences heavy fog, voyages could double, even triple, in length if regulations on safe speed were followed.\cite{Letulle} Due to the negative impact on productivity, it was common knowledge that ``the rule [was] more honored in the breach than in the observance.''\sidenote{Anglo-Saxon Petroleum Co. v. United States, 222 F.2d, 75, 78 (2d Cir 1955).} The merchant fleet needed a way to see through the fog and turned to a new technology that promised to be the ``all seeing eye.''\sidenote{Letulle. pp. 162.}
}
\par{% radars was first example of automation surprise and overreliance
Radar was developed during World War II and was instrumental in detecting enemy airplanes, ships, and submarines. It became available to the merchant fleet at the end of the war for navigation purposes.\cite{Luse} While radar can help in limited visibility, it is also susceptible to clutter\sidenote{the accumulation on the screen of unwanted echoes} which originates from waves and precipitation and can mask the echoes from vessels and fixed hazards. Another limitation of radar is that it only detects objects in the line of sight of the signal; vessels that are below the radar horizon or behind an obstruction will not be detected.\cite{Bole}
}
\par{%
Some merchant mariners thought that radar would bring an end to collisions; much to their surprise, collisions actually increased in the decade following the introduction of commercial radar.\cite{Volk} Perhaps the most interesting fact about this increase was that 78\% of the collisions involved radar-equipped vessels, including the 1956 collision between the \textit{Stockholm} and \textit{Andrea Doria} in what is considered to be the world's first major radar-assisted collision. The collision involved two ocean liners in the open sea that had each detected the other on radar. Confident in knowing the other's location, each vessel maintained a relatively high speed. An officer on the \textit{Stockholm}, however, misread his radar scale and believed the \textit{Andrea Doria} to be farther away than she was.\cite[-1in]{PBS} Once aware they were on a collision course, and without the guidance later codified in the 1972 \textsc{colregs}, the \textit{Andrea Doria} turned to port and the \textit{Stockholm} turned to starboard, which closed rather than widened the gap between them. Had radar not been in use, the collision would likely never have happened.\cite[-0.7in]{Goldstein}
}
\par{% over-reliance on radar
In addition to misunderstanding the radar display, mariners were also guilty of over-relying on radar and disregarding other sources of information.\cite[-0.8in]{Stewart}\cite[-0.3in]{Schmidt} Representative of this danger is the 1947 collision in the North Sea between the \textit{Wilson Victory} and the fishing trawler \textit{The Bucentaur}\sidenote{\textit{The Bucentaur} lost all hands in the collision.}. Because the trawler was low-lying, she was not visible on the \textit{Wilson Victory}'s radar; however, the collision occurred in a popular fishing location. If radar had not been in use, the pilot of the \textit{Wilson Victory} admitted he would have reduced his speed.\cite[-0.3in]{Wood_v_US} The salience of the clear radar display caused the pilot to forget the limitations of his radar and his knowledge of the area. Similarly, the radar operator aboard the \textit{USS Fitzgerald} trusted his clear radar display even though they were sailing through a busy waterway.
}
\par{% radar plotting is time consuming
In the event that the navigator did detect vessels on his radar, using it for collision avoidance required the time-consuming and error-prone plotting of what was called the speed triangle. The speed triangle allowed the navigator to determine the position, course, and speed of the target vessel relative to his own, but it had to be computed for every target vessel in the vicinity and after any course/speed alterations. Due to the laborious nature of manual plotting, it was often not done completely or not done at all.\cite{Parsons}
}
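\par{% sketch of the speed triangle arithmetic
To make the burden concrete, the sketch below shows the vector arithmetic behind the speed triangle: the target's true velocity is the ownship's velocity plus the relative motion observed between successive radar plots. It is only a minimal illustration with hypothetical function names, not a reconstruction of any historical plotting procedure; the navigator performed the equivalent by hand, with dividers and a plotting sheet, for every contact in the vicinity.
\begin{lstlisting}
import math

def polar_to_xy(course_deg, speed_kn):
    # Convert a course (degrees true) and speed into east/north components.
    rad = math.radians(course_deg)
    return speed_kn * math.sin(rad), speed_kn * math.cos(rad)

def true_target_motion(own_course, own_speed, rel_course, rel_speed):
    # Speed triangle: true target vector = own vector + observed relative vector.
    ox, oy = polar_to_xy(own_course, own_speed)
    rx, ry = polar_to_xy(rel_course, rel_speed)
    tx, ty = ox + rx, oy + ry
    course = math.degrees(math.atan2(tx, ty)) % 360.0
    speed = math.hypot(tx, ty)
    return course, speed
\end{lstlisting}
}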
\par{% arpa invention
On top of plotting, the navigator had to predict the trajectories of the target ships to determine if any collision risk existed. If a collision risk did exist, the navigator had to plan his own evasive maneuver and predict how the target vessels would react in response.\cite{Peterson} In 1969, the automatic radar plotting aid (ARPA) was introduced. The purpose of ARPA is to improve collision avoidance by reducing the workload of radar observers. The technology automatically acquires target ships, completes the plotting calculations, computes trial maneuvers, and displays the information in a user-friendly manner to allow the navigator to maintain situational awareness.\cite{Bole} A decrease in collisions was observed following the mid-1980s IMO mandate requiring that ARPA be fitted on-board vessels.\cite{Tiblin}
}
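\par{% sketch of the cpa/tcpa calculation arpa automates
The core of what ARPA automates can be sketched in a few lines. The function below is a minimal illustration rather than any vendor's actual implementation: hypothetical names, flat-earth coordinates in nautical miles and knots, and the same simplifying assumption ARPA itself makes, namely that both vessels hold their course and speed. It returns the closest point of approach and the time remaining until it occurs; a CPA alarm of the kind discussed below is then a single comparison of the returned distance against a chosen limit.
\begin{lstlisting}
import math

def cpa_tcpa(own_pos, own_vel, tgt_pos, tgt_vel):
    # Relative position and velocity of the target with respect to ownship.
    rx, ry = tgt_pos[0] - own_pos[0], tgt_pos[1] - own_pos[1]
    vx, vy = tgt_vel[0] - own_vel[0], tgt_vel[1] - own_vel[1]
    v2 = vx * vx + vy * vy
    if v2 == 0.0:
        # Identical velocities: the range never changes.
        return math.hypot(rx, ry), 0.0
    # Time (hours) at which the relative distance is smallest.
    tcpa = max(-(rx * vx + ry * vy) / v2, 0.0)
    cpa = math.hypot(rx + vx * tcpa, ry + vy * tcpa)
    return cpa, tcpa
\end{lstlisting}
}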
\par{% arpa assisted collisions
As with radar, there is a potential to over-rely on ARPA's functionality. ARPA predicts the closest point of approach between the ownship and a target ship assuming both keep their same course and speed. Using this information, a navigator may deem there to be no risk of collision and subsequently stop minding the target vessel. The other vessel, however, may have come to a different conclusion. This type of ARPA-assisted collision occurred between the container ship \textit{Werder Bremen} and the tanker \textit{Martina} in 2000. The \textit{Werder Bremen} felt comfortable passing the \textit{Martina} starboard-to-starboard at a distance of 0.5M, but while they paid her no further attention, the \textit{Martina}, believing there to be a risk of head-on collision, maneuvered for a port-to-port passing. The vessels collided and the \textit{Martina} was cut in two, killing 5 crew.\cite[-0.5in]{SMA}
}
\par{% vigilance - royal majesty dead reckoning
Radar and ARPA are meant to give the navigator precise information in a timely manner. However, as exemplified in the previous examples, their use can lead to automation-induced complacency --- the ``psychological state characterized by a low index of suspicion.''\cite{Wiener} But even with a high index of suspicion, it is effectively impossible for a human to maintain vigilance while monitoring a system for low-frequency events for more than half an hour. This vigilance decrement is hugely important, since rather than reduce workload, automation is changing the active role of the human navigator into a passive system monitor. While the phenomenon of vigilance decrement has been known since the 1950s, technology-centered design fails to mitigate its effect. One attempt to aid the monitor is the use of alarms or changes in display.\cite{Parasuraman} An example is a lower limit on the closest point of approach; if a vessel is closer than the limit, an alarm sounds. However, the \textit{MV ACX Crystal} was on autopilot until its collision with the \textit{Fitzgerald}; either its CPA alarm failed to sound or was ineffective in getting the attention of the crew. Recent research into the use of alarms has found that ``alarm fatigue'' causes crew to dismiss alarms without investigation.\cite{Shipowners_club} Additional findings were summarized as follows:
\begin{itemize}
\item 89\% of participants thought false alarms were a problem.
\item 66\% said the alarms were not easily detectable.
\item 57\% of respondents disagreed that alarms are graded by sound.
\item 50\% of participants reported some frustration with the format of the alarms themselves. Of particular concern was the fact that sounds are frequently the same tone for all alarms with no distinguishing factors between alarm systems.
\item 77\% of crew do not want to be disturbed from their watch keeping duties.
\item 24\% of participants reported that they never or seldom engaged the Bridge Navigational Watch Alarm System due to their concerns at frequent false alarms.
\end{itemize}
}
\par{% royal majesty
Another example of both complacency and poor design is the grounding of the cruise ship \textit{Royal Majesty} off the coast of Nantucket Island, Massachusetts in 1995. After leaving Bermuda and turning on autopilot, the officers plotted the vessel's GPS position every hour to ensure she remained on her intended course which was indicated on ARPA. When they arrived at the entrance to the Boston traffic lanes --- which are marked on nautical charts and by buoys with radar beacons --- they expected to see the ``BA'' buoy to their port. The chief officer detected the anticipated buoy on radar and continued on. In fact, the buoy the \textit{Royal Majesty} had passed was the ``AR'' buoy, which marks an underwater wreck and was 15 miles west of their intended course.\cite[-0.2in]{Degani}
\begin{figure}
\centering
\includegraphics[width=1\textwidth]{royalmajesty.png}
\caption[The \textit{Royal Majesty} mistook the ``AR'' buoy for the ``BA''.]{The \textit{Royal Majesty} mistook the ``AR'' buoy for the ``BA''. Reprinted from Degani, A. (2004).}
\label{fig:chart}
\forceversofloat
\end{figure}
\begin{marginfigure}
\centering
\includegraphics{dr.png}
\caption[The ``DR'' signifies that the unit is displaying latitude/longitude based on dead reckoning and not GPS.]{The ``DR'' signifies that the unit is displaying latitude/longitude based on dead reckoning and not GPS. Reprinted from Degani, A. (2004).}
\label{fig:dr}
\end{marginfigure}
\vspace{-0.1in}
What the officers had failed to notice each time they recorded their GPS position was the presence of the small letters ``DR'' on the screen (see Figure \ref{fig:dr}). DR stood for dead reckoning and indicated that the latitude and longitude shown were not based on GPS but were predictions based on speed and heading. The cable to the GPS antenna had been knocked loose soon after the ship's departure from Bermuda, and the GPS unit had ``gracefully degraded'' to dead reckoning without much alarm. GPS is not the only source of navigational information on-board a modern ship, but the prior reliability of the GPS had caused so much complacency that the crew ignored the sight of unexpected lights --- which marked the Rose and Crown Shoal --- and ignored two fishing boats that had attempted to hail her.\cite{NTSB_1997}
}
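\par{% sketch of a dead-reckoned fix
A dead-reckoned fix of the kind the degraded receiver fell back on is easy to sketch. The function below is a minimal, flat-earth illustration with hypothetical names, taking one nautical mile as one minute of latitude. Because it advances the position using only course and speed, any set and drift from current or leeway accumulates silently, which is how a displayed position can wander well off track over a long passage without any obvious sign of failure.
\begin{lstlisting}
import math

def dead_reckon(lat_deg, lon_deg, course_deg, speed_kn, hours):
    # Advance a position using only course and speed (no GPS, no current).
    rad = math.radians(course_deg)
    dist_nm = speed_kn * hours
    dlat = dist_nm * math.cos(rad) / 60.0   # 1 nm is 1 minute of latitude
    dlon = dist_nm * math.sin(rad) / 60.0 / math.cos(math.radians(lat_deg))
    return lat_deg + dlat, lon_deg + dlon
\end{lstlisting}
}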
\par{% fixation
The crew of the \textit{Royal Majesty} demonstrated the fixation on one source of data to the exclusion of other contradictory information. The same fixation can be seen to overshadow even expert knowledge as demonstrated by Lee and Sanquist who discovered that in simulations of head-on interactions close to shore, where the \textsc{colregs} required the mariner to turn to starboard, ARPA displayed information suggesting that the mariner turn to port -- away from both the other vessel and from shore. Some mariners disregarded the \textsc{colregs} and followed ARPA's suggestion, thereby reestablishing the collision course.\cite[-0.75in]{Lee} ARPA-assisted collisions exemplify how human operators can mistake an automated piece of the system as being and/or displaying the entire system. In order to supply more system information, shore-side support is available in some high traffic ports in the form of vessel traffic systems.
}
% --------------------------------------------------------------------------------------
\section{Vessel Traffic Services}
\par{% radar at ports, Liverpool and Rotterdam back to sit awareness
Ports were early adopters of radar in assisting navigation in reduced visibility from an on-shore location. The first instance was the Harbour Supervision Radar (HSR) at the Port of Liverpool in 1948.\sidenote{The Mersey Estuary had a separate shore-side radar system for the Wallasey Corporation Ferries so that they could maintain their schedules.}\cite{Satow} The objective of the HSR was to assist the ``navigation of a ship through channels and into ports under conditions of bad visibility when limitations of the shipborne radar and existing navigational aids would render the passage or movement of the ship hazardous.''\cite{Fennessy} The Port of Rotterdam was the next to embrace shore-side radar when, in 1956, it installed seven radar stations along the coast of the Nieuwe Waterweg to provide navigation information to pilots when visibility decreased.\cite{Prins} By 1967, the Port of Rotterdam had reduced its fog-related accident rate by 75\% while its traffic density doubled.\cite{Exxon}
}
\par{% san francisco harbor advisory radar project
The main purpose of shore-side radar was to provide information to the mariner. The long-standing law of freedom of navigation had instilled in mariners a sense of autonomy and a strong resistance to any instruction on how to maneuver. For this reason, coordinated shore-side support in maritime traffic lagged far behind that in aviation. But with ports becoming more congested with oil tankers and the 1967 \textit{Torrey Canyon} oil spill, the United States Coast Guard (USCG) installed an experimental Harbor Advisory Radar (HAR) in the San Francisco Bay area in 1969. The HAR was a voluntary service in which the operators were only legally allowed to inform participating vessels of the position and general direction of the vessels observed by radar, as well as weather conditions. Interpretative information, such as course, speed, and closest point of approach (CPA), was not permitted to be provided. The USCG used the metrics of decreased transit time, improved scheduling, and increased safety to judge the effectiveness of the system. It was estimated that the HAR would save ``1000 ship hours per year, an average of 20 minutes for each U.S. ship calling at San Francisco, and \textellipsis at least \$90,000 (\texttildelow\$600,000 in 2019 dollars) per year'' from decreases in marine accidents.\cite[-0.5in]{Geonatuics}
}
\par{% collision
In January 1971, two oil tankers --- the \textit{Oregon Standard} and the \textit{Arizona Standard} --- collided in dense fog just west of the Golden Gate Bridge, spilling about 840,000 gallons of partially refined oil.\cite[-0.5in]{AP} The \textit{Oregon Standard} was not monitoring the radio channel over which HAR information was broadcast. The \textit{Arizona Standard} was made aware of the \textit{Oregon Standard} by the HAR operator but did not receive any direction on how to avoid a collision.\cite[-0.1in]{NTSB_1971} This event illuminated the inadequacies of the Harbor Advisory Radar and was the impetus for the Ports and Waterways Safety Act (PWSA) of 1972.
}
\par{% VTS origins
PWSA gave the USCG the new responsibility of regulating traffic and the authority to direct vessels through Vessel Traffic Service (VTS) centers.\cite{Rue} Not long after, in 1979, the Government Accountability Office (GAO) released a report critical of the Coast Guard's VTS cost/benefit analysis and recommended that less expensive alternatives be used rather than continuing to develop more sophisticated systems.\cite{GAO_1979} A decade later, the degradation of VTS was found partly to blame for the \textit{Exxon Valdez} oil spill.\sidenote{Roberts, K. H. and Moore, W. H. (1992).} The issue was a familiar one: the VTS radar was not set to the right range and therefore could not track the \textit{Exxon Valdez} during its entire voyage.
}
\par{%
While the Prince William Sound VTS had radar that could have monitored the \textit{Exxon Valdez} if correctly configured, the Oil Pollution Act of 1990 required the Prince William Sound VTS to expand its service beyond the radar coverage area.\sidenote{33 U.S.C. 2734} Up until that time, VTS relied on radar and voice communications, both of which have inherent limitations. When tracking a vessel, radar provides no way to correlate the radar observations to a specific vessel; the VTS operator must match the vessel's identity to its radar track through voice communication, which is limited by language and can become ambiguous in high traffic areas.\cite{Lin}
}
\par{% ADS
To fulfill OPA '90 requirements, VTS operators needed (1) a way to detect vessels that was not subject to radar's limitations and (2) a way to positively identify vessels without resorting to voice communications. The USCG developed an automated dependent surveillance (ADS) capability that could obtain a vessel's movement data and transmit it to the VTS when interrogated. Implemented in Prince William Sound in 1994, ADS was independent of radar, voiceless, and correlated a vessel's position with its identity. The transmission included the vessel's latitude, longitude, course, speed, and unique identification number obtained through an on-board differential GPS receiver.\sidenote[][-1.4in]{Differential GPS (dGPS) was the USCG's solution to the U.S. military's degradation of GPS in 1990. The technique uses ground-based reference stations to calculate and transmit the offset built into the military's GPS and restore the system's accuracy for navigation purposes.} Once obtained, the data was transmitted to the VTS over digital selective calling (DSC), which uses digital data rather than voice to increase the range and accuracy of the transmission.\cite[-0.7in]{Radice}\cite{Harre}
}
\par{% DSC capacity problems
Using DSC for the Prince William Sound ADS was appropriate due to the low number of transits occurring.\sidenote{Prince William Sound VTS expected a maximum of four simultaneous transits.} The high number of vessels operating in other geographical areas, however, would overload DSC.\cite{Johnson} To increase the number of messages that could be transmitted on one channel, Sweden proposed a self-organizing time-division multiple access (SOTDMA) transmission protocol that was accepted by the IMO in 1997; in 1998, the IMO published performance standards for the Universal Automatic Identification System (AIS).\cite[-0.5in]{Norris} For a transponder-based system to be called ``AIS'', it must meet these IMO standards.\cite[-0.2in]{TRB} While ADS was one-way and only transmitted information from ship to shore, AIS is two-way and allows information exchange between ships, shore stations, and aids to navigation (AtoN). ADS was selective point-to-point calling; AIS is a wide-area broadcast.
}
\par{% expansion of VTS
In the late 1990s, Congress directed the USCG to determine the minimum user requirements for VTS.\cite{House} This new effort was named the Ports and Waterways Safety Solution (PAWSS). As part of PAWSS, the USCG assembled a national dialog group consisting of domestic and international maritime and port community stakeholders. The group's objective was to provide guidance on ``(1) the information needs of a mariner to ensure a safe passage, (2) the process that should be used to identify candidate ports for the installation of VTS systems, and (3) the basic elements of a VTS, where such a system is determined to be necessary.''\cite{Marine_Board} With regard to item (3), the group recommended that AIS be the foundation of VTS because it voicelessly provides timely and relevant information.\sidenote{Marine Board, Commission on Engineering and Technical Systems, Division on Engineering and Physical Sciences. (1999).}\cite{GAO_2009}
}
\par{AIS positively identifies vessels and removes the need for voice calls to ``vessel on my port bow'' or other ambiguously addressed vessels to establish a ship's name or call-sign. This capability reduces the demand for voice radio and makes communications quicker and clearer. AIS accomplishes this through the automatic ``exchange of navigational information between suitably equipped vessels and shore stations using distinct messages \textellipsis operating on two designated marine VHF channels.''\cite{NMEA} AIS messages are divided into four types: (1) static data, (2) dynamic data, (3) voyage data, and (4) short safety messages. The category determines the time interval between transmissions as well as the data source.\cite{TRB}
\begin{description}
\item[Static Data] the vessel's IMO number, call-sign, length, beam, type, and location of the on-board position-fixing antenna are manually entered only once, upon installation; static messages are sent every six minutes.
\item[Voyage Data] the vessel's draft, cargo type, destination, and estimated time of arrival are manually entered once before each voyage; voyage messages are sent every six minutes and any time the data is updated.
\item[Dynamic Data] the vessel's position, course, and speed are obtained through on-board sensors; dynamic messages are sent every 2 seconds to 3 minutes depending on the vessel's course and speed. The ship's latitude, longitude, speed and course over ground, and timestamp are obtained from the main position sensor (usually GPS). The navigation status is manually updated by the navigation officer.
\item[Short Safety Messages] consist of short navigation-related text messages that are sent as needed.
\end{description}
Using AIS in conjunction with radar, VTS centers and ships are able to detect, identify, and track vessels and other navigational hazards.
\begin{figure}
\centering
\includegraphics[width=1.00\textwidth]{preNAIS.jpg}
\caption[AIS coverage in the U.S. as of 1 October 2006.]{AIS coverage in the U.S. as of 1 October 2006. Reprinted from GAO. (2009). GAO-09-337.}
\label{fig:preNAIS}
\forceversofloat
\end{figure}
}
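\par{% illustrative AIS record sketch
To make the four message categories concrete, the sketch below represents each as a simple record type. The field names and the noted transmission intervals mirror the description above, but this is an illustrative simplification, not the official AIS message layout.
\begin{minted}{python}
from dataclasses import dataclass

# Simplified, hypothetical record types mirroring the four AIS message
# categories described above; field names are illustrative only.

@dataclass
class StaticData:       # entered once at installation; sent every 6 minutes
    mmsi: int
    imo_number: int
    call_sign: str
    length_m: float
    beam_m: float
    ship_type: str

@dataclass
class VoyageData:       # entered before each voyage; sent every 6 minutes
    draft_m: float
    cargo_type: str
    destination: str
    eta: str

@dataclass
class DynamicData:      # from on-board sensors; sent every 2 s to 3 min
    lat: float
    lon: float
    speed_over_ground_kn: float
    course_over_ground_deg: float
    timestamp: str
    nav_status: str     # manually updated by the navigation officer
\end{minted}
}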
\par{% implementation timeline
In 2000, the IMO amended the Safety of Life at Sea (SOLAS) Convention to mandate that most merchant vessels carry AIS equipment by 2007.\cite{Swedish_Club} The IMO mandate applies only to shipborne AIS equipment; there is no requirement for nations to implement VTS or integrate AIS into existing VTS.\cite{IALA} While not an original objective of AIS, aiding national security became a prominent motivator of its implementation after the 9/11 attacks; in response, the 2002 SOLAS Convention brought the AIS carriage implementation deadline up to 31 December 2004.\cite{FR} This push to hasten the implementation of AIS has been blamed for poor AIS reliability and performance.\cite{Lu}
}
\par{
Today's VTS has four levels of control: monitor, inform, recommend, and direct. A VTS center monitors maritime traffic via radar, AIS, radio, and camera and informs vessels over radio. Most activity is at the levels of monitor and inform, though the VTS in the United States has the authority to direct vessels at any time. However, when the National Transportation Safety Board studied the effectiveness of VTS in 2016, it found that almost all of the VTS watchstanders interviewed reported that they were prohibited from directing a vessel's course or speed and that internal operating procedures reinforced this misunderstanding. The watchstanders view their role as advisory and do not want to distract mariners with unrequested information and/or recommendations. The main reason for this is a lack of confidence in the watchstanders' experience, by both the watchstanders themselves and the mariners, as USCG personnel rotate every two years and have limited experience at sea.\cite{NTSB_2016}
}
\par{% ovit
VTS watchstanders are, at any one time, monitoring multiple vessels, integrating information from a variety of sources, and communicating relevant information to vessels. They are prone to the same fatigue and complacency as on-board navigators. An example of a VTS-avoidable casualty is the grounding of the \textit{Ovit}, a chemical tanker that in 2013 was transiting the Dover Strait, within which lies the Varne Bank. Prior to departing, the navigator of the \textit{Ovit} entered the passage plan into the electronic chart display and information system (ECDIS), which checked the route for safety. The ECDIS did indeed warn that the route contained a grounding danger (see Figure \ref{fig:ovit}), but due to poor design and poor vigilance, the navigator missed the warning.
\vspace{.1in}
\begin{figure}
\centering
\includegraphics[width=1.00\textwidth]{ovit.png}
\caption[The navigator mistook the ``no alerts'' message for safety even though the window was titled ``The selected route is not safe'' and grounding was listed as an alarm.]{The navigator mistook the ``no alerts'' message for safety even though the window was titled ``The selected route is not safe'' and grounding was listed as an alarm. Reprinted from MAIB. (2014).}
\label{fig:ovit}
\end{figure}
}
\par{
The VTS in the Dover Strait, the Channel Navigation Information Service (CNIS), was alerted both audibly and visually when the \textit{Ovit} entered the Varne Bank alerting zone. The operator, however, was communicating with another vessel and distractedly authorized the \textit{Ovit} to be in the zone, which cancelled the alarm. Aboard the \textit{Ovit}, a lookout saw the lights but failed to identify the bank, and the ship ran aground. It took 19 minutes for the officer of the watch to realize he was aground. Initially, he thought there was a problem with the engines. The following conversation occurred between the \textit{Ovit} and the CNIS:
\begin{description}
\item[CNIS] Ovit, Ovit, this is Dover Coastguard, channel 11, over
\item[Ovit] Yes, this is Ovit, go ahead please
\item[CNIS] Ovit, this is Dover Coastguard, according to our radar, sir, you may be on the Varne Bank, is everything OK on board sir?
\item[Ovit] Yes, we have an engine breakdown problem, but I think in 5 minutes it will be OK
\item[CNIS] Roger sir, that is understood, what is your current depth of water, over?
\item[Ovit] My present draught is 7.9m, 7.9m, over
\item[CNIS] Negative sir, what is the under keel clearance, over?
\item[Ovit, after a pause] It's approximately 10m, the under keel clearance
\item[CNIS] Roger sir, this is Dover Coastguard, what is the nature of your engine difficulty over?
\end{description}
This is an extreme example of how long it can take an out-of-the-loop monitor to diagnose a failure. Luckily there was little damage, but this casualty demonstrates that technology does not always aid the situational awareness of either the on-board crew or the on-shore monitors.\cite{MAIB_Ovit} It also demonstrates how the crew did not knowingly choose a risky route and were unaware of the danger they were in. The technology knew of the danger but failed to effectively communicate the information to the operator.
}
\par{%
The goal of technology that integrates raw data from multiple sources is to combine that data into relevant and coherent information and then present it so that the operator can make an informed and timely decision.\cite{Holsopple} This requires that the technology understand the context within which the mariner is working, including his limitations.
}
% ============================================================================
% TRAFFIC CONFLICT METHOD
% ============================================================================
\chapter{Literature Review}
\par{% safety pyramid
In the 1950s, research into industrial accidents established the idea that frequent unsafe acts precede injuries and fatalities.\cite{Heinrich} The theory is that if the contributing factors to the unsafe act are not removed, an accident will eventually occur when an additional factor is present or a mitigating factor is absent. In the 1960s, Perkins and Harris\cite{Perkins} of the General Motors Corporation hypothesized that traffic collisions are preceded by several less severe traffic conflicts (see Figure \ref{fig:hyden}). They defined a traffic conflict to be the presence of an evasive maneuver during an interaction between vehicles or a traffic violation by a single vehicle.
}
\begin{figure}
\centering
\includegraphics[width=0.95\textwidth]{collisionPyramid.png}
\caption[Traffic conflicts happen more frequently than fatal accidents.]{Traffic conflicts happen more frequently than fatal accidents. Reprinted from Laureshyn and Varhelyi, (2018).}
\label{fig:hyden}
\end{figure}
\par{% definition
In the United States, intersections account for over 50\% of crashes involving injuries and fatalities\cite{FHWA} and are an obvious place to look for conflicts as, within the intersection, road users have conflicting requirements of the same space. The GM traffic conflict technique defined the following categories for an intersection: left-turn conflict, cross-traffic conflict, rear-end conflict, weave conflict, and red-light violation. Evasive actions consisted of either braking, weaving, or a combination of the two. To generalize the traffic conflict technique (TCT) to other road locations and to other countries, the International Cooperation of Traffic Conflict Techniques Association (ICTCTA) defined a traffic conflict as:
\begin{quotation}
an observable situation in which two or more road users approach each other in space and time to such an extent that there is a risk of collision if their movements remain unchanged.\cite{Amundsen}
\end{quotation}
This definition implies that every traffic conflict will result in a collision if no evasive maneuver is made. The proximity to the collision, in space and time, determines the severity of the conflict. The most commonly used measure is time-to-collision --- the temporal distance between the first evasive maneuver and the impending collision. The earlier the action is taken, the less severe the conflict.\cite{Laureshyn} Another common measure is the deceleration rate, with higher rates of deceleration signifying higher severity conflicts.\cite{Johnsson}
}
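\par{% proximity measure sketch
As a rough illustration of these proximity measures, the sketch below computes a constant-velocity time-to-collision and combines it with the deceleration rate into a toy severity rating; the thresholds are arbitrary placeholders, not values drawn from the traffic conflict literature.
\begin{minted}{python}
def time_to_collision(gap_m, closing_speed_ms):
    """Constant-velocity time-to-collision: seconds until impact if neither
    road user changes speed or course."""
    if closing_speed_ms <= 0:          # the users are not actually closing
        return float("inf")
    return gap_m / closing_speed_ms

def conflict_severity(ttc_s, decel_ms2):
    """Toy severity rating: a later evasive maneuver (small TTC) or harder
    braking means a more severe conflict. Thresholds are illustrative only."""
    if ttc_s < 1.5 or decel_ms2 > 6.0:
        return "severe"
    if ttc_s < 3.0 or decel_ms2 > 3.0:
        return "moderate"
    return "slight"

# Example: a 30 m gap closing at 12 m/s, with braking at 4 m/s^2
print(conflict_severity(time_to_collision(30, 12), 4.0))   # "moderate"
\end{minted}
}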
\par{% importance of violations
Notably, the ICTCTA definition does not classify rule violations as a traffic conflict. I believe this to be an oversight. Understanding how traffic rules are being violated can lead to better system design and better enforcement measures. Informal traffic rules are not taught through education but rather develop as a social norm through repeated observation.\cite{Deehy} If a violation is an informal rule, it may actually be improving safety. On the other hand, several studies have shown that traffic violation convictions predict whether a driver will be involved in a subsequent crash --- even more so than the driver's previous crash history.\cite{Barraclough} An interesting example of the two-sided nature of a violation is the case of red-light running. The Federal Highway Administration conducted a study on the effectiveness of red-light cameras and found that while cross-traffic collisions decreased by 25\%, rear-end collisions increased by 15\%.\cite{FHWA_redlight} The ability of violations to (1) communicate what is expected of other road users and (2) identify potentially unsafe human behavior, depending on their frequency, will be utilized in this dissertation.
}
\par{% difference between vehicles and ships
The concept of traffic conflict extends easily to maritime traffic. Dangerous encounters and near misses occur more frequently than collisions; conflicts are identifiable through evasive maneuvers and violations of the \textsc{colregs}. The major difference between applications is the notion of proximity. Vehicles are similar in maneuverability and maintain close proximity during normal operations. Ships, on the other hand, require significantly more space and time to maneuver. Rather than attempting to avoid collisions, ships aim to avoid getting ``too close.'' The minimum passing distance that a navigator allows may depend on the size, speed, and maneuverability of his vessel and that of the approaching vessel, as well as the geographical location and environment. The size of the passing distance determines the severity of a conflict.\cite{Debnath}
}
\section{Ship Domain}
\label{sec:shipdomain}
\par{% tss
The same year that Perkins and Harris published their traffic conflict technique, the IMO established the Dover Strait traffic separation scheme, the first of its kind. Prior to the implementation of traffic separation schemes, mariners in busy harbors and straits had to rely on the ordinary practice of seamen or make passing arrangements over the radio.\cite{Beattie} Similar to roads, the IMO defines a traffic separation scheme as ``a scheme which separates traffic proceeding in opposite or nearly opposite directions, by the use of a separation zone or line, traffic lanes, or by other means.''\cite[-0.2in]{Llana} To increase safety, the \textsc{colregs} allow nations with high traffic areas to propose a traffic separation scheme to the IMO for approval.\cite{Caminos} When proposing a traffic separation scheme, the IMO instructs Governments to provide information on traffic patterns, volume of traffic, and vessel interactions in the relevant area and to create schemes that require the fewest course alterations.\cite{IMO_guidance} This demand for information about maritime traffic behavior led to the development of the ship domain concept.
}
%\subsection{Ship Domain}
\begin{marginfigure}
\centering
\includegraphics{Fujii.png}
\caption[Fujii and Tanaka predicted that the density of vessels around the ownship goes from zero to a local maximum before leveling out.]{Fujii and Tanaka predicted that the density of vessels around the ownship goes from zero to a local maximum before leveling out. Reprinted from Fujii and Tanaka (1971). pp. 545.}
\label{fig:fujiiDomain}
\end{marginfigure}
\par{% fujii
In 1971, Fujii and Tanaka\cite{Fujii} investigated the capacity of a one-way channel in Tokyo Bay and observed that ships maintained a following distance between themselves and the vessel in front of them; they called this distance the ``effective domain.'' The authors hypothesized that a vessel repels near ships and attracts distant ships; the repulsion is motivated by collision avoidance, while the attraction of vessels can be explained by common origins, destinations, and trade routes. Fujii and Tanaka defined the boundary of the domain to be where the density of target vessels surrounding the ownship reaches a local maximum (Figure \ref{fig:fujiiDomain}). Using this definition and radar observations, the authors determined that the domain is an ellipse whose dimensions are based on ship length, $L$. Because the analysis did not consider head-on or crossing situations, the data showed no difference in domain size between the port and starboard sides.
}
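\par{% elliptical domain sketch
As a sketch of how an elliptical domain of this kind can be applied, the code below tests whether a target vessel falls inside an ellipse whose semi-axes are expressed in multiples of the ownship's length; the default axis values are placeholders, not Fujii and Tanaka's published dimensions.
\begin{minted}{python}
def in_elliptical_domain(dx_m, dy_m, ship_length_m,
                         semi_major_L=4.0, semi_minor_L=1.6):
    """Return True if a target at offset (dx, dy) metres from the ownship
    (dx along the heading, dy abeam) lies inside an elliptical domain whose
    semi-axes are multiples of the ownship length L. Axis values are
    illustrative placeholders."""
    a = semi_major_L * ship_length_m      # fore-aft semi-axis
    b = semi_minor_L * ship_length_m      # port-starboard semi-axis
    return (dx_m / a) ** 2 + (dy_m / b) ** 2 <= 1.0

# A target 300 m ahead and 100 m abeam of a 150 m ship
print(in_elliptical_domain(300, 100, 150))   # True: inside the domain
\end{minted}
}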
\begin{marginfigure}
\includegraphics{exampleGoodwin.jpg}
\caption[Example of different domain range for each sector.]{Example of different domain range for each sector. Reprinted from Goodwin, pp. 339.}
\label{fig:exampleGoodwin}
\end{marginfigure}
\par{% Importance of \textsc{colregs}
When crossing or encountering a vessel head-on at small distances, adherence to the rules of the road becomes vital. The \textsc{colregs} require that when encountering head-on, vessels should pass port-to-port; therefore, it can be expected that the ship domain would be smaller on the port side. The rules also state that any vessel overtaking is the give-way vessel and the vessel being overtaken is the stand-on vessel; therefore, it can be expected that the ship domain astern would be smaller than in other directions.\cite{USCG} Figure~\ref{fig:exampleGoodwin} shows an example of a domain range that is different for each sector of a vessel.
}
\par{% Goodwin
In 1975, Goodwin\cite{Goodwin} extended the notion of effective domain by adding head-on and crossing situations to her analysis. She broke the circle surrounding the vessel into three sectors:
\newpage
\begin{enumerate}
\item the starboard sector, $0^{\circ} \leq \theta \leq 112.5^{\circ}$,
\item the port sector, $247.5^{\circ} \leq \theta \leq 360^{\circ}$, and
\item the astern sector, $112.5^{\circ} < \theta < 247.5^{\circ}$.
\end{enumerate}
To test the domain's dependence on the \textsc{colregs}, Goodwin gathered data from traffic surveys and radar simulation exercises, with a maximum of four target ships and 0.25nm visibility. Taking each vessel, one at a time, Goodwin plotted the distance and bearing of all target vessels at specific points in time. Superimposing the plots for each ship and each time step --- which was taken to be every six minutes --- Goodwin established the typical distribution of vessels around the ownship as shown in Figure~\ref{fig:goodwinDomain}.
} \begin{marginfigure}
\includegraphics{goodwinDomain.jpg}
\caption[Distribution of other ships around the ownship.]{Distribution of other ships around the ownship. Reprinted from Goodwin, pp. 333.}
\label{fig:goodwinDomain}
\end{marginfigure}
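\par{% superposition sketch
A minimal sketch of the superposition step described above, assuming planar coordinates in metres and bearings measured clockwise from the ownship's bow, is shown below; it bins target positions by relative bearing and range in the spirit of Goodwin's plots, though the bin sizes are arbitrary.
\begin{minted}{python}
import math
from collections import Counter

def relative_polar(own_xy, own_heading_deg, target_xy):
    """Distance (m) and bearing (deg, clockwise from the ownship's bow) of a
    target, assuming a local planar x (east) / y (north) coordinate frame."""
    dx, dy = target_xy[0] - own_xy[0], target_xy[1] - own_xy[1]
    dist = math.hypot(dx, dy)
    bearing = (math.degrees(math.atan2(dx, dy)) - own_heading_deg) % 360
    return dist, bearing

def superimpose(observations, range_bin_m=500, bearing_bin_deg=10):
    """Count targets per (bearing, range) cell over many snapshots.
    `observations` holds one (own_xy, own_heading_deg, target_xy) tuple per
    target vessel per time step."""
    cells = Counter()
    for own_xy, hdg, tgt_xy in observations:
        dist, brg = relative_polar(own_xy, hdg, tgt_xy)
        cells[(int(brg // bearing_bin_deg), int(dist // range_bin_m))] += 1
    return cells

obs = [((0, 0), 0, (800, 1200)), ((0, 0), 0, (-600, 900))]
print(superimpose(obs))   # one target per occupied (bearing, range) cell
\end{minted}
}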
\par{% Goodwin
Goodwin was able to establish the existence of a ship domain by showing that the distribution of target ships around the ownship was not uniform; the number of ships was lower closer to the ownship and greater farther away from the ownship. The point at which the number of ships observed is equal to the number of ships expected given a uniform distribution, point $x_{A}$ in Figure~\ref{fig:presenceOfDomain}, is the boundary of the ship domain; Goodwin calls this distance ``domange.''
}
\begin{figure}
\centering
\includegraphics[width=0.90\textwidth]{uniformAssumption.jpg}
\caption[Schematic distribution of ships, given the presence of a domain, shows fewer ships close to and more ships farther from the ownship than would be expected with uniform density]{Schematic distribution of ships, given the presence of a domain, shows fewer ships close to and more ships farther from the ownship than would be expected with uniform density. Reprinted from Goodwin, pp. 334.}
\label{fig:presenceOfDomain}
\forceversofloat
\end{figure}
\par{% variation
Goodwin found that the domange for the three sectors varied with the type of sea area, the traffic density, and the length of the ship. Ship domains were largest in the open ocean, where there is unlimited room to maneuver, while the busy Dover Strait showed the smallest ship domains. High traffic density has the same effect as limited maneuvering space in that it forces vessels to pass each other closer than may be desired. Interestingly, Goodwin found that the size of the largest sector --- the starboard sector --- initially increases with ship length but then decreases. She suggests the reason for this non-monotone relationship is that ``large ships expect other ships to keep out of the way and thus take a more passive role in collision avoidance.''\sidenote{Goodwin. pp. 338}
}
\par{% Coldwell
Goodwin's analysis showed that navigators are more comfortable passing at closer distances on the port side compared to the starboard side (Figure~\ref{fig:exampleGoodwin}). Her analysis, however, dealt with waterways that did not place strict constraints on maneuverability. In 1983, Coldwell\cite{Coldwell} continued the investigation into the effects of the \textsc{colregs} on ship domain but this time in the context of restricted waterways. Coldwell defined the ship domain boundary the same way Fujii and Tanaka did --- as the distance from the ownship at which the density of target vessels reaches a local maximum --- and constructed the density of vessels around the ownship at three-minute intervals from 64 hours of radar survey data in the UK's Humber estuary. Bolstering Goodwin's findings, his analysis showed that when encountering another vessel head-on, around 80\% of vessels pass port-to-port and 20\% pass starboard-to-starboard, with the starboard side passing distance being nearly two times larger than that of port. Passing starboard-to-starboard in a head-on encounter is in violation of Rule 14 of the \textsc{colregs}; Coldwell suggests that because the starboard-to-starboard vessels are violating the rules, and therefore assuming larger liability, they enlarge their domain. Coldwell also found that the dimensions of the ship domain appear to increase as the size of the vessel increases but did not have enough data to support this observation.
}
\begin{marginfigure}
\centering
\includegraphics{davisDomain.jpg}
\caption[The Davis domain preserves the area and weighting of the Goodwin domain but makes the perimeter smooth.]{The Davis domain preserves the area and weighting of the Goodwin domain but makes the perimeter smooth. Reprinted from Davis \etal (1980). pp. 216.}
\label{fig:davisDomain}
\end{marginfigure}
\par{% Davis
As the motivation for defining the ship domain was to evaluate traffic separation schemes, Davis \etal\cite[.35in]{Davis} developed a ship domain that could be more easily used in marine traffic simulation models. The authors started with the logic behind Goodwin's domain model but found that the discontinuities at the boundaries of the domain sectors presented computational challenges. Davis \etal decided to transform the Goodwin domain into a circle with constant radius, preserving the total domain area rather than the domange shape. The center of the circle is offset from the ownship so that the weighting of the port, starboard, and stern sides of the domain is also preserved (see Figure~\ref{fig:davisDomain}). Davis \etal collected data for their analysis through questionnaires to experienced navigators. The navigators were presented with the case that, in open sea, (a) there is a target vessel on their starboard side and (b) there is a target vessel on their port side. In each case they were asked at what range they would take evasive action --- the alter-course distance (ACD) --- and what resultant passing distance --- the closest point of approach (CPA) --- they wanted to achieve. The results, in Table \ref{tab:acd}, confirmed Goodwin's conclusion that the ship domain is larger in the starboard sector.
\begin{table}[h]
\centering
\begin{tabular}{l|l l }
Average & Starboard Threat & Port Threat\\
\hline
ACD & 4.3nm & 2.6nm \\
CPA & 1.8nm & 1.6nm \\
\hline
\end{tabular}
\vspace{.1in}
\caption{The distances are larger when the ownship is the give-way vessel.}
\label{tab:acd}
\end{table}
}
\par{% fuzzy sets
In 1993, Zhao \etal\cite{Zhao_etal} applied the theory of personal space (or Proxemics) to ship domain by creating a ship-person with the brain of a navigator and the body of a ship. The authors considered the case where a navigator has determined a target ship's trajectory will minimally violate his domain. But as Fujii and Tanaka put it, the ship domain is ``more of a psychological barrier than a stone wall,''\sidenote{Fujii and Tanaka. pp. 544.} and since the ship behaves according to the navigator's psychological state, it is unlikely to deviate from its course to avoid a small violation. Zhao \etal used this scenario to suggest that the boundary of the ship domain is fuzzy. A fuzzy ship domain blurs the line between the distinct dangerous and safe zones put forward by previous researchers.
}
\par{% AI
Up until this point, the ship domain had been described using circles or ellipses. Recent efforts have described the ship domain using polygons with various numbers of vertices. Goodwin discretized her domain into three relative-bearing categories: starboard, port, and stern. She then described each category with a circle sector. Polygonal ship domains also discretize the relative bearing, but to a greater degree. Ship domains determined using artificial intelligence calculate the distances at each relative bearing from questionnaires and simulator exercises. These methods, however, require access to mariners and large numbers of learning sets.\cite{Pietrzykowski}\cite{Pietrzykowski_Uriasz} The benefit, though, is that self-reported data gives researchers insight into the effects of parameters such as ship size, ship maneuvering capabilities, waterway type, hydrological and meteorological conditions, own and relative speeds, and traffic density. In the past, determining the effect of these parameters through statistical methods suffered from a lack of data. With the introduction of automatic identification system (AIS) data, statistical ship domain analysis can now be done with more data and more accuracy.
}
\par{% Hansen
\begin{marginfigure}
\centering
\includegraphics{intensityLarger.png}
\caption[The intensity plot suggests the ship domain is an ellipse with length 8L and width 3.2L, where L is the ship length.]{The intensity plot suggests the ship domain is an ellipse with length 8L and width 3.2L, where L is the ship length. Reprinted from Hansen \etal. pp. 934.}
\label{fig:hansenDomain}
\end{marginfigure}
Hansen \etal\cite{Hansen} conducted an empirical ship domain analysis using AIS data spanning over five years from three geographical areas in Danish waters. Beginning their analysis as Goodwin did, the authors calculated the distance and bearing of all surrounding vessels from the ownship, normalizing the distance by the ship's length. To determine the size and shape of the average ship domain, they plotted the time spent by target vessels at various distances from the ownship, revealing an ellipse-shaped domain (see Figure \ref{fig:hansenDomain}). By visually inspecting the intensity plots, the authors determined that the domain extends 8 ship lengths fore and aft and 3.2 ship lengths port and starboard. Fujii and Tanaka's ship domain was $8L$ fore and aft and $3.5L$ port and starboard.
}
\par{% Mou
Rather than construct the entire ship domain, Mou \etal\cite{Mou_2010} used AIS data from vessels operating in the Port of Rotterdam traffic separation scheme to investigate the closest point of approach (CPA). The CPA is the distance between two ships when they are at their closest point. Mou \etal explored how the CPA is affected by the length, speed, and course of the vessel. The authors constructed three separate linear regression models for the variables and found that the CPA increased as (1) the vessel size increased, (2) the vessel speed decreased, and (3) the course difference between vessels increased.
}
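\par{% regression sketch
A hedged sketch of this kind of single-variable regression, with synthetic numbers standing in for the Rotterdam observations, could be set up as follows; the data and the fitted slopes are illustrative only.
\begin{minted}{python}
import numpy as np

# Synthetic stand-ins for observed encounters: vessel length (m), speed (kn),
# course difference (deg), and CPA (nm). Not Mou et al.'s data.
length      = np.array([100, 150, 200, 250, 300])
speed       = np.array([ 18,  16,  14,  12,  10])
course_diff = np.array([ 10,  25,  40,  60,  80])
cpa         = np.array([0.4, 0.6, 0.8, 1.0, 1.2])

# One simple linear regression per explanatory variable, as in the study.
for name, x in [("length", length), ("speed", speed),
                ("course difference", course_diff)]:
    slope, intercept = np.polyfit(x, cpa, 1)
    print(f"CPA vs {name}: slope = {slope:+.4f} nm per unit")
\end{minted}
}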
\par{% Gucma and Marcjan
\begin{marginfigure}
\centering
\includegraphics{GucmaDomain.jpg}
\caption[An example of a polygonal ship domain.]{An example of a polygonal ship domain. Reprinted from Gucma and Marcjan. (2012). pp. 36.}
\label{fig:gucDomain}
\forcerectofloat
\end{marginfigure}
Gucma and Marcjan\cite{Gucma} used AIS data to find the CPA distances observed over one year in the Gulf of Pomerania. The AIS data was divided by three ship types (passenger, cargo, and tanker) and then subdivided by three encounter types (crossing, overtaking, and head-on). For each pairing of ship and encounter type, the authors created the distribution of CPA distances at several relative bearings from the ownship. The expected value of the CPA for each bearing was selected as the boundary of the ship domain (see Figure~\ref{fig:gucDomain}). The authors concluded that the shape of the domain for an encounter type did not vary depending on vessel type, but that the size of the domain was larger for tankers than the other two types.
}
\par{% Zhang
The violation of ship domain as observed in AIS data is one basis for detecting near-misses. Together with the distance to the ship domain, Zhang \etal construct a vessel conflict ranking operator (VCRO) from the relative speed between vessels and the relative orientation of the ships. The VCRO model leaves out vessel type and size, which relate to the maneuverability of the vessel. The parameters of the VCRO model are estimated from Northern Baltic Sea AIS data. The higher the VCRO, the more severe the encounter; severe encounters are set aside for further expert analysis to determine whether they are near collisions.\cite{Zhang} This approach to near-miss detection adheres to the traffic conflict technique's assertion that proximity is the measure of a conflict. One paper that uses the presence of evasive action rather than proximity is that of Mestl \etal, who instead use extreme rate of turn (ROT) --- observations above the 99.999th percentile --- to detect near-misses.\sidenote{ROT above the 99.999th percentile corresponded to ROT greater than 150\textdegree/min.}\cite{Mestl} The authors suggest that filtering on high ROT is computationally cheaper than proximity analyses. One issue is the unreliability of the ROT transmitted by AIS; in fact, the ROT is not included in the NAIS data used in this dissertation.
}
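\par{% rate-of-turn screening sketch
A minimal sketch of the ROT-based screening idea, assuming AIS records carry a rate-of-turn field in degrees per minute, is shown below; the 150\textdegree/min threshold mirrors the figure quoted in the sidenote, but the field names are hypothetical.
\begin{minted}{python}
# Screen AIS records for extreme rate of turn (ROT) as a cheap first-pass
# near-miss filter. Records are dicts with hypothetical field names.
def extreme_rot_events(records, threshold_deg_per_min=150.0):
    return [r for r in records
            if abs(r["rot_deg_per_min"]) > threshold_deg_per_min]

records = [
    {"mmsi": 366709780, "timestamp": "2017-07-17 17:51", "rot_deg_per_min": 12.0},
    {"mmsi": 366701536, "timestamp": "2017-07-17 17:52", "rot_deg_per_min": 178.0},
]
print(extreme_rot_events(records))   # only the 178 deg/min record survives
\end{minted}
}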
\par{% ais
The definition of the ship domain remains a current area of research as it is an important tool in maritime planning. The availability of Automatic Identification System (AIS) data to researchers has opened a new avenue of investigation into maritime traffic, including ship domain, and is covered in more detail in Chapter \ref{ch:methods}.
}
% --------------------------------------------------------------------------------------------
\section{Rules of the Road}
\par{% rules v regulations % chauvin and Lardjane
While the International Regulations for Preventing Collisions at Sea (\textsc{colregs}) have \textit{regulation} in their name, they contain very few actual regulations and a majority of rules:
\begin{quotation}
Although in modern life regulations play a large part in the control of society and its systems, maritime collision avoidance is an apparently unique example of an industrial control system mainly governed by rules.\cite{Taylor_1988}
\end{quotation}
Regulations are a form of explicit, externally applied control; their text completely defines their interpretation. The text of a rule, by contrast, is ambiguous and requires observing the system it refers to in order to interpret its meaning.\sidenote{Taylor. (1998). pp. 67-72.} In the \textsc{colregs}, the designation of stand-on and give-way is considered a regulation, while the timing and degree of evasive maneuvers is a rule.
}
\par{% rules of thumb
Rules of thumb for interpreting the ambiguous rules were first published in 1965 and last published in 2012 by Captain A.N. Cockcroft and Jan Lameijer.\cite{Cockcroft} They suggest that alterations in course are more perceptible than alterations in speed and that an alteration of course should be at least 30\textdegree, preferably between 60\textdegree and 90\textdegree. They consider close-quarter situations to be those in which the give-way vessel has not started an evasive maneuver within 2 to 3 nautical miles from the stand-on vessel, but acknowledge that ``smaller or greater distances may apply depending on the size and maneuverability of the vessels and depending particularly upon the rate of approach''.\sidenote{Cockcroft and Lameijer. pp. 81.} If the stand-on vessel is to maneuver, they recommend that it avoid taking action that is likely to conflict with the give-way's potential maneuver.
}
\begin{marginfigure}[0.8in]
\centering
\includegraphics{give_way_pdf.png}
\caption[The ACD for the give-way vessel in Taylor's study peaked around 3.5nm.]{The ACD for the give-way vessel in Taylor's study peaked around 3.5nm. Reprinted from Taylor. (1990). pp. 241.}
\label{fig:taylorGW}
\forcerectofloat
\end{marginfigure}
\par{% taylor
In 1990, Taylor\cite{Taylor} investigated the range at which a vessel can be expected to maneuver, \ie, the alter-course distance (ACD). The probability that the give-way vessel has altered course is zero at large distances and grows to one as it approaches the stand-on vessel. As the give-way vessel comes within a range of 2nm, Taylor hypothesized that the stand-on vessel questions not \textit{when}, but \textit{whether}, the give-way vessel will take evasive action. At the point when the stand-on vessel does not believe the give-way vessel will alter course, it will take independent action to avoid a collision. To determine the expected ACD, Taylor constructed the probability that a give-way and a stand-on vessel will alter course at distance $x$, given that it has not yet altered course (Figures \ref{fig:taylorGW}, \ref{fig:taylorSO}). Taylor models both vessels' behavior as a power law, $qAx^{-r}$, where $q$ is the constant probability, $0 \leq q \leq 1$, that the give-way vessel will not give way. From the give-way vessel's point of view, $q=1$, since it has no expectation that the stand-on vessel will alter course. From the stand-on vessel's point of view, $q \ll 1$, since it has some doubt that the give-way vessel will evade. By assuming the constant $A$ is common for both vessels, Taylor solved for $q$ using 72 simulator observations from twelve mariners. Taylor concludes that the stand-on vessel believes that the give-way vessel will take evasive action 80\% of the time and that about 9\% of the time the stand-on vessel will maneuver first when the give-way vessel would have altered course, but at a smaller range.
}
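\par{% power-law fit sketch
To make the form of the model concrete, the short sketch below fits the power law $Ax^{-r}$ to hypothetical alter-course probabilities by least squares in log-log space; the numbers and the resulting coefficients are illustrative, not Taylor's.
\begin{minted}{python}
import numpy as np

# Hypothetical conditional probabilities of altering course at range x (nm),
# given no alteration so far. Illustrative values, not Taylor's data.
x = np.array([6.0, 5.0, 4.0, 3.0, 2.0])
p = np.array([0.05, 0.08, 0.13, 0.22, 0.40])

# Fit p = A * x**(-r) by ordinary least squares on log p vs log x.
slope, intercept = np.polyfit(np.log(x), np.log(p), 1)
A, r = np.exp(intercept), -slope
print(f"A = {A:.3f}, r = {r:.2f}")
print(f"predicted p at 2.5 nm: {A * 2.5 ** -r:.2f}")
\end{minted}
}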
\begin{marginfigure}
\centering
\includegraphics{stand_on_pdf.png}
\caption[The ACD for the stand-on vessel in Taylor's study peaked around 2nm.]{The ACD for the stand-on vessel in Taylor's study peaked around 2nm. Reprinted from Taylor, D.H. (1990). pp. 243.}
\label{fig:taylorSO}
\forcerectofloat
\end{marginfigure}
\par{% james
One limitation of Taylor's study is his assumption that $q$ is constant, \ie, the stand-on vessel's belief that the give-way vessel will alter course does not decrease as the give-way vessel gets closer. Critical of this assumption, James\cite{James} suggests using Bayes' law to update, at each distance, the stand-on vessel's perceived probability that the give-way vessel will act, based on its lack of action up to that point. The initial probability, $p_{\infty}$, is updated as the give-way vessel gets closer until it reaches a critical value, $p^*$, at which point the stand-on vessel will take action to avoid collision. Taking $p_{\infty}$ equal to 0.9 from Taylor's study and applying lognormal distributions and Bayesian updating to the same data, James finds that the stand-on vessel will alter course when it feels there is a 20\% chance the give-way vessel will eventually alter course, $p^*=0.2$.
}
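\par{% Bayes update sketch
Written out, the update step takes a simple form. A minimal sketch, assuming the stand-on vessel treats ``no action observed by range $x$'' as evidence and uses the give-way vessel's alter-course distribution as the likelihood, is
\[
p(x) = \frac{p_{\infty}\,\Pr(\text{no action by } x \mid \text{will act})}
{p_{\infty}\,\Pr(\text{no action by } x \mid \text{will act}) + (1 - p_{\infty})},
\]
where the second term in the denominator carries likelihood one because a vessel that will never give way certainly shows no action. As the range closes without action being observed, $p(x)$ falls from $p_{\infty}$ until it reaches the critical value $p^{*}$, at which point the stand-on vessel maneuvers.
}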
\par{% give way trade off
Due to the regulations in the \textsc{colregs}, action by the stand-on vessel is a clear signal that it does not believe the give-way vessel will alter course. Understanding the give-way vessel's beliefs is not so straightforward because the rules require early and substantial action. The give-way vessel must compromise between its goal to maintain course and its obligation to take early action. As seen in Taylor's study, what may appear to be a violation --- the stand-on vessel taking first action --- may actually be a difference in interpretation of the meaning of \textit{early action}.
}
\section{Informal Rules}
\par{% Kemp
\begin{marginfigure}[1.95in]
\centering
\includegraphics{kemp_headon.png}
\caption[Experienced mariner behavior is predictable in a direct head-on encounter.]{Experienced mariner behavior is predictable in a direct head-on encounter. Reprinted from Kemp. (1973). pp. 419.}
\label{fig:kempHeadOn}
\forcerectofloat
\end{marginfigure}
In 1973, Kemp began questioning whether violations of the \textsc{colregs} could be reduced by updating them to be more in line with ``what mariners consider to be the best and most expedient action in certain situations.''\cite{Kemp_1973} Kemp's goal was to identify the types of encounters in which mariners were likely to disregard the \textsc{colregs} and which actions the mariners took instead. Using a marine radar simulator, he observed the behavior of experienced mariners and na\"ive non-mariners in a head-on encounter. He found that experienced subjects all altered course to starboard in accordance with the \textsc{colregs}, while na\"ive subjects' behavior was unpredictable (see Figure \ref{fig:kempHeadOn}). Interestingly, the behavior of the subjects switched in a nearly-head-on encounter when the initial CPA of less than 2nm was on the starboard side. In this case, all na\"ive subjects kept the CPA on the starboard side, while about half of the experienced subjects altered course to starboard to force a port-to-port passing (see Figure \ref{fig:kempNearlyHeadOn}). For a threat on the starboard bow, both groups' behavior was the same --- alter course to pass astern of the target vessel. For the experienced mariners, this action follows the guidance of the \textsc{colregs}. Lacking the \textsc{colregs}, the na\"ive subjects appear to be following the natural rule: pass astern of the other vessel in the most efficient manner possible. This inference is supported by the groups' behavior in the case of a port-bow threat. In this case, in which the ownship is the stand-on vessel according to the \textsc{colregs}, the experienced mariners' behavior was unpredictable compared to the na\"ive subjects' (see Figure \ref{fig:kempPort}). From these observations, Kemp concluded that varying levels of understanding of the \textsc{colregs} may lead to negative action in collision avoidance --- that in some cases the \textsc{colregs} are contradictory to the principles of good seamanship. The natural law that Kemp discovered became the basis of John Wilde Crosbie's 2009 proposal to change the \textsc{colregs} to allow either vessel to maneuver to avoid collision, with the only restriction being that the maneuvering vessel should not cross ahead of the other.\cite[0.4in]{Crosbie}
\begin{marginfigure}[-7.4in]
\centering
\includegraphics{kemp_nearlyHeadon.png}
\caption[Experienced mariner behavior is split in an initial starboard-to-starboard passing encounter.]{Experienced mariner behavior is split in an initial starboard-to-starboard passing encounter. Reprinted from Kemp. (1973). pp. 419.}
\label{fig:kempNearlyHeadOn}
\forcerectofloat
\end{marginfigure}
\begin{marginfigure}[-3.2in]
\centering
\includegraphics{kemp_port.png}
\caption[The na\"ive subjects' behavior is predictable in crossing encounters.]{The na\"ive subjects' behavior is predictable in crossing encounters. Reprinted from Kemp. (1973). pp. 422.}
\label{fig:kempPort}
\forcerectofloat
\end{marginfigure}
}
\par{
In 1997, R.D. Pike called attention to the fact that when the \textsc{colregs} were developed, the majority of vessels traveled at similar speeds. In an encounter between a high-speed craft and, say, a cargo vessel, the time and distance to collision between the two vessels decreases rapidly.\cite{Pike} Chauvin and Lardjane's analysis of 62 crossing interactions between cargo vessels and ferries in the Dover Strait showed that when the give-way vessel is a ferry, it makes an evasive maneuver 94\% of the time; on the other hand, when the give-way vessel is a cargo vessel, it makes an evasive maneuver only 67\% of the time. The probability that the give-way cargo vessel will take the first action increases as the cargo vessel's speed increases. This relates to the maneuverability of the vessel; at slower speeds, reduced water pressure on the rudder makes it harder for large cargo vessels to maneuver. The formal regulation in a crossing situation is that ``the vessel which has the other one on her own starboard side shall keep out of the way'';\cite{USCG} the informal rule uncovered in this study is that the faster vessel shall keep out of the way regardless of the position of the vessels.\cite{Chauvin}
}
\par{% rewrite colregs
To a vessel moving at high speed, a slow-moving vessel is seen more as a fixed object to be avoided than as another vessel with which to coordinate; the slower vessel would have to make a very large, costly maneuver, whereas the quicker vessel can make a slight alteration to achieve the same result. Pike enumerated several methods to reduce the risk of collisions involving variable-speed vessels, including the immediate reduction of speed to a `safe' speed upon detecting a collision risk, creating high-speed-craft-only areas, increasing the detectability of high-speed craft, and requiring the high-speed craft to take evasive action in all encounters.\sidenote{Pike. (1997).} Ferries and other high-speed craft are known to alter their course at large distances from slower vessels so as to remove the risk of collision and, therefore, the applicability of the \textsc{colregs}. Determining when the \textsc{colregs} apply, however, is ambiguous, as their intended goal is not only to avoid collisions but also to avoid the risk of collision; therefore, they apply before the risk of collision exists.\cite{Zhao} In congested waterways it would be difficult to argue that a risk of collision does not perpetually exist.
}
\par{% congested simultaneous
In fact, in congested waterways, a risk of collision often exists between several vessels simultaneously. The \textsc{colregs} expect a vessel to sequentially avoid a collision with vessels in order of their imminence. The \textsc{colregs} do not protect a navigator who simultaneously avoids two collisions if, in doing so, he violates a rule. An action in accordance with the \textsc{colregs} that avoids a collision with one vessel but creates or increases a collision risk with a second vessel, however, goes against the principles of good seamanship. In the early 2000s, the frequency with which a vessel found herself the give-way vessel with respect to one target vessel and the stand-on vessel with respect to a second target vessel led to renewed criticism of the \textsc{colregs} and Crosbie's call to remove the designation of ``stand-on''.\cite{Stitt}
}
\par{With so many researchers and mariners documenting the inability of the \textsc{colregs} to effectively avoid collisions in today's complex operating environment, one might expect the frequency of collisions to be increasing. The lack of such an increase may, perhaps, be attributable to the fact that mariners are not adhering to the \textsc{colregs} but rather to some other set of unwritten rules --- rules which developed through necessity and which, while not necessarily defensible in court, are successful in avoiding collisions.
}
\section{Summary}
\par{% summary
The literature review covers over 40 years of research. Investigative methods include shore-side radar surveys, mariner questionnaires, radar simulators, and, recently, historical AIS data. The site-specific studies took place in Japan, the U.K., Denmark, the Netherlands, and the Baltic Sea, each with its own traffic patterns, traffic separation schemes, vessel traffic services, and safety culture. Below are the main takeaways from the literature review regarding this dissertation's research questions.
\newpage
\begin{enumerate}
\item The main conclusions from the literature regarding vehicular navigation are that:
\begin{itemize}
\item Predictable action, even if unlawful, is key to safety.
\item The severity of an encounter can be measured by the timing of the first evasive maneuver. The longer vehicles stay on a collision course, the more severe the encounter.
\end{itemize}
\item The main conclusions from the literature review regarding how mariners interpret the \textsc{colregs}:
\begin{itemize}
\item Mariners expect to pass port-to-port rather than starboard-to-starboard as evidenced by an off-center ship domain, with the distance on starboard being larger than that on port.
\item Mariners pass at closer distances the faster they are traveling. They also increase CPA in crossing encounters compared to overtaking and head-on encounters.
\item Mariners who choose to make a course alteration are expected to change course by at least 30\textdegree.
\item The stand-on vessel is justified in taking evasive action at a range of two nautical miles.
\end{itemize}
\item The main conclusions from the literature review regarding informal traffic rules:
\begin{itemize}
\item In some areas, faster and more maneuverable ships will take the first action even when they are the stand-on vessel.
\item A natural law is to pass astern of the other vessel in the most efficient manner possible.
\end{itemize}
\item The main conclusions from the literature review regarding \textsc{colregs} violations:
\begin{itemize}
\item When committing a \textsc{colregs} violation, mariners will enlarge the closest point of approach.
\item Common \textsc{colregs} violations are vessels passing starboard-to-starboard in head-on encounters and the give-way vessel failing to act first.
\end{itemize}
\end{enumerate}
}
% ============================================================================
% METHODOLOGY
% ============================================================================
\chapter{Methodology}
\label{ch:methods}
\par{% intro to trajectory
AIS data is spatiotemporal data, meaning it contains a spatial attribute --- GPS coordinates --- and a temporal attribute --- a timestamp --- and can be represented as $p(x,y,t)$. For a given ship, the data points can be arranged chronologically into a discrete time series of latitude-longitude pairs, \ie{a trajectory}, $T = \{p_{0}, p_{1},...,p_{n}\}$.\cite{Demsar} Additional attributes may be associated with each data point, such as vessel type, vessel length, speed, course, and heading. Attributes like vessel type and length remain constant across the trajectory, while the speed, course, and heading can change with each data point. Trajectories can be described by their spatial, temporal, and spatiotemporal characteristics. Examples of spatial characteristics are geographical area, geometric shape, and the straightness index. The straightness index can be calculated several ways, the simplest being the straight-line distance between the start and end points divided by the trajectory length. Temporal characteristics include time interval and duration, while spatiotemporal characteristics include measures like average velocity.\cite[-0.3in]{Nehal}\cite{Li}
}
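\par{% straightness index sketch
As an example of a spatial characteristic, the straightness index described above can be computed directly from a trajectory's points; the sketch below assumes planar x/y coordinates in metres (projected positions) rather than raw latitude and longitude.
\begin{minted}{python}
import math

def straightness_index(points):
    """Straight-line distance between the first and last points divided by the
    total path length; 1.0 means a perfectly straight trajectory. `points` is
    a chronological list of (x, y) positions in a planar projection."""
    path = sum(math.dist(points[i], points[i + 1]) for i in range(len(points) - 1))
    if path == 0:
        return 1.0
    return math.dist(points[0], points[-1]) / path

track = [(0, 0), (1000, 50), (2000, -40), (3000, 0)]
print(round(straightness_index(track), 3))   # close to 1: a nearly straight track
\end{minted}
}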
\par{% trajectory segmentation
Trajectories are associated with a moving object, such as a person, vehicle, or vessel. These moving objects engage in different activities that are associated with different movement patterns. Ship trajectories can generally be broken into \textit{stop} and \textit{move} segments. Different vessel types will have additional segments; for example, a fishing vessel may break a \textit{move} segment into a \textit{travel} and a \textit{fishing} segment, with \textit{fishing} identified by repeatedly traversing a small area. Trajectory segmentation can be accomplished through statistical or rule-based methods. One of the most prominent criteria for segmentation is dwell time, which is the amount of time an object stays in one location. Other popular criteria are the distance and speed between successive locations.\cite{Demsar} Pre-defined zones of interest, such as a port area or a known fishing area, can also be used to classify segments of a trajectory.
}
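\par{% segmentation sketch
A rule-based stop/move split of this kind can be sketched in a few lines; the speed threshold below is a placeholder, and a real segmenter would typically add minimum-duration and dwell-time rules on top.
\begin{minted}{python}
def stop_move_segments(points, speed_threshold_kn=0.5):
    """Split a trajectory into 'stop' and 'move' segments using a simple
    speed-over-ground threshold. `points` is a chronological list of
    (timestamp, speed_kn) tuples; the threshold is illustrative."""
    segments, current, label = [], [], None
    for ts, sog in points:
        state = "stop" if sog < speed_threshold_kn else "move"
        if state != label and current:
            segments.append((label, current))
            current = []
        label = state
        current.append(ts)
    if current:
        segments.append((label, current))
    return segments

track = [("17:50", 0.1), ("17:51", 0.2), ("17:52", 8.4), ("17:53", 9.0)]
print(stop_move_segments(track))
# [('stop', ['17:50', '17:51']), ('move', ['17:52', '17:53'])]
\end{minted}
}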
\par{% what is a trajectory good for
Spatiotemporal data can be used to answer questions about moving objects as well as their environment. Microsoft and Google, for example, use our spatiotemporal data to deduce where we live, work, go for runs, and socialize.\cite{patent} In shipping, trajectory data can be used to identify routes, abnormal and possible illegal behavior, as well as high-traffic and potentially unsafe areas. Beyond maritime management applications, financial traders are using AIS to track vessels and predict commodities' prices.\cite{Button}
}
\par{% structured data and query
Putting ship spatiotemporal data into a relational database allows the data to be queried for the purposes of knowledge discovery. A relational database is one that stores structured data in rows and columns. The columns correspond to attributes of the data, and each row is one observation. Structured data is data that is labeled. Take an email inbox, for example: it can be searched using the \textit{to} or \textit{subject} fields because these are labeled upon creation of an email. The body of the email, on the other hand, is unstructured free-form text. The benefit of structured data is that it can be easily queried according to its labels; in relational databases, this is performed using Structured Query Language (SQL).
}
\begin{table}
\centering
\begin{tabular}{l l l l l}
\hline
MMSI & DateTime & LAT & LON & Heading \\
\hline
\hline
\rowcolor{Gray}
366709780 & 20170717 17:50 & 47.53 & -122.60 & 179 \\
\rowcolor{Gray}
366709780 & 20170717 17:51 & 47.54 & -122.40 & 179 \\
\rowcolor{Gray}
366709780 & 20170717 17:52 & 47.55 & -122.20 & 179 \\
\rowcolor{Gray}
366709780 & 20170717 17:53 & 47.56 & -122.00 & 179 \\
366701536 & 20170717 17:50 & 47.10 & -123.23 & 70 \\
366701536 & 20170717 17:51 & 47.11 & -123.20 & 72 \\
366701536 & 20170717 17:52 & 47.12 & -123.1 & 73 \\
366701536 & 20170717 17:53 & 47.13 & -123.17 & 70\\
\hline
\end{tabular}
\vspace{0.1in}
\caption{Example trajectory point data.}
\label{tab:relational}
\end{table}
\par{%
Table \ref{tab:relational} shows a three-minute time period with two ships observed at each timestamp. An example query on this data with both spatial and temporal aspects could be ``How many vessel calls did the Port of Seattle receive on 17 July 2017?'' To answer this query, one would provide the geometry of the Port of Seattle as a polygon defined by a sequence of geographical points and simply scan each row of the database, answering ``yes''/``no'' as to whether the row's GPS coordinates fall within the Port of Seattle polygon and the row's timestamp is on 17 July 2017. Taking the subset of rows that answered ``yes'' and counting the unique MMSIs in that subset yields the answer to the query.
}
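\par{% vessel call query sketch
The dissertation performs this kind of query in PostGIS, but the row-scan idea can be sketched in memory with \mintinline{python}{pandas} and \mintinline{python}{shapely}. The file name, column names (as in Table \ref{tab:relational}), and the Port of Seattle polygon coordinates below are placeholders.
\begin{minted}{python}
import pandas as pd
from shapely.geometry import Point, Polygon

# Placeholder polygon; real coordinates would come from the port geometry.
port_of_seattle = Polygon([(-122.36, 47.57), (-122.33, 47.57),
                           (-122.33, 47.61), (-122.36, 47.61)])

points = pd.read_csv("ais_points.csv", parse_dates=["DateTime"])

in_port = points.apply(
    lambda row: port_of_seattle.contains(Point(row["LON"], row["LAT"])), axis=1)
on_date = points["DateTime"].dt.date == pd.Timestamp("2017-07-17").date()

# Number of distinct vessels observed inside the polygon on that day.
vessel_calls = points.loc[in_port & on_date, "MMSI"].nunique()
\end{minted}
}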
\par{% TSS
A query pertinent to this dissertation is ``At what headings do ships cross traffic separation schemes?'' Rule 10 of the \textsc{colregs} requires vessels crossing a TSS to do so with a near-90\textdegree heading relative to the TSS. This is to eliminate confusion among surrounding vessels as to whether the crossing vessel is crossing or joining the TSS. To answer this query, we first need to identify the point at which the vessel enters a TSS, \ie{the entrance point}, and which TSS it is crossing. Traffic separation scheme information is kept in a separate table with an ID for each TSS and its geometry stored as a MultiPolygon. The TSS information is joined to Table \ref{tab:relational} where the TSS geometry contains the ship geometry, \mintinline{python}{ST_Contains(tss.geometry, ship.geometry)} (Table \ref{tab:relational1}).
\begin{table*}
\centering
\begin{tabular}{l l l l l l l l l}
\hline
MMSI & DateTime & LAT & LON & Heading & In\_TSS & TSS\_ID &TSS\_Heading & Angle\\
\hline
\hline
366709780 & 20170717 17:50 & 47.53 & -122.60 & 179 & 0 & null & null & null\\
366709780 & 20170717 17:51 & 47.54 & -122.40 & 179 & 0 & null & null & null\\
\rowcolor{Gray}
366709780 & 20170717 17:52 & 47.55 & -122.20 & 179 & 1 & 50 & 91 & 88\\
366709780 & 20170717 17:53 & 47.56 & -122.00 & 179 & 1 & 50 & 91 & null\\
366701536 & 20170717 17:50 & 47.10 & -123.23 & 70 & 0 & null & null & null\\
\rowcolor{Gray}
366701536 & 20170717 17:51 & 47.11 & -123.20 & 72 & 1 & 55 & 140 & 68\\
366701536 & 20170717 17:52 & 47.12 & -123.1 & 73 & 1 & 55 & 140 & null\\
366701536 & 20170717 17:53 & 47.13 & -123.17 & 70 & 1 & 55 & 140 & null\\
\hline
\end{tabular}
\vspace{0.2in}
\caption{Example trajectory point data with TSS information.}
\label{tab:relational1}
\end{table*}
The point at which \textit{In\_TSS} switches from $0$ to $1$ for a given ship is the entrance point. While the vessel's heading at the entrance point is already part of the table, we need to extract two points on the TSS's boundary near the entrance point and calculate the bearing between them to derive the TSS's orientation. Lastly, we take the difference in heading between the vessel and the TSS to obtain the relative entrance heading, \textit{Angle}, in Table \ref{tab:relational1}.
}
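\par{% crossing angle sketch
A minimal sketch of deriving the relative entrance heading in Table \ref{tab:relational1}, assuming the two boundary points near the entrance point have already been extracted; folding the difference into $[0, 90]$ is one possible convention and matches the example values in the table.
\begin{minted}{python}
from math import radians, degrees, sin, cos, atan2

def bearing(p1, p2):
    """Initial bearing in degrees from point p1 to point p2, each (lat, lon)."""
    lat1, lon1, lat2, lon2 = map(radians, (*p1, *p2))
    x = sin(lon2 - lon1) * cos(lat2)
    y = cos(lat1) * sin(lat2) - sin(lat1) * cos(lat2) * cos(lon2 - lon1)
    return degrees(atan2(x, y)) % 360

def crossing_angle(ship_heading, boundary_p1, boundary_p2):
    """Angle between the ship's heading and the TSS orientation."""
    tss_heading = bearing(boundary_p1, boundary_p2)
    diff = abs(ship_heading - tss_heading) % 180
    return min(diff, 180 - diff)
\end{minted}
}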
\par{% relational queries
In the two queries above, there is only one moving object in the query. The TSS query includes a more complex spatiotemporal aspect by requiring the \textit{first} point that is inside the TSS rather than a simple ``yes''/``no'' as to whether it entered the TSS. Questions with more than one moving object quickly get more difficult to answer. Take the query: ``How many ships came within 1 nautical mile of each other in July 2017?'' You cannot simply scan Table \ref{tab:relational} to answer this question as there is no data on the distance each ship is from every other ship at every timestamp in July. To answer this query there are several intermediate calculations that must occur. First, we need to group data into separate objects, \textit{ships}, with distinct trajectories. To determine whether we should investigate whether two trajectories came within 1 nautical mile of each other, we can first check if the trajectories occur in the same area and time period. If the trajectories' bounding boxes --- the rectangles covering each entire trajectory --- are within 1 nautical mile of each other and the time intervals of the trajectories overlap, then it is possible the ships came within 1 nautical mile of each other; if not, this trajectory pair can be removed from consideration. The next step is to calculate the distance between the ships at each common timestamp, the minimum of which is the distance at the closest point of approach (CPA) (Figure \ref{fig:dcpa}). To do so, we need to join the data points from \textit{trajectory 1} to \textit{trajectory 2} using timestamp as the common field to create a new table with data that can be used to calculate the distance between ships for each observation. Doing this for each pair of trajectories in the relational database will yield the answer to the query.
\begin{figure}
\centering
\includegraphics{dcpa.png}
\caption[The CPA occurs when the vessels are closest to each other in time and space.]{The CPA occurs when the vessels are closest to each other in time and space. Reprinted from \citet{Arumugam}.}
\label{fig:dcpa}
\end{figure}
}
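\par{% CPA join sketch
The timestamp join described above can be sketched with \mintinline{python}{pandas}, assuming each trajectory is a DataFrame with a shared DateTime column and projected coordinates in meters; the column names are assumptions for illustration.
\begin{minted}{python}
import numpy as np
import pandas as pd

def distance_at_cpa(traj1, traj2):
    """Minimum inter-ship distance over common timestamps (meters).

    Both inputs are DataFrames with columns DateTime, x, y, where x and y
    are projected coordinates in meters.
    """
    joined = traj1.merge(traj2, on="DateTime", suffixes=("_1", "_2"))
    if joined.empty:
        return None  # no overlapping timestamps, so no CPA
    distances = np.hypot(joined["x_1"] - joined["x_2"],
                         joined["y_1"] - joined["y_2"])
    return distances.min()
\end{minted}
}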
\par{% encounter type and 20 min trajectory
In this dissertation, I am interested in mariners' behavior during an encounter, \ie{when another ship may influence their preferred route}. The CPA distance is used to filter all trajectories down to those that may contain evasive maneuvers (changes in speed or heading); those trajectories are further reduced to the data points just prior and just after the CPA. Depending on the geometry of the encounter, the encounter type (\ie{crossing, head-on, overtaking}) and vessel responsibilities (\ie{stand-on, give-way}) are assigned. The CPA, distance and time before CPA that each vessel maneuvers, and the direction and magnitude of the maneuver are the desired results. Of particular interest are the differences in these results between vessel type (cargo, ferry, tanker), vessel responsibility (stand-on, give-way), and whether the encounter takes place in a traffic separation scheme.
}
\par{% environment
This dissertation contributes to the reproducibility of research by confirming the findings in the literature. Significant changes from the literature review are this dissertation's geographical location, time period, and data source. None of the studies in the literature review took place in the United States or, more specifically, the Puget Sound. Due to the unique features of the Puget Sound area, including its ferry traffic and traffic separation schemes, this research is able to speak to the universality of the patterns of behavior regarding evasive maneuvers, passing distances, and informal rules. The transparent and well-documented use of public data in this dissertation will allow future researchers to reproduce this analysis and identify the effects of the data source, geographic location, and/or methodology.
}
\par{%
To implement this approach, the open-source relational database management system \textit{PostgreSQL 12} is used with the spatial database extender for PostgreSQL, \textit{PostGIS}. PostGIS is an open-source alternative to ArcGIS and provides spatial query functionality \sidenote{examples: is a point inside a polygon? do two lines intersect?} and some spatiotemporal query functionality. The spatiotemporal functionality of databases is a current field of research and is not yet a mature aspect of major database management systems.\cite{Simoes} The database is constructed programmatically using Python 3.7.3.\sidenote{My code is available at \url{https://github.com/mkrowell/phd}} To allow my approach to scale to any geographical area and any time, I designed a procedure to automatically obtain, clean, and process raw data and produce vessel encounters that can be queried against to analyze various questions. Writing modular code has allowed me to explore the data set in a systematic and well-documented fashion and will enable future research and collaboration.
}
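\par{% database setup sketch
As a minimal sketch of the programmatic setup, the snippet below connects to a PostgreSQL database with \mintinline{python}{psycopg2} and enables the PostGIS extension; the connection parameters are placeholders and this is not the dissertation's actual setup code.
\begin{minted}{python}
import psycopg2

# Placeholder credentials; the dissertation code builds the database
# programmatically, this only shows enabling PostGIS.
conn = psycopg2.connect(dbname="ais", user="postgres", host="localhost")
with conn, conn.cursor() as cur:
    cur.execute("CREATE EXTENSION IF NOT EXISTS postgis;")
conn.close()
\end{minted}
}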
\section{Study Area}
\par{% study area
The area under study includes the Puget Sound and the Strait of Juan de Fuca; throughout this dissertation it will be referred to as the Puget Sound. The TSS Turning Point is not included in this analysis as it is an area of additional regulation and complex vessel interactions. The Puget Sound is home to the Northwest Seaport Alliance (the Port of Seattle and the Port of Tacoma) as well as the ports of Anacortes, Bellingham, Everett, Olympia, and Port Angeles. To reach the numerous seaports in both Washington State and Canada, deep-draft vessels transit the Strait of Juan de Fuca approximately 8,300 times each year, with about 7,000 transits requiring a Puget Sound pilot.\cite{Ecology} All vessels operating in Puget Sound are subject to compulsory Washington State pilotage. A subset of vessels are automatically exempt from this requirement: U.S.-flagged vessels operating solely on a coastwise, fishery, or recreational endorsement\sidenote{This includes Washington State ferries.}; U.S. and Canadian flagged vessels conducting west coast coastwise trade; and any flagged vessel with a British Columbia pilot on-board. While not automatically exempt, small passenger vessels operating exclusively in Puget Sound and small yachts can apply for an exemption.\cite{Pilotage}
}
\par{% vts
The area is served by the Vessel Traffic Service Puget Sound (VTSPS), also known as Seattle Traffic, which annually monitors 170,000 ferry transits and an additional 50,000 non-ferry transits of its area of responsibility.\cite{PSP} VTSPS was commissioned on 25 September 1972\cite{vts} and today consists of traffic separation schemes, a Vessel Movement Reporting System (VMRS), and a surveillance system comprised of radar, AIS, and closed-circuit TV through which watch-standers monitor, inform, recommend, and direct its participants. The VMRS requires vessels to report, over VHF-FM voice radio, their identity and sailing plan before they enter the VTS and to report their location at various points throughout their transit. The need to direct vessels is not common, occurring on average 40 times a year.
}
\par{% tss
The relevant traffic separation schemes are the Strait of Juan de Fuca TSS (Figure \ref{fig:straitTSS}) and the Puget Sound TSS (Figure \ref{fig:psTSS}); traffic in the Strait of Juan de Fuca TSS is jointly controlled by the U.S.-Canadian Cooperative VTS. The TSS traffic lanes are at a minimum one nautical mile in width and the buffer zone is at a maximum three nautical miles in width. In Canadian waters --- in which portions of the Strait of Juan de Fuca lie --- power-driven vessels over 20 meters are required to use the TSS. Use of the TSS in U.S. waters is not required, but if not in the TSS, a vessel must stay well clear. Generally the larger vessels are not able to safely keep clear and prefer to travel in the TSS.\sidenote{Hail, L. personal communication, 26 August 2016.} All vessels, regardless of TSS participation, must abide by the \textsc{colregs}.
}
\begin{figure}
\centering
\includegraphics{18400.pdf}
\caption{Strait of Juan de Fuca traffic separation scheme is shown in purple. The thick purple regions separate opposing lanes.}
\label{fig:straitTSS}
\end{figure}
\begin{figure*}
\centering
\includegraphics{18440.pdf}
\caption{Puget Sound traffic separation scheme is shown in purple. The thick purple regions separate opposing lanes.}
\label{fig:psTSS}
\end{figure*}
\par{% Ferries
In addition to the traffic separation schemes, there are ten ferry routes sailed by the Washington State Ferries --- the nation's largest ferry system --- with the majority cutting across a traffic separation scheme (Figure \ref{fig:wsf}).\cite{wsf}\sidenote{In 2016, a Washington State Ferry collided with a power boat. You can see the video here: \url{https://www.maritime-executive.com/article/video-boat-hits-washington-state-ferry}} When crossing a TSS, \textsc{colregs} Rule 10 requires that the vessel do so at a 90\textdegree angle, or as close to a right angle as practical, to the flow of traffic within the TSS.\cite{USCG} This is to avoid confusion as to whether the vessel is joining or crossing the TSS.
}
\begin{figure*}
\centering
\includegraphics{WashingtonStateFerries.pdf}
\vspace{-0.3in}
\caption[The majority of Washington State Ferry routes cross a TSS.]{The majority of Washington State Ferry routes cross a TSS. WSDOT. (2010).}
\label{fig:wsf}
\end{figure*}
\par{% rec
The Puget Sound is also home to thousands of recreational boaters and sailors; in 2015 there were over 125,000 registered boats in the Washington State counties bordering the Puget Sound. Small vessels require little training and/or licensing to operate; are relatively affordable; and operate under the expectation of little regulation. These conditions result in the potential for a high number of boating safety violations. In Washington State in 2014, there were 122 accidents that resulted in 22 fatalities, 44 non-fatal injuries, and over \$2 million in damages.
}
\par{% safety records
The Puget Sound has a strong safety culture, as evidenced by the Puget Sound Harbor Safety Committee (PSHSC), which comprises members from all aspects of the maritime industry with the mission:
\begin{quotation}
to provide a proactive forum for identifying, assessing, planning, communicating, and implementing operational and environmental measures beyond statutory and regulatory requirements that promote safe, secure, and efficient use of Puget Sound and adjacent waters.\cite{Ecology}
\end{quotation}
The PSHSC holds regular meetings with stakeholders from all areas of the maritime industry in which they discuss incidents such as Prince Rupert VTS losing all communication for four hours due to a power surge, a sailing regatta entering the traffic separation scheme and not monitoring the radio over which VTS was calling them, and environmental concerns.\sidenote{PUGET SOUND HARBOR SAFETY COMMITTEE Meeting Notes. April 5, 2017, \url{https://static1.squarespace.com/static/59356b2ce3df280bc208d8b6/t/594be07f197aeafbe95f2d6a/1498144895838/PSHSCApril2017Final.pdf}}
}
% --------------------------------------------------------------
\section{Data Source}
\par{% objectivity and proximity
Initially, traffic conflict data was gathered by posting observers on the corners of an intersection who would record the traffic volume and subjectively detect conflicts and rate their severity. The reliability of this technique --- the ability to attribute differences in the data to differences in safety rather than measurement error --- was low. To improve the objectivity of the data, researchers began to film intersections and conduct frame-by-frame analysis. This technique allowed analysts to identify the position of vehicles and calculate their proximity to one another. This type of analysis is expensive and time-consuming, but modern computer vision has increased its viability. Early ship navigation studies used observed radar tracks and simulator data. A more recent data source is AIS which provides GPS coordinates, identifying information, and other motion characteristics at regular time intervals between 2 seconds and 3 minutes. AIS vastly increases the amount of data available as well as the computational resources needed to make sense of such a granular data set.
}
\par{
The AIS data used in this dissertation comes from \url{https://marinecadastre.gov/ais/} which is a joint effort by the Bureau of Ocean Energy Management and the National Oceanic and Atmospheric Administration to make authoritative data available to planners. The source of the underlying data is the National Automatic Identification System (NAIS) run by the United States Coast Guard. The data is organized by Universal Transverse Mercator (UTM) Zone, year, and month and provided as a comma-separated values (CSV) file. The Puget Sound falls in UTM Zone 10 which is further reduced to the area of study contained within two bounding boxes:
\begin{description}
\item[Strait of Juan de Fuca] between $48^{\circ} 00'00''$N and $48^{\circ} 42'00''$N, and between $123^{\circ} 30'00''$W and $125^{\circ} 00'00''$W
\item[Puget Sound] between $47^{\circ} 00'00''$N and $48^{\circ} 12'00''$N, and between $122^{\circ} 00'00''$W and $122^{\circ} 47'60''$W
\end{description}
July 2017 is chosen for the month and year as the summer months see the most ferry traffic. The data is reported in World Geodetic System 84 (WGS84) coordinates and contains the following fields:\sidenote{Notably, the timestamp is not part of the AIS message but is recorded by the receiver.}
\begin{multicols}{3}
\begin{itemize}
\item MMSI
\item BaseDateTime
\item LAT
\item LON
\item Speed over Ground (SOG)
\item Course over Ground (COG)
\item Heading
\item VesselName
\item IMO
\item CallSign
\item VesselType
\item Status
\item Length
\item Width
\item Draft
\item Cargo
\end{itemize}
\end{multicols}
}
\par{% code
The code repository for this dissertation (\url{https://github.com/mkrowell/phd}) includes a download module that programmatically downloads MarineCadastre data when the user provides the city, year, and months of interest. The United States shoreline and traffic separation schemes are also available to be downloaded from NOAA. This ensures the data used in this dissertation can easily be reproduced and/or extended to other geographical areas and years.
}
\section{Data Cleaning}
\par{% bad data
While the data is ``authoritative'', meaning it comes from a government agency that produces the data in accordance with a law, it still requires cleaning and preprocessing before it can be used for analysis. Data cleaning is the process of detecting and correcting/removing faulty data from a data set. Faulty data may be duplicate data, conflicting data, incomplete data, or invalid data. Because data cleaning alters the source data and will impact the results of the analysis, it is important to document all processing steps. All steps described in this section are available in the cleaning module of the dissertation code. In total, 95\% of the raw data is removed from the analysis during cleaning and processing steps. Percent reductions in the following paragraphs are in relation to the data set at the beginning of each step, not the original raw data set.
}
\par{
The first step is to read in the data and normalize values. The value that signifies unavailable heading in AIS is 511; all observations of a 511 heading are replaced with NULL. Vessel types are given as codes, which are mapped to cargo, ferry, fishing, passenger, recreational, tanker, or tug. Some angles --- heading and COG --- in the raw data are recorded as negative angles; these are normalized to fall between 0 and 360. Some speed over ground (SOG) values are recorded as negative; these are replaced with their absolute values.
}
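\par{% normalization sketch
A minimal sketch of this normalization step with \mintinline{python}{pandas}; the file name is a placeholder for the raw MarineCadastre CSV and the vessel type mapping is omitted.
\begin{minted}{python}
import numpy as np
import pandas as pd

# Placeholder file name for the raw MarineCadastre CSV.
df = pd.read_csv("ais_raw.csv", parse_dates=["BaseDateTime"])

# 511 is the AIS code for "heading not available".
df["Heading"] = df["Heading"].replace(511, np.nan)

# Fold negative angles into [0, 360) and replace negative speeds with their
# absolute value; vessel type codes would be mapped to categories separately.
for angle in ("Heading", "COG"):
    df[angle] = df[angle] % 360
df["SOG"] = df["SOG"].abs()
\end{minted}
}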
\par{
Duplicate data can be removed without loss of information while the other types of faulty data can be removed or corrected through statistical measures.\cite{VanDenBroeck} Due to the abundance of data, no attempt to fill in missing data is made in this dissertation; any data point with a missing required field is simply removed. The required fields are MMSI, BaseDateTime, LAT, LON, SOG, COG, and Heading. The MMSI, BaseDateTime, LAT, and LON are the spatiotemporal data that is required to make a trajectory for the vessel. The SOG, COG, and Heading are additional attributes used to characterize ship maneuvers. In the study data set, 66.8\% of points are missing a required field; all removed data points were missing a heading and nothing else. The majority of the data points with missing heading were of type recreational or unknown (Figure \ref{fig:missingtype}). As described later, this analysis only considers vessels of type cargo, ferry, and tanker, so this step does not largely impact the available data.
}
\par{% key
\begin{figure*}
\centering
\includegraphics{Comparison_Type.png}
\caption{The majority of vessels with missing heading are of type recreational and unknown, which do not impact the analysis as these types are not considered.}
\label{fig:missingtype}
\forceversofloat
\end{figure*}
\begin{table}
\centering
\begin{tabular}{l l l l l l l }
MMSI & BaseDateTime & LAT & LON & SOG & COG \\
\hline
366709780 & 20170717 17:59:29 & 47.0 & -122.6 & 10 & 179 \\
366709780 & 20170717 17:59:29 & 47.0 & -122.6 & 10 & 179 \\
366709780 & 20170717 17:59:29 & 47.0 & -122.6 & \textbf{15} & 179 \\
\hline
\end{tabular}
\vspace{0.1in}
\caption{Example of duplicate keys and data. All rows are removed from the data set.}
\label{tab:dataDuplicate}
\end{table}
Each data point in the AIS data is identified by its MMSI and timestamp; this is called the primary key and must be unique across the entire data set. If two data points have the same primary key, either (1) one point is removed if all other fields match or (2) both points are removed if the other fields conflict. A simplified example is shown in Table \ref{tab:dataDuplicate}. Rows 1 and 2 are exact duplicates and result in row 2 being removed. Row 1 and row 3 have duplicate primary keys but conflicting SOG; rather than predict which one is correct using surrounding data, for simplicity, both row 1 and row 3 are removed. In the study data set, a negligible amount of data points are exact duplicates and no data points have a duplicate primary key with conflicting fields.
}
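\par{% duplicate handling sketch
A sketch of this rule with \mintinline{python}{pandas}, assuming the points are held in a DataFrame \mintinline{python}{df} as in the earlier sketch.
\begin{minted}{python}
# Primary key of the AIS points.
key = ["MMSI", "BaseDateTime"]

# Keep one copy of rows that are exact duplicates.
df = df.drop_duplicates()

# Any key that still repeats has conflicting fields; drop every such row
# rather than guessing which observation is correct.
df = df[~df.duplicated(subset=key, keep=False)]
\end{minted}
}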
\par{% bad MMSI
The MMSI is the unique identifier of the vessel. Valid MMSI numbers are 9 digits and start with three digits between 200 and 776. The first three digits are the country code and correspond to the ship's flag State. AIS devices come with default MMSI numbers that must be manually updated once upon AIS installation. The use of default MMSI numbers in AIS can result in several ships sharing the same MMSI. There are no invalid MMSIs in the study data set at this point in the cleaning process.
}
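\par{% MMSI validity sketch
A sketch of such a validity check, using the same DataFrame; the exact rule applied in the dissertation code may differ.
\begin{minted}{python}
import pandas as pd

mmsi = df["MMSI"].astype(str)

# A valid MMSI has nine digits and a leading country code between 200 and 776.
nine_digits = mmsi.str.fullmatch(r"\d{9}")
country_code = pd.to_numeric(mmsi.str[:3], errors="coerce")
df = df[nine_digits & country_code.between(200, 776)]
\end{minted}
}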
\par{
Beyond faulty data, the MarineCadastre data contains a significant amount of data corresponding to vessels that are not moving, \ie{stop segments}. Stopped vessels are not navigating and cannot be involved in a collision.\sidenote{When a stopped vessel is struck, it is called an allision.} Moored and anchored vessels can be identified by their speed, AIS transmission interval, and navigation status. Figure \ref{fig:sog} shows that a majority of data points have a SOG close to zero. A vessel's speed over ground (SOG) may not be exactly 0 when stopped due to swaying. Ships with low speed are generally involved in mooring and/or being assisted by tugs. To retain data points related solely to navigating, all data points with a SOG under 3 knots are removed from the data set (68.7\% of the data set).
}
\begin{figure*}
\begin{tabular}{c}
\includegraphics[width=0.95\textwidth]{Raw_SOG.png}\\
\includegraphics[width=0.95\textwidth]{Cleaned_SOG.png}\\
\end{tabular}
\caption{The majority of all data points correspond to a stopped vessel. When underway, most vessels operate at a SOG between 3 and 20 knots.}
\label{fig:sog}
\end{figure*}
\par{% interval
The time intervals between data transmissions are approximately three minutes if the vessel is stopped and one minute if the vessel is underway (AIS transmits more frequently, but NAIS data is sampled at one minute intervals). All data that was transmitted at an interval over 3 minutes is removed from the data set which accounts for 2.86\% of the data set.
}
\par{% status
The navigational status of the vessel (\eg{ underway using engine, moored, not under command}) can provide some additional information but should not be solely relied on. The status field is not complete; many vessels do not update this information or do not enter it at all. Vessels with an associated status of not under command, restricted maneuverability, engaged in fishing, power-driven vessel towing astern, reserved for future use, power-driven vessel pushing ahead, or towing alongside are discarded from the data set. These statuses correspond to non-normal navigation that is outside the scope of this dissertation. Their removal amounts to 2.18\% of the data set. Moored and anchored statuses are not excluded because they are more commonly used and may not be updated once the vessel is underway.
}
\par{% type
Vessels of type recreational, fishing, and tug are out of scope for this dissertation due to the type of activity the vessels of these types are generally engaged in. Fishing and recreational vessels do not consistently travel the shortest distance between an origin and destination while tugs are often accompanying another vessel. Keeping only cargo, ferry, and tanker types reduces the data set by 39.2\%.
}
% -------------------------------------------------------------------------
\section{Data Processing}
\par{% creation of trips
The entire trajectory for a given vessel may contain stops and/or data jumps and is not a usable form of the data. The trajectory must be broken into meaningful segments, trips, at break-points. Data for a single MMSI is sorted chronologically and segmented into trips based on time jumps greater than three minutes; any sparse trips --- those containing fewer than 20 data points --- are removed (23.32\% of the data set). In Table \ref{tab:trips}, Trip 2 begins with a time jump of 660 seconds (the \textcolor{red}{red} row). The time jumps have corresponding location jumps that are described by the displacement between successive locations. The \textcolor{blue}{blue} row demonstrates an unrealistic relationship between the time and location jumps.
}
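\par{% trip segmentation sketch
A simplified sketch of the trip segmentation, assuming a DataFrame with MMSI and BaseDateTime columns; the break-point logic in the dissertation code may differ in detail.
\begin{minted}{python}
import pandas as pd

def split_into_trips(df, max_gap="3min", min_points=20):
    """Segment each vessel's points into trips at time jumps over max_gap,
    dropping trips with fewer than min_points points (a simplified sketch)."""
    df = df.sort_values(["MMSI", "BaseDateTime"])
    gap = df.groupby("MMSI")["BaseDateTime"].diff() > pd.Timedelta(max_gap)
    df["Trip"] = gap.groupby(df["MMSI"]).cumsum() + 1
    sizes = df.groupby(["MMSI", "Trip"])["BaseDateTime"].transform("size")
    return df[sizes >= min_points]
\end{minted}
}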
\begin{table}
\centering
\begin{tabular}{l l l l l }
MMSI & Interval & Trip & Maximum Distance & Displacement \\
\hline
\vdots & \vdots & \vdots & \vdots & \vdots \\
366709780 & 60 & 1 & 125 & 110 \\
366709780 & 60 & 1 & 125 & 110 \\
\textbf{\textcolor{red}{366709780}} & \textbf{\textcolor{red}{660}} & \textbf{\textcolor{red}{2}} & \textbf{\textcolor{red}{955}} & \textbf{\textcolor{red}{1150}} \\
366709780 & 60 & 2 & 150 & 110 \\
\textbf{\textcolor{blue}{366709780}} & \textbf{\textcolor{blue}{60}} & \textbf{\textcolor{blue}{2}} & \textbf{\textcolor{blue}{150}} & \textbf{\textcolor{blue}{190}} \\
366709780 & 60 & 2 & 150 & 110 \\
\vdots & \vdots & \vdots & \vdots & \vdots \\
\hline
\end{tabular}
\vspace{0.1in}
\caption{The trajectory is first split into two trips based on Time Interval (red row). Then the displacement between consecutive points is compared with the maximum distance the vessel could have travelled given its reported SOG. The blue row is an example of unrealistic data.}
\label{tab:trips}
\forcerectofloat
\end{table}
\begin{table}
\centering
\begin{tabular}{l l l l l }
MMSI & Interval & Trip & Maximum Distance & Displacement \\
\hline
\vdots & \vdots & \vdots & \vdots & \vdots \\
366709780 & 60 & 1 & 125 & 110 \\
366709780 & 60 & 1 & 125 & 110 \\
\textbf{\textcolor{red}{366709780}} & \textbf{\textcolor{red}{660}} & \textbf{\textcolor{red}{2}} & \textbf{\textcolor{red}{955}} & \textbf{\textcolor{red}{1150}} \\
366709780 & 60 & 2 & 150 & 110 \\
\sout{366709780} & \sout{60} & \sout{2} & \sout{150} & \sout{190} \\ [-1.5ex]
\hline \\[-1.5ex]
\textbf{\textcolor{orange}{366709780}} & \textbf{\textcolor{orange}{120}} & \textbf{\textcolor{orange}{2}} & \textbf{\textcolor{orange}{300}} & \textbf{\textcolor{orange}{200}} \\
\vdots & \vdots & \vdots & \vdots & \vdots \\
\hline
\end{tabular}
\caption{Because the orange row is within the expected ranges for Trip 2, it suggests that the row that is struck-out had bad GPS coordinates that resulted in an anomalous displacement.}
\label{tab:trips2}
\forcerectofloat
\end{table}
\par{
Within a single trip, the time between two consecutive positions and the SOG at the first position are used to calculate the maximum distance the vessel could have travelled between the two points. The displacement is the haversine distance between the two consecutive GPS coordinates that have been projected from WGS84 to UTM Zone 10N. If the displacement is larger than the maximum distance by more than 25\%, the data point is removed and the creation of trips is rerun; this step removes 1.4\% of the data set. The \textcolor{blue}{blue} row in Table \ref{tab:trips} has a displacement that is too large and is therefore removed from the data set. The time interval and distance fields are then recalculated (Table \ref{tab:trips2} \textcolor{orange}{orange} row). In this case, the time interval and displacement between the points just prior and just after the deleted row are within the expected ranges and Trip 2 remains in progress; if they were outside the expected ranges, Trip 3 would begin and Trip 2, consisting of only 2 data points, would be removed altogether. Because the \textcolor{orange}{orange} row is within the expected ranges for Trip 2, it suggests that the row that is struck-out had bad GPS coordinates that resulted in an anomalous displacement.
}
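\par{% displacement check sketch
The feasibility check can be expressed as a small predicate; a sketch, assuming the interval is in seconds and the displacement in meters.
\begin{minted}{python}
KNOTS_TO_MPS = 0.514444  # meters per second per knot

def unrealistic_jump(interval_s, sog_knots, displacement_m, tolerance=1.25):
    """Return True when the displacement between consecutive points exceeds
    the maximum distance the vessel could have travelled by more than 25%."""
    max_distance = sog_knots * KNOTS_TO_MPS * interval_s
    return displacement_m > tolerance * max_distance
\end{minted}
}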
\par{% round timestamps
Lastly, the datetime of the GPS position report in MarineCadastre data is sampled at one minute intervals. For ease of comparison, once the preprocessing of the data is completed, the timestamps are rounded to their nearest minute so that vessel positions can be compared at common timestamps.
}
\par{% eda
Now that the data is cleaned, we can take a first look at the maneuvers of interest: speed and heading changes (Figures \ref{fig:accel} and \ref{fig:alter}). The majority of data points have zero acceleration and small alteration suggesting that mariners rarely make evasive maneuvers in the Puget Sound. Next, I construct the database from the data so I can relate these observed maneuvers to the presence of other vessels.
\begin{figure}
\centering
\includegraphics[width=1\textwidth]{Acceleration.png}
\caption{The majority of all data points have little acceleration.}
\label{fig:accel}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=1\textwidth]{Alteration_Degrees.png}
\caption{The majority of all data points correspond to relatively constant heading.}
\label{fig:alter}
\end{figure}
}
% --------------------------------------------------------------------
\section{Database Construction}
\par{% environment
The environmental data sets needed in the analysis are (1) traffic separation schemes, (2) shoreline, and (3) ferry terminal locations. Each data set contains spatial information in the form of a geometry; MultiPolygon for TSS, LineString for shoreline, and Point for ferry terminal locations. The data sets are loaded into separate tables in the same \textit{PostgreSQL} database. The geometry in each table is projected from its source coordinate reference system (CRS) to that in which the analysis takes place, EPSG:32610.
}
\par{% points
Next, the cleaned and processed data points are used to create the \mintinline{python}{points} table, with the spatiotemporal data stored as a \textit{PointM} data type using the \textit{PostGIS} extension, where M denotes a third dimension --- time --- in addition to the latitude and longitude. The other attributes stored in the \mintinline{python}{points} table include: MMSI, Trip, DateTime, LAT, LON, SOG, COG, Heading, Acceleration, Alteration, Vessel Name, Vessel Type, Status, and Length. The ferry terminals table is used to assign terminals to points within one nautical mile of a terminal. The Traffic Separation Schemes table is used to mark each data point as being within or outside a TSS by performing a \mintinline{python}{ST_Contains(tss.geom, point.geom)} check. Figure \ref{fig:in_tss} shows that while cargo and tanker vessels spend the majority of their time inside a TSS, ferries spend the majority of their time outside the traffic separation schemes.
\begin{figure}
\centering
\includegraphics{TSS.png}
\caption{The majority of ferry data points fall outside of a TSS; cargo and tanker data points are mostly within a TSS.}
\label{fig:in_tss}
\end{figure}
}
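\par{% in-TSS flag sketch
A hedged sketch of the SQL that could set the in-TSS flag; the table and column names are assumptions based on the description above, and the statement would be executed over a \mintinline{python}{psycopg2} connection as in the earlier sketch.
\begin{minted}{python}
# Table and column names are assumptions based on the text.
mark_in_tss = """
    UPDATE points
       SET in_tss = TRUE,
           tss_id = tss.id
      FROM tss
     WHERE ST_Contains(tss.geom, points.geom);
"""
\end{minted}
}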
\begin{figure*}
\begin{tabular}{cc}
\includegraphics[width=85mm]{allTracks.png} &
\includegraphics[width=85mm]{cargoTracks.png} \\
(a) All & (b) Cargo \\[6pt]
\includegraphics[width=85mm]{ferryTracks.png} &
\includegraphics[width=85mm]{tankerTracks.png} \\
(c) Ferry & (d) Tanker \\[6pt]
\end{tabular}
\caption{Most vessels keep to the traffic separation schemes; ferries have the most cross-TSS traffic.}
\label{fig:tracks}
\forcerectofloat
\end{figure*}
\begin{figure}
\centering
\includegraphics{Trip_Length.png}
\caption{The majority of trips are short ferry trips. The longer trips correspond to ships coming from the Pacific Ocean into Vancouver, B.C. or the south Puget Sound.}
\label{fig:tripLength}
\forcerectofloat
\end{figure}
\par{% tracks
Next, the \mintinline{python}{points} are used to generate a \mintinline{python}{tracks} table by first grouping \mintinline{python}{points} by MMSI and Trip, sorting chronologically, and then combining the result into the \textit{LineStringM} data type. These tracks are the trajectories that are used to construct encounters. The number of tracks in total is 4,542: 933 cargo tracks, 3,501 ferry tracks, and 108 tanker tracks. In Figure \ref{fig:tracks} you can clearly see the presence of the Strait of Juan de Fuca traffic separation scheme (reference Figure \ref{fig:straitTSS} for a clearer view of the TSS). The major ports in Washington State (Everett, Seattle, Tacoma) are visible in the far eastern portion of the cargo map. The cross-TSS routes of ferries can be seen in the East-West band between Coupeville and Port Townsend at the north end of the Puget Sound and the East-West band between Seattle and Bainbridge/Bremerton farther south. The distribution of trip lengths can be seen in Figure \ref{fig:tripLength}. The distance and duration of each track are calculated and stored in the table. If a ferry terminal is associated with the first point of a track, it is recorded as the trip's origin; similarly, if a ferry terminal is associated with the last point of a track, it is recorded as the trip's destination.
}
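\par{% tracks table sketch
A sketch of aggregating points into tracks; the schema and ordering column are assumptions based on the description above.
\begin{minted}{python}
# Table and column names are assumptions for illustration.
build_tracks = """
    CREATE TABLE tracks AS
    SELECT mmsi,
           trip,
           ST_MakeLine(geom ORDER BY datetime) AS geom
      FROM points
     GROUP BY mmsi, trip;
"""
\end{minted}
Because the points carry an M (time) coordinate, \mintinline{python}{ST_MakeLine} yields the \textit{LineStringM} geometries that the CPA functions below require.
}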
\par{% CPA
Interactions between vessels are detected through the closest point of approach (CPA). If the CPA between two vessels' tracks exists, the interaction is saved to the \mintinline{python}{cpa} table. The CPA is calculated using the PostGIS \mintinline{python}{ST_ClosestPointOfApproach} function which returns the timestamp at which the CPA occurs and \mintinline{python}{ST_DistanceCPA} function which returns the distance between the two vessels at the CPA. Both functions require two LineStrings as input which is why the \mintinline{python}{tracks} table must be created as an intermediary table. The timestamp of the CPA is used to find the point in each vessel's track that corresponds to the CPA, \mintinline{python}{cpa_point_1} and \mintinline{python}{cpa_point_2}. A line is then drawn between these CPA points, \mintinline{python}{cpa_line}, and any interaction whose \mintinline{python}{cpa_line} crosses the shoreline, \mintinline{python}{ST_Intersects(cpa.cpa_line, shore.geom)} is removed from the \mintinline{python}{cpa} table. Lastly, any interaction with a CPA greater than four nautical miles is removed.
}
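\par{% CPA query sketch
A hedged sketch of the pairwise CPA query; the table and column names are assumptions, and the PostGIS functions operate on LineStringM geometries whose M values are epochs.
\begin{minted}{python}
# Table and column names are assumptions for illustration.
find_cpa = """
    SELECT t1.mmsi AS mmsi_1,
           t2.mmsi AS mmsi_2,
           ST_ClosestPointOfApproach(t1.geom, t2.geom) AS cpa_epoch,
           ST_DistanceCPA(t1.geom, t2.geom)            AS cpa_distance
      FROM tracks t1
      JOIN tracks t2 ON t1.mmsi < t2.mmsi
     WHERE ST_ClosestPointOfApproach(t1.geom, t2.geom) IS NOT NULL;
"""
\end{minted}
}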
\par{
The \mintinline{python}{cpa} table only contains information about the closest point of approach. To get information about the vessels before and after the CPA, I must join \mintinline{python}{points} data to the \mintinline{python}{cpa} table. First, \mintinline{python}{point} data relating to \textit{ship 1} is left joined to \mintinline{python}{cpa} based on MMSI and where \mintinline{python}{points.DateTime} is between 10 minutes prior to the CPA and 20 minutes after the CPA. Next, \mintinline{python}{point} data relating to \textit{ship 2} is inner joined to the resulting table where its \mintinline{python}{points.DateTime} matches those already in the joined table. Interactions that have fewer than 10 data points prior to the CPA are removed from the analysis. Derived attributes are added in this step as well, including:
\begin{itemize}
\item distance between ship 1 and ship 2 at each timestamp
\item bearing between ship 1 and ship 2 at each timestamp
\item difference in heading between ship 1 and ship 2 at each timestamp
\item max course alteration for ship 1 and ship 2 during the encounter
\item time to CPA
\item distance to CPA for each ship
\end{itemize}
Figure \ref{fig:cpa} shows the distribution of CPA for various ship type pairings. For most ship type pairings there appear to be two modes, one corresponding to traffic within a single lane of the traffic separation scheme and one corresponding to traffic in opposite lanes of the TSS. Ferry-ferry interactions do not have a bimodal CPA distance distribution since ferries do not follow a TSS for a majority of their trips. The physical layout of the TSS therefore strongly influences the CPA distance.
\begin{figure*}
\centering
\includegraphics{CPA.png}
\caption{The mode of CPA distance around 2.5-3 nautical miles corresponds to traffic in opposite lanes of the TSS.}
\label{fig:cpa}
\end{figure*}
}
\par{% classification
The interactions must then be classified as head-on, overtaking, or crossing as the applicable \textsc{colregs} depend on the encounter type. The encounter type is a function of the difference in heading between the vessels, as shown in Table \ref{tab:encounter}, and the bearing; a sketch of this classification follows the tables below. The classification of the encounter takes place before either vessel maneuvers and, therefore, before the closest point of approach. To observe the initial course difference and bearing between any two vessels, I take the first available concurrent observation of each vessel's course. Vessels traveling near each other while not actually passing are removed from the analysis by checking the beginning and final bearing between the vessels. In a head-on encounter, for instance, the bearing from \textit{ship 1} to \textit{ship 2} must cross either 90\textdegree or 270\textdegree to indicate that the vessels did indeed pass starboard-to-starboard or port-to-port. Table \ref{tab:encounterTypes} shows the breakdown by encounter type, vessel type, and whether either ship was in a TSS. Note that at this point not all interactions involve evasive maneuvers; they simply satisfy the geometric definitions of encounters and are within four nautical miles of each other. Two ships in a head-on encounter with a passing distance of four nautical miles may feel that is a safe passing distance and make no evasive maneuvers. Most encounters involve a ferry, with ferry-ferry crossings outside of the traffic separation scheme being the most common. Figure \ref{fig:cpaencounter} shows that head-on encounters have the smallest CPA; head-on encounters are most likely to involve two ferries, and this ship type pairing is also likely to have a small CPA.
\begin{table}
\begin{tabular}{|l|l l|}
\hline
Encounter Type & Relative Heading & Relative Bearing\\
\hline
\hline
Head-On & $165 < \alpha < 195$ & $\beta < 15$, \, $\beta > 345$\\
\hline
Overtaking & $\alpha < 15$, \, $\alpha > 345$ & $\beta < 15$, \, $\beta > 345$, \\
& & $165 < \beta < 195$\\
\hline
Crossing & $15 < \alpha < 165$ & $0 < \beta < 90$\\
& $195 < \alpha < 345$ & $270 < \beta < 360$\\
\hline
\end{tabular}
\vspace{.1in}
\caption{The encounter type depends on the course difference and bearing between the two vessels.}
\label{tab:encounter}
\end{table}
\begin{table}
\centering
\begin{tabular}{|l | l | l| l l | l l | l |}
\hline
& & & Cargo & & Ferry & & Tanker\\
\hline
& & TSS & F & T & F & T & T\\
\hline
\hline
\rowcolor{Gray}
Crossing & Cargo & F & 1 & 4 & 14 & 3 & \textendash \\
\rowcolor{Gray}
& & T & \textendash & 7 & 18 & 10 & \textendash \\
& Ferry & F & 5 & 13 & 216 & 26 & 1 \\
& & T & 3 & 8 & 30 & 12 & \textendash\\
\rowcolor{Gray}
& Tanker & T & \textendash & 1 & \textendash & \textendash & \textendash\\
\hline
Head-on & Cargo & F & \textendash & 2 & \textendash & 2 & \textendash\\
& & T & 1 & 4 & 4 & 6 & 1 \\
\rowcolor{Gray}
& Ferry & F & 1 & 5 & 34 & 6 & \textendash\\
\rowcolor{Gray}
& & T & \textendash & 3 & 4 & 3 & \textendash \\
\hline
Overtaking& Cargo & F & \textendash & 1 & 2 & \textendash & \textendash\\
& & T & 1 & 11 & 2 & 5 & 1\\
\rowcolor{Gray}
& Ferry & F & \textendash & 1 & 35 & 6 & \textendash\\
\rowcolor{Gray}
& & T & 1 & 8 & 2 & \textendash & \textendash\\
& Tanker & T & \textendash & 1 & \textendash & \textendash & \textendash\\
\hline
\end{tabular}
\vspace{0.2in}
\caption{Encounters by encounter type, vessel type, and whether vessel is in a TSS.}
\label{tab:encounterTypes}
\forcerectofloat
\end{table}
}
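\par{% encounter classification sketch
The rules in Table \ref{tab:encounter} can be expressed as a small function; in the sketch below, the handling of observations that fall exactly on the cut-off angles is an assumption.
\begin{minted}{python}
def classify_encounter(rel_heading, rel_bearing):
    """Classify an encounter from the relative heading and bearing in degrees."""
    a, b = rel_heading % 360, rel_bearing % 360
    if 165 < a < 195 and (b < 15 or b > 345):
        return "head-on"
    if (a < 15 or a > 345) and (b < 15 or b > 345 or 165 < b < 195):
        return "overtaking"
    if (15 < a < 165 and 0 < b < 90) or (195 < a < 345 and 270 < b < 360):
        return "crossing"
    return "none"
\end{minted}
The give-way assignment described in the next paragraph follows from the same relative bearing.
}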
\par{% give way stand on
Next, the give-way and stand-on responsibilities must be assigned to the vessels in each encounter. In a head-on encounter, both vessels must give-way. In an overtaking encounter, the vessel that is astern of the other is the give-way and the vessel being overtaken is the stand-on vessel. In a crossing encounter, if \textit{ship 2} is at a bearing between 0 and 112.5 from \textit{ship 1}, then \textit{ship 1} is the give-way vessel and \textit{ship 2} is the stand-on vessel.
}
\par{%
\begin{marginfigure}
\includegraphics{CPAEncounterType.png}
\label{fig:cpaencounter}
\caption{Encounter ``none'' refers to interactions where ships are simply near each other but do not have a risk of collision. The CPA distance is smallest for head-on encounters, which generally correspond to ferry-ferry encounters.}
\end{marginfigure}
The ship encounter information is now structured in a way that can be queried to answer this dissertation's research questions. Again, all code to generate the data used in this analysis is available at \url{https://github.com/mkrowell/phd}.
}
\section{Ship Domain}
\label{sec:regress}
\par{%
The ship domain is the area around a vessel that the navigator keeps clear of other vessels; it can be detected by observing the distance the ownship keeps between itself and target ships. Previous ship domain research has found that the distance tends to be larger on starboard than on port and larger ahead than astern; the bearings with a larger distance correspond to bearings at which the \textsc{colregs} instruct mariners to avoid passing. To characterize the local Puget Sound ship domain, I construct a linear regression with the distance between ownship and target ship as the dependent variable. The features of ship interactions that I believe may influence this distance are the vessel types of both ships; the bearing between the ownship and target ship; the speed over ground of both ships; and whether one, none, or both ships are in a traffic separation scheme. To estimate the ship domain distance I fit a linear regression of the following form:
\begin{multline*}
D_{ijt} = \beta_0 + \beta_1 \sin(B_{ijt}) + \beta_2 \cos(B_{ijt}) + \beta_3 TSS_{it} + \beta_4 TSS_{jt} + \\ \beta_5 TSS_{it} \times TSS_{jt} + \beta_6 S_{it} + \beta_7 S_{jt} + \lambda_i + \gamma_j + \\
\boldsymbol{\phi} \sin(B_{ijt}) \times \mathbf{X_{ijt}} + \boldsymbol{\psi} \cos(B_{ijt}) \times \mathbf{X_{ijt}} + \epsilon_{ijt}
\end{multline*}
\begin{align*}
\text{where}~D_{ijt} &= \text{distance from ship \textit{i} to ship \textit{j} at time \textit{t}} \\
B_{ijt} &= \text{bearing from ship \textit{i} to ship \textit{j} at time \textit{t}} \\
TSS_{it} &= \text{whether ship \textit{i} is in a TSS at time \textit{t}} \\
TSS_{jt} &= \text{whether ship \textit{j} is in a TSS at time \textit{t}} \\
S_{it} &= \text{speed over ground of ship \textit{i} at time \textit{t}} \\
S_{jt} &= \text{speed over ground of ship \textit{j} at time \textit{t}} \\
\lambda_{i} &= \text{fixed effect for vessel type of ship \textit{i}} \\
\gamma_{j} &= \text{fixed effect for vessel type of ship \textit{j}} \\
\mathbf{X_{ijt}} &= \begin{aligned}[t]
&\text{covariates matrix consisting of the indicators}\\
&\text{for ship \textit{i} and ship \textit{j} TSS belonging at time \textit{t} and}\\
&\text{their interaction and ship \textit{i} and ship \textit{j} vessel types}\\
\end{aligned}
\end{align*}
Ship \textit{i} refers to the ownship and ship \textit{j} refers to the target ship. The bearing is from ship \textit{i} to \textit{j}, and for the same pairing of ships there will be two observations with each ship appearing as the ownship in one and the target ship in the other.
}
\par{% estimation
I estimate the parameters via Ordinary Least Squares regression using Python's \mintinline{python}{statsmodels}\cite{seabold2010statsmodels} package. The standard errors are clustered by MMSI and trip to account for correlated observations during an encounter. Using various sets of the independent variables as input to the estimated model, I can generate the conditional expectation of the distances; a sketch of this estimation follows the list of scenarios below. The scenarios of interest are:
\begin{enumerate}
\item ship \textit{i} ferry in TSS, ship \textit{j} cargo in TSS
\item ship \textit{i} ferry in TSS, ship \textit{j} ferry in TSS
\item ship \textit{i} cargo in TSS, ship \textit{j} ferry in TSS
\item ship \textit{i} cargo in TSS, ship \textit{j} cargo in TSS
\item ship \textit{i} cargo in TSS, ship \textit{j} cargo out of TSS
\end{enumerate}
where speed is the average for the vessel type in all scenarios.
}
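\par{% estimation sketch
A minimal sketch of the estimation with clustered standard errors; the column names, input file, and the simplified right-hand side (omitting the bearing interactions) are assumptions for illustration, not the dissertation's actual specification.
\begin{minted}{python}
import pandas as pd
import statsmodels.formula.api as smf

# Placeholder file; column names are assumptions for illustration.
encounters = pd.read_csv("encounters.csv")

model = smf.ols(
    "distance_12 ~ bearing_12_sin + bearing_12_cos"
    " + tss_1 * tss_2 + sog_1 + sog_2 + C(type_1) + C(type_2)",
    data=encounters,
)

# Cluster the standard errors by MMSI and trip, as described in the text;
# assumes no missing values in the modeling columns.
cluster_ids = encounters.groupby(["MMSI", "Trip"]).ngroup()
results = model.fit(cov_type="cluster", cov_kwds={"groups": cluster_ids})
print(results.summary())
\end{minted}
}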
\section{Limitations}
\par{% missing influences
While the analyses conducted in this dissertation make use of a large amount of AIS data, they still lack the complete picture of the maritime operating environment in the Puget Sound. Data left out of the analysis include vessels of other types (\eg{fishing, recreational}), weather and sea state information, and grounding hazards, as well as non-observable information regarding any communication between vessels. The presence of vessels not included in the analysis as well as environmental factors may be influencing the observed behavior of in-analysis vessels. Including this additional information would follow a similar methodology as described here by expanding the database to include the desired information and would require more computing time and storage.
}
% -----------------------------------------------------------------------------
% RESULTS
% -----------------------------------------------------------------------------
\chapter{Results and Conclusions}
\par{% give way v stand on
The \textsc{colregs} assign responsibilities to each vessel when they are in an overtaking, head-on, and crossing situation. The stand-on vessel has the responsibility to continue with its current speed and course. The other vessel, the give-way vessel, should take action to avoid a collision. The give-way vessel would like to make the smallest deviation necessary to prevent collision, but its action must be, according to Rule 8, made early and large enough to be apparent to the stand-on vessel.\cite{USCG} Taking action as the stand-on vessel is only permitted when it becomes apparent that the give-way vessel is not taking appropriate action.
}
\par{% minimal interference
The \textsc{colregs} assert minimal authority over the give-way vessel's choice of evasive maneuver. The navigator is free to choose whether to alter course, speed, or both and to what degree; when to begin the maneuver; the minimum acceptable passing distance; and when to return to the original course and speed. Two restrictions on this discretion are (1) in a head-on encounter, where both ships are to alter course to starboard for a port-to-port passing and (2) in a crossing encounter, where the give-way vessel is to avoid passing ahead of the stand-on vessel.\cite{Plant}
}
\par{% rigid give way stand on designation
The discretion granted by the \textsc{colregs} when deciding an appropriate collision-avoidance maneuver gives way to rigidity when deciding which vessel is to give way and which is to stand on. This determination is based on the geometry of the encounter (see Figure \ref{fig:colregpic}):
\begin{itemize}
\item the overtaking vessel gives way to the vessel being overtaken;
\item the vessel with the other to her starboard gives way to a crossing vessel;
\item and both vessels give way to each other in a head-on encounter.
\end{itemize} The geometry-based algorithm does not allow the speed and maneuverability of the vessels to enter into the decision. The strict assignment of give-way vessel can be termed a \textit{regulation} and the indistinct direction to give-way, a \textit{rule}. Regulations are a form of explicit, externally applied control; their text completely defines their interpretation. The text of a rule is ambiguous and requires observing the system it refers to in order to interpret its meaning. The navigator must rely on an interpretation of the rules that is consistent with what other mariners would expect of him. The ordinary practice of seamen can be thought of as the agreed upon interpretation of the ambiguous rules --- the patterns of behavior --- that, when necessary, supersedes the regulations. What appears to be a deviation from the \textsc{colregs} may in fact be ``the use of informal, group rules, which are seen as violations by those on the outside, but as skilled adaptations by those on the inside.''\cite{Hale}
}
\par{% infer rules
Research problem 1 of this dissertation is ``How do mariners in the Puget Sound interpret the \textsc{colregs}?''
The key phrases that are up for interpretation are:
\begin{itemize}
\item passing at a safe distance
\item any action to avoid collision shall be\ldots made in ample time\ldots any alteration of course and/or speed to avoid collision shall\ldots be large enough to be readily apparent to another vessel observing visually or by radar
\item as soon as it becomes apparent to her that the vessel required to keep out of the way is not taking appropriate action
\end{itemize}
Parts of the \textsc{colregs} that are not up for interpretation are the rules to:
\begin{itemize}
\item give way and pass port-to-port in head-on encounters
\item give way to the vessel on your starboard and avoid passing ahead of it in a crossing encounter
\item cross traffic separation schemes at near-90 degree angles relative to the traffic separation scheme.
\end{itemize}
Observing violations of these rules provides answers to research problem 2, ``Are informal rules being followed?'', and research problem 3, ``What is the nature and frequency of \textsc{colregs} violations?''
}
\section{Safe Passing Distance}
\par{%
Rule 8c of the \textsc{colregs} states that:
\begin{quotation}
Action taken to avoid collision with another vessel shall be such as to result in passing at a safe distance.
\end{quotation}
The ship domain as described in Section \ref{sec:shipdomain} is the lower bound of what mariners consider a safe passing distance. The population of mariners operating in Puget Sound is a mix between local ferry captains and international cargo vessel crews. In addition to familiarity with the area, ferries are smaller, more maneuverable vessels. For these reasons, I expected to observe ferries maintaining a smaller passing distance than cargo vessels. Querying the bearing and distance from the ownship to the target ship from the \mintinline{python}{encounters} table and plotting the results generates Figure \ref{fig:shipDomainResutls}. Each subplot represents a different pairing of vessel types between the ownship and target ship. Take the \textit{target ship ferry} - \textit{ownship cargo} pairing, for example: the center of the plot (r=0) represents a static cargo vessel and each point surrounding the center is an observation of a ferry at the bearing and distance from the cargo vessel at which it was observed. Due to the restricted nature of the Strait of Juan de Fuca and Puget Sound and the proximity of traffic separation schemes, vessels are required to pass closer to one another than they would in the open sea; to increase readability, Figure \ref{fig:shipDomainResutls} shows target ships that were within two nautical miles of the ownship. In each plot, the blue points correspond to target vessels that are observed in a traffic separation scheme, and the red points correspond to target vessels observed outside a traffic separation scheme.
\begin{figure*}
\centering
\textbf{Distance versus Bearing}\par\medskip
\includegraphics{Ship_Domain_1.png}
\caption{Bearing and distance for all encounter types and all areas of the Puget Sound with distance less than 2nm. Blue dots are in a TSS; red dots are not.}
\label{fig:shipDomainResutls}
\end{figure*}
}
\par{% ferry-ferry
The first plot that stands out in Figure \ref{fig:shipDomainResutls} is the \textit{target ship ferry} - \textit{ownship ferry} pairing. The plot area is almost totally covered and mostly in red; this shows (1) the large number of ferry-ferry passings, (2) that the majority of them take place outside the traffic separation schemes, and (3) that the safe passing distance for this pairing is relatively small. Another plot that stands out is the \textit{target ship cargo} - \textit{ownship cargo} pairing. In contrast to the ferry-ferry plot, the majority of passings take place within a traffic separation scheme (blue dots), which accounts for the bands of points to the port and starboard. The traffic separation schemes impose order on vessels traveling in the same and opposite directions by separating vessels into ``lanes,'' with a separation zone in between. From this I can conclude that cargo vessels in the Puget Sound make use of the traffic separation schemes.
}
\par{%
Turning my attention to vessel pairings of different types, the \textit{target ship cargo} - \textit{ownship ferry} and \textit{target ship ferry} - \textit{ownship cargo} have complementary plots. In the \textit{target ship ferry} - \textit{ownship cargo} plot, I observe a buffer area ahead of the cargo vessel that has relatively few observations of ferry target vessels. In the \textit{target ship cargo} - \textit{ownship ferry} plot, I observe the opposite, with a buffer area astern of the ferry that has relatively few observations of cargo target vessels. Additionally the ratio of red to blue points appears to be opposite. This suggests that ferries avoid passing ahead of cargo vessels.
}
\par{% regression
From these plots, I hypothesize that bearing to target ship, vessel types, and whether one, both, or none of the vessels are in a traffic separation scheme influence the distance from the ownship to the target ship. The results of the ship domain regression analysis described in Section \ref{sec:regress} are shown in the table below.
\vspace{0.1in}
\begin{fullwidth}
\begin{center}
\begin{tabular}{llll}
\toprule
\textbf{Dep. Variable:} & distance\_12 & \textbf{ R-squared: } & 0.180 \\
\textbf{Model:} & OLS & \textbf{ Adj. R-squared: } & 0.180 \\
\textbf{Method:} & Least Squares & \textbf{ F-statistic: } & 248.4 \\
\textbf{Date:} & Sat, 30 May 2020 & \textbf{ Prob (F-statistic):} & 0.00 \\
\textbf{Time:} & 15:23:58 & \textbf{ Log-Likelihood: } & -6.5667e+05 \\
\textbf{No. Observations:} & 67056 & \textbf{ AIC: } & 1.313e+06 \\
\textbf{Df Residuals:} & 67030 & \textbf{ BIC: } & 1.314e+06 \\
\textbf{Df Model:} & 25 & \textbf{ } & \\
\bottomrule
\end{tabular}
\begin{tabular}{lcccccc}
& \textbf{coef} & \textbf{std err} & \textbf{z} & \textbf{P$> |$z$|$} & \textbf{[0.025} & \textbf{0.975]} \\
\midrule
\textbf{const} & 5120.9747 & 319.617 & 16.022 & 0.000 & 4494.537 & 5747.412 \\
\textbf{type\_1\_ferry} & -419.5294 & 186.939 & -2.244 & 0.025 & -785.923 & -53.136 \\
\textbf{type\_1\_tanker} & 347.3420 & 757.388 & 0.459 & 0.647 & -1137.110 & 1831.794 \\
\textbf{type\_2\_ferry} & -225.9696 & 187.348 & -1.206 & 0.228 & -593.166 & 141.227 \\
\textbf{type\_2\_tanker} & 444.2651 & 832.260 & 0.534 & 0.593 & -1186.935 & 2075.465 \\
\textbf{tss\_1\_True} & 460.1361 & 154.333 & 2.981 & 0.003 & 157.649 & 762.623 \\
\textbf{tss\_2\_True} & 483.7138 & 138.287 & 3.498 & 0.000 & 212.676 & 754.752 \\
\textbf{tss\_both\_True} & 1073.5515 & 232.102 & 4.625 & 0.000 & 618.640 & 1528.463 \\
\textbf{bearing\_12\_sin} & 114.0510 & 365.306 & 0.312 & 0.755 & -601.935 & 830.037 \\
\textbf{bearing\_12\_cos} & -1836.9543 & 141.225 & -13.007 & 0.000 & -2113.751 & -1560.158 \\
\textbf{sog\_1} & 55.0582 & 8.911 & 6.178 & 0.000 & 37.592 & 72.524 \\
\textbf{sog\_2} & 42.9914 & 8.610 & 4.993 & 0.000 & 26.116 & 59.867 \\
\textbf{type\_1\_ferry\_sin} & 41.1128 & 271.461 & 0.151 & 0.880 & -490.941 & 573.166 \\
\textbf{type\_1\_tanker\_sin} & 198.5102 & 1055.755 & 0.188 & 0.851 & -1870.731 & 2267.752 \\
\textbf{type\_2\_ferry\_sin} & 310.0832 & 270.071 & 1.148 & 0.251 & -219.246 & 839.412 \\
\textbf{type\_2\_tanker\_sin} & 488.3426 & 1189.639 & 0.410 & 0.681 & -1843.306 & 2819.991 \\
\textbf{tss\_1\_True\_sin} & -1185.6063 & 213.580 & -5.551 & 0.000 & -1604.215 & -766.998 \\
\textbf{tss\_2\_True\_sin} & -210.0436 & 201.877 & -1.040 & 0.298 & -605.716 & 185.629 \\
\textbf{tss\_both\_True\_sin} & 2446.6064 & 373.159 & 6.556 & 0.000 & 1715.228 & 3177.985 \\
\textbf{type\_1\_ferry\_cos} & 301.1415 & 96.500 & 3.121 & 0.002 & 112.006 & 490.277 \\
\textbf{type\_1\_tanker\_cos} & 457.3471 & 128.090 & 3.571 & 0.000 & 206.295 & 708.400 \\
\textbf{type\_2\_ferry\_cos} & 301.3079 & 98.885 & 3.047 & 0.002 & 107.497 & 495.119 \\
\textbf{type\_2\_tanker\_cos} & 5.8585 & 175.110 & 0.033 & 0.973 & -337.350 & 349.067 \\
\textbf{tss\_1\_True\_cos} & -517.8695 & 136.847 & -3.784 & 0.000 & -786.084 & -249.655 \\
\textbf{tss\_2\_True\_cos} & -325.2054 & 146.896 & -2.214 & 0.027 & -613.117 & -37.294 \\
\textbf{tss\_both\_True\_cos} & -310.4683 & 195.778 & -1.586 & 0.113 & -694.186 & 73.250 \\
\bottomrule
\end{tabular}
\begin{tabular}{llll}
\textbf{Omnibus:} & 3336.757 & \textbf{ Durbin-Watson: } & 0.619 \\
\textbf{Prob(Omnibus):} & 0.000 & \textbf{ Jarque-Bera (JB): } & 4171.963 \\
\textbf{Skew:} & 0.514 & \textbf{ Prob(JB): } & 0.00 \\
\textbf{Kurtosis:} & 3.659 & \textbf{ Cond. No. } & 390. \\
\bottomrule
\end{tabular}
\end{center}
Warnings: \newline
[1] Standard Errors are robust to cluster correlation (cluster)
\end{fullwidth}
}
\newpage
\par{%
Conducting an F-test on the features of interest shows that all are statistically significant.
\begin{tabular}{lllrr}
\toprule
Feature & F Value & P Value & DF Denom & DF Num \\
\midrule
type\_1 & 5.239 & 2.302e-05 & 1968 & 6.0 \\
type\_2 & 2.7541 & 0.0114 & 1968 & 6.0 \\
TSS & 19.056 & 7.714e-31 & 1968 & 9.0 \\
bearing & 355.764 & 0.0 & 1968 & 16.0 \\
\bottomrule
\end{tabular}
}
\vspace{0.1in}
\par{% features
By plugging different scenarios into the estimated model, I can generate the conditional mean of the ship domain distance under various circumstances. The scenarios consist of choosing a vessel type for each ship and whether each ship is in a traffic separation scheme. Using the average speed for the vessel type and calculating the average distance at each bearing in $[0, 360)$ degrees for the scenarios listed below yields the plot of ship domains in Figure \ref{fig:domainResultsRegress} (a sketch of this calculation is given after the figure):
\begin{enumerate}
\item ship \textit{i} ferry in TSS, ship \textit{j} cargo in TSS
\item ship \textit{i} ferry in TSS, ship \textit{j} ferry not in TSS
\item ship \textit{i} cargo in TSS, ship \textit{j} ferry in TSS
\item ship \textit{i} cargo in TSS, ship \textit{j} cargo in TSS
\item ship \textit{i} cargo out of TSS, ship \textit{j} cargo out of TSS
\item ship \textit{i} ferry out of TSS, ship \textit{j} cargo in TSS
\end{enumerate}
\begin{figure*}
\centering
\includegraphics{RegressionPlot.png}
\caption{The observed ship domains appear larger on the starboard side and astern of the ship.}
\label{fig:domainResultsRegress}
\end{figure*}
The larger overall ship domains correspond to Scenarios 1, 3, and 4, where both ships are in a traffic separation scheme. The ship domains for these scenarios are also larger to starboard than to port. Port-to-port passings may take place at smaller distances since both ships are in their dedicated lane and the risk of collision is low. The smaller ship domains correspond to Scenarios 2, 5, and 6, where at least one ship is not in a TSS. I can therefore conclude that the traffic separation schemes are successful at separating traffic.
}
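\par{%
As an illustration only, the calculation behind Figure \ref{fig:domainResultsRegress} can be sketched in a few lines of Python. The helper below is hypothetical (it is not the analysis code used for this dissertation) and assumes the fitted coefficients are stored in a dictionary keyed by the variable names in the regression table above.
}
\begin{verbatim}
# Sketch (hypothetical helper): conditional mean distance at each bearing for
# one scenario, holding speed over ground at the vessel-type averages.
import numpy as np

def domain_distance(params, scenario, sog_1, sog_2):
    # params: fitted coefficients keyed by the names in the regression table
    # scenario: indicator values, e.g. {'type_1_ferry': 1, 'tss_both_True': 1}
    bearings = np.deg2rad(np.arange(0, 360))
    s, c = np.sin(bearings), np.cos(bearings)
    d = (params['const']
         + params['bearing_12_sin'] * s + params['bearing_12_cos'] * c
         + params['sog_1'] * sog_1 + params['sog_2'] * sog_2)
    for name, value in scenario.items():
        d += params[name] * value                        # main effect
        d += params.get(name + '_sin', 0.0) * value * s  # bearing interaction
        d += params.get(name + '_cos', 0.0) * value * c
    return d   # conditional mean distance at bearings 0..359
\end{verbatim}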
\section{Readily Apparent Alterations}
\par{Rules 8a and 8b of the \textsc{colregs} state that:
\begin{quotation}
Any action taken to avoid collision shall be taken in accordance with the Rules of this Part and shall, if the circumstances of the case admit, be positive, made in ample time and with due regard to the observance of good seamanship. Any alteration of course and/or speed to avoid collision shall, if the circumstances of the case admit, be large enough to be readily apparent to another vessel observing visually or by radar; a succession of small alterations of course and/or speed should be avoided.
\end{quotation}
Identifying large evasive maneuvers proved difficult; large course alterations were associated with turning points in routes as vessels made their way to and from ports rather than with collision avoidance. One explanation for why the observed encounters include so few readily apparent alterations could be that ferry captains can observe on AIS that a cargo vessel is in the TSS before leaving the ferry terminal. If a cargo vessel will be nearby during the ferry crossing, the ferry takes a slightly arcing route across the TSS to pass astern of the cargo vessel rather than beginning on a direct route and making a large course alteration in the middle of the TSS.
}
\par{%
To investigate this hypothesis, I took the ferries traveling between Seattle and Bainbridge and compared their route characteristics when they were encountering another ferry (319 encounters) and when they were encountering a cargo vessel (51 encounters). The length and duration plots in Figure \ref{fig:tripChar} show that ferry trips are longer in both length and duration when the ferry is encountering a cargo vessel than when it is encountering another ferry. Plot (c) shows the straightness index, which is the displacement of the trip divided by its length. If the trip were perfectly straight across, the straightness index would be 1. The figure shows that when the target vessel is a cargo vessel, the straightness of the route slightly decreases.
}
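\par{%
For concreteness, the straightness index shown in plot (c) can be computed from a time-ordered sequence of projected track positions, as in the short sketch below; this helper is illustrative and is not the processing code used for this analysis.
}
\begin{verbatim}
# Sketch: straightness index = straight-line displacement / along-track length.
import numpy as np

def straightness_index(x, y):
    # x, y: projected track coordinates (e.g. meters), in time order
    length = np.hypot(np.diff(x), np.diff(y)).sum()     # distance traveled
    displacement = np.hypot(x[-1] - x[0], y[-1] - y[0])
    return displacement / length                        # 1.0 for a straight trip
\end{verbatim}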
\begin{figure*}
\begin{tabular}{cc}
\includegraphics[width=85mm]{lengthTrip.png}&
\includegraphics[width=85mm]{durationTrip.png} \\
(a) Length & (b) Duration \\[6pt]
\includegraphics[width=85mm]{si.png} &
\includegraphics[width=85mm]{sogTrip.png} \\
(c) Straightness & (d) Speed \\[6pt]
\end{tabular}
\caption{Trip characteristics (length, duration, straightness, and speed) of Seattle-Bainbridge ferries when encountering another ferry compared to encountering a cargo vessel.}
\label{fig:tripChar}
\forcerectofloat
\end{figure*}
\par{%
This supports the hypothesis that the crossing ferries slightly alter their routes across the TSS to avoid cargo vessels. This type of collision avoidance does not show up in the data as large course or speed alterations. The cargo vessels show no difference in behavior whether they are the stand-on or the give-way vessel; even when the ferry is the stand-on vessel, it maneuvers to pass astern of the cargo vessel. International cargo vessels will have a local pilot on board who can inform them of this informal ferry rule.
}
\section{Traffic Separation Scheme Crossing Angle}
\par{
Rule 10 of the \textsc{colregs} specifies that:
\begin{quotation}
A vessel shall, so far as practicable, avoid crossing traffic lanes but if obliged to do so shall cross on a heading as nearly as practicable at right angles to the general direction of traffic flow.
\end{quotation}
Again, the rule is intended to make ships crossing the traffic separation scheme distinct from ships which are joining it. Observing Washington State Ferry (WSF) crossings of the traffic separation schemes between Seattle on the east and Bainbridge and Bremerton on the west, using the approach set out in the methodology, shows a bi-modal distribution of crossing angles (Figure \ref{fig:entrance_angle_his}). The locations of the TSS entrances are mapped in Figure \ref{fig:anlge}, where blue points correspond to entrance angles between 80 and 90 degrees relative to the TSS and red points correspond to entrance angles between 0 and 80 degrees.
\begin{figure*}
\centering
\includegraphics[width=1\textwidth]{TSS_Entrance_Angle.png}
\caption{Washington State Ferries' relative angle to traffic separation scheme.}
\label{fig:entrance_angle_his}
\end{figure*}
\begin{figure*}
\centering
\includegraphics{entrance_angle.png}
\caption{Points where a WSF entered a traffic separation scheme.}
\label{fig:anlge}
\forceversofloat
\end{figure*}
}
\par{
The mode near 90 degrees corresponds to ferries traveling between Seattle and Bainbridge, where the ferry route already crosses the traffic separation scheme at a near-90 degree angle (Figure \ref{fig:bainbridge}). The mode near 70 degrees corresponds to ferries traveling between Seattle and Bremerton, where the ferry route is at a non-90 degree angle to the TSS (Figure \ref{fig:bremerton}). Because all Seattle-Bremerton ferries crossed the TSS at a non-90 degree angle, this behavior is not considered a violation but rather an informal rule. An explanation for why this informal rule has not caused a safety concern could be that the distinctive WSF vessels, along with their AIS information, give other vessels enough information to know that the ferries are following a known route across the TSS and not joining it.
\begin{figure*}
\centering
\includegraphics{angle90.png}
\caption{Example Seattle-Bainbridge ferry route that crosses the TSS at a relative 90 degree angle.}
\label{fig:bainbridge}
\end{figure*}
\begin{figure*}
\centering
\includegraphics{angleBad.png}
\caption{Example Seattle-Bremerton ferry route that crosses the TSS at a relative non-90 degree angle.}
\label{fig:bremerton}
\end{figure*}
}
\newpage
\section{Starboard-Starboard Head-On Passings}
\begin{marginfigure}
\includegraphics{headonCPA.png}
\caption{There does not appear to be a difference between port-to-port and starboard-to-starboard CPA distances.}
\end{marginfigure}
\par{
The \textsc{colregs} require port-to-port passings in head-on encounters. Looking at head-on encounters with a CPA of two nautical miles or less (this removes the head-on encounters that appear at larger distances due to traffic in the TSS), 40\% are port-to-port while 60\% are starboard-to-starboard. Investigating the starboard-to-starboard passings (Figure \ref{fig:head_on_encounters}), I found that the majority correspond to ferry-ferry passings. Ferries have known routes and a known population of navigators, and some of the known routes require a starboard-to-starboard passing on occasion. For example, the Seattle-Bainbridge route is north of the Seattle-Bremerton route; when one ferry is pulling into Seattle from Bainbridge while another is pulling out of Seattle to Bremerton, the passing occurs starboard-to-starboard.
}
\par{
The other starboard-to-starboard passings almost always have one vessel in the TSS and the other vessel outside the TSS. The vessel in the TSS is expected to continue in the TSS and may not be able to maneuver outside the TSS; the vessel outside the TSS should not enter the TSS in the wrong direction to force a port-to-port passing.
\begin{figure}
\centering
\includegraphics{HO.png}
\caption{The majority of starboard-to-starboard head-on passings are attributed to ferries.}
\label{fig:head_on_encounters}
\end{figure}
\begin{figure}
\centering
\includegraphics{HO_Split.png}
\caption{When one vessel is in the TSS and the other is not, the informal rule is to pass starboard-to-starboard and not force a port-to-port.}
\label{fig:my_label}
\end{figure}
}
\section{Conclusions}
\par{%
This dissertation has found that the Puget Sound is a safe maritime environment with regard to ferry and cargo vessels. The traffic separation schemes limit the number of encounters and keep passing ships at a safe distance. The Washington State Ferries appear to avoid a risk of collision with cargo vessels by delaying their crossing of the traffic separation schemes in order to pass astern of cargo vessels regardless of their give-way status. They do this by slightly altering their course and speed rather than making noticeable alterations. Outside of the informal rules, no violations of the \textsc{colregs} were discovered. The main conclusions from this research are:
\begin{enumerate}
\item Cargo vessels use the traffic separation schemes to maintain safe passing distance.
\item No cargo vessel was found to alter course or speed in response to the presence of a ferry vessel.
\item Ferries cross the traffic separation scheme at an angle that aligns with their route rather than at a strictly 90 degree angle.
\item Ferries make small alterations to avoid encountering cargo vessels.
\item Passing distances are slightly larger on starboard than on port.
\item Ferries are most likely to pass starboard-to-starboard due to the layout of their routes; other starboard-to-starboard head-on encounters occur when one vessel is traveling in the TSS and the other is not.
\end{enumerate}
}
% \par{% overview summary of characteristics
% Seattle has a very good safety culture. In 2014, it had only 4 reported incidents of type collision, allision, or grounding compared to the lower Mississippi which had 59
% \sidenote{Puget Sound Partnership. (2014). VTRA 2010 Final Report. \url{https://www.seas.gwu.edu/\~dorpjr/tab4/publications\_VTRA\_Update\_Reports.html}}
% \sidenote{USCG. (1973). Increased Safety through Vessel Traffic Systems. \textit{Proceedings of the Marine Safety Council.}, \textbf{30}, 12. pp. 251-257.}
% \sidenote{USCG. (2016). Coast Pilot 7 Pacific Coast: California, Oregon, Washington, Hawaii, and Pacific Islands.} The need to direct vessels is not common, occurring, on average, 40 times a year.\sidenote{USCG. (2016). Coast Guard Intervenes in Dangerous Vessel Traffic in Puget Sound. \url{http://www.uscgnews.com/go/doc/4007/2858366/Coast-Guard-intervenes-in-dangerous-vessel-traffic-in-Puget-Sound}}
% }
% =================================================================
% APPENDICES
% ==================================================================
% \begin{appendices}
% \appendixpage
% \noappendicestocpagenum
% \addappheadtotoc
% \end{appendices}
% =================================================================
% REFERENCES
% ==================================================================
\backmatter
\bibliographystyle{plainnat}
\bibliography{references.bib}
\end{document}
| {
"alphanum_fraction": 0.7648619893,
"avg_line_length": 131.4845744681,
"ext": "tex",
"hexsha": "03a74c4288f4027d31ab837519df8f4a907a54f7",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "6e139e64e11e29cb5a651c97740ec2972b02e663",
"max_forks_repo_licenses": [
"FTL",
"RSA-MD"
],
"max_forks_repo_name": "mkrowell/phd",
"max_forks_repo_path": "reports/dissertation.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "6e139e64e11e29cb5a651c97740ec2972b02e663",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"FTL",
"RSA-MD"
],
"max_issues_repo_name": "mkrowell/phd",
"max_issues_repo_path": "reports/dissertation.tex",
"max_line_length": 2316,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "6e139e64e11e29cb5a651c97740ec2972b02e663",
"max_stars_repo_licenses": [
"FTL",
"RSA-MD"
],
"max_stars_repo_name": "mkrowell/phd",
"max_stars_repo_path": "reports/dissertation.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 57550,
"size": 247191
} |
\documentclass[a4paper]{article}
\usepackage[english]{babel}
\usepackage[utf8]{inputenc}
\usepackage{amsmath}
\usepackage{graphicx}
\usepackage{listings}
\usepackage{algorithmic}
\usepackage[ruled]{algorithm2e} % For algorithms
\usepackage[colorinlistoftodos]{todonotes}
\title{High Performance Computing and Big Data - Cloud computing}
\author{Federico Tavella, Student number 11343605}
\date{\today}
\begin{document}
\maketitle
\section{Exercise 1}
%Explain Oliver what your idea is: tell him which features of the HPC Cloud you are going to use to tackle his problem. Tell him how beautiful your answer is because... do you need to change the montecarlo program?
In order to tackle this problem, we can use the HPC Cloud to allocate multiple virtual machines, each of which will compute part of the ``pie''.
For example, let's consider a scenario where Oliver is the chief cook and he has $N$ cook assistants. In this scenario, we can cook the pie by assembling $N$ small pies cooked by different chefs. However, the assistants do not know how to cook the pie, so they need to follow the instructions given by the chief. At this point, Oliver can send the recipe (i.e., the \texttt{montecarlo} program) to each assistant and order them to cook part of the pie. Moreover, Oliver himself could participate in the preparation of the pie by preparing his own part, reducing the number of assistants from $N$ to $N-1$. Finally, all the small pies can be combined to compose the final pie.
We can see that this approach does not require changing the \texttt{montecarlo} program; it focuses on distributing the work among different workers (i.e., virtual machines).
\section{Exercise 2}
%Oliver’s recipe is missing pictures. Draw a sketch that Oliver can keep next to his recipe forever, that illustrates what you came up with in the previous exercise.
\begin{figure}[htbp]
\centering
\includegraphics[width=\textwidth]{res/montecarlo.png}
\caption{UML diagram of the problem.}
\label{fig:montecarlo}
\end{figure}
In Figure~\ref{fig:montecarlo}, each participant is represented by one class. In our scenario, Oliver is the master who tells the other assistants (i.e., the workers) how to cook the pie. Each cook is a different virtual machine inside the same network, with an associated internal IP. Since the master knows the IP of every worker, he can send the task to them.
\section{Exercise 3}
%Sketch in pseudocode (https://en.wikipedia.org/wiki/Pseudocode) what you think the programs look like: the master and the worker.
Algorithm~\ref{alg:master} shows what the program for the master looks like. Basically, the master divides the number of points by the number of workers, obtaining $N'$, to distribute the task equally among the workers (obviously, if $N$ is not a multiple of $W$ there will be a worker with more points to compute). This subdivision is implicitly done with \texttt{OpenMP} or \texttt{MPI}. Each worker, illustrated with Algorithm~\ref{alg:worker}, computes the number of points $M'$ contained in the circle of radius $R$. This part is executed in parallel across all workers. Consequently, the number of points inside the circle is calculated as the sum of all the values provided by the workers and, finally, we can compute an approximation of $\pi$. A concrete sketch of this master/worker scheme is given after the two algorithms.
\begin{algorithm}[htbp]
%\SetAlgoNoLine
\KwIn{N amount of points, R radius, W number of workers}
\KwOut{An approximation of $\pi$}
$N' \leftarrow N/W$\;
\ForAll{$worker$ in $W$}{
\textit{calculate M' with N' points using worker}\;
\textit{store M'}
}
\textit{Wait last worker to finish}\;
$M \leftarrow \sum(M')$\;
$\pi \leftarrow 4 \cdot M/N$\;
\caption{Pseudocode for the master.}
\label{alg:master}
\end{algorithm}
\begin{algorithm}[htbp]
%\SetAlgoNoLine
\KwIn{N' amount of points, R radius}
\KwOut{M' number of points inside the circle of radius R}
$M' \leftarrow 0$\;
\ForAll{$point$ \textbf{in} $N'$}{
\If{$isInside(point, circle(R)) = True$}{
$M' \leftarrow M' + 1$\;
}
}
\caption{Pseudocode for the worker.}
\label{alg:worker}
\end{algorithm}
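As an illustration only, the master/worker split of Algorithms~\ref{alg:master} and~\ref{alg:worker} could be written with \texttt{mpi4py} as in the sketch below. This is not the \texttt{montecarlo} program provided with the assignment; the dart-throwing kernel and the use of \texttt{mpi4py} are assumptions made for the example.

\begin{verbatim}
# Sketch (assumes mpi4py is available): every rank counts hits in its share of
# the N points; rank 0 (the "chief cook") sums the partial results M'.
from mpi4py import MPI
import random

def montecarlo(n, radius=1.0):
    hits = 0
    for _ in range(n):
        x, y = random.uniform(0, radius), random.uniform(0, radius)
        if x * x + y * y <= radius * radius:
            hits += 1
    return hits

comm = MPI.COMM_WORLD
N = 10_000_000
n_local = N // comm.size                      # N' points for this worker
m_local = montecarlo(n_local)                 # M' hits on this worker
M = comm.reduce(m_local, op=MPI.SUM, root=0)  # master collects the partial sums
if comm.rank == 0:
    print("pi is approximately", 4.0 * M / (n_local * comm.size))
\end{verbatim}

Run, for example, with \texttt{mpirun -np 4 python montecarlo\_mpi.py}; the number of processes plays the role of $W$.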
\section{Exercise 4}
Firstly, we need to create the virtual machine representing the master. We can use the template from the \texttt{MPI} extra exercise, tweaking the values for this problem (e.g., reducing the number of cores to 1 and making the image non-persistent). Once the master has been created, we need to configure it for SSH communication and with the \texttt{makeme\_master.sh} script. Secondly, we need to create one virtual machine for each worker. In this case too, we can re-use the \texttt{MPI} template. For each worker node, we need to set a password and configure it using the \texttt{makeme\_worker.sh} script, in order to tell the node which machine is the master.
\section{Exercise 5}
% Having to create all workers by hand is very tedious. You are so happy that you can show Oliver how to let the master create the workers on demand!!! Thus, for a solution where the master will create the workers on demand (the master now receives the amount of workers, W, as an extra parameter):
\subsection{Extra components}
%a) What extra components from the HPC Cloud do you need to let the master create the workers?
In order to let the master create the workers, we need to integrate the \texttt{XML-RPC} API into our project. This allows us to create, deploy, and terminate virtual machines \textbf{programmatically}.
\subsection{Pseudocode}
%b) Do the same as in exercise 3: write pseudocode for what the master looks like. Does the worker change? Think of releasing resources when the computation is finished!
In Algorithm~\ref{alg:master_2} we can see how the pseudocode for the master changes. In fact, the master now has the responsibility of allocating and deallocating the virtual machines corresponding to the workers. However, in this way resources are consumed more efficiently: once a worker completes its task, we can release its resources, which would otherwise be held until we released them through the UI. An illustrative sketch of such programmatic calls is given after the algorithm.
\begin{algorithm}[htbp]
%\SetAlgoNoLine
\KwIn{N amount of points, R radius, W number of workers}
\KwOut{An approximation of $\pi$}
\textit{//T is the template used for the workers}\;
\ForAll{$worker$ in $W$}{
$worker_{id} \leftarrow allocateVM(worker,T)$\;
$deployVM(worker_{id})$\;
\textit{calculate M' with N' points using worker}\;
\textit{store M'}\;
$terminateVM(worker_{id})$\;
}
$M \leftarrow \sum(M')$\;
$\pi \leftarrow 4 \cdot M/N$\;
\caption{Pseudocode for the master.}
\label{alg:master_2}
\end{algorithm}
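Purely as an illustration of what ``programmatically'' means here, the sketch below uses Python's standard \texttt{xmlrpc.client} module. The endpoint URL, the session string, the reply layout, and the RPC method names are assumptions and must be replaced by the names documented for the HPC Cloud (OpenNebula) XML-RPC API.

\begin{verbatim}
# Sketch only: endpoint, credentials, method names and reply layout below are
# placeholders; check the HPC Cloud / OpenNebula XML-RPC documentation.
import xmlrpc.client

endpoint = "https://hpccloud.example.org:2633/RPC2"   # assumed URL
session = "username:password"                         # assumed auth format
worker_template = open("worker_template.txt").read()

server = xmlrpc.client.ServerProxy(endpoint)
# allocate and deploy one worker VM from the template (method name assumed)
reply = server.one.vm.allocate(session, worker_template, False)
worker_id = reply[1]                                  # assumed reply layout
# ... wait for the worker, send it N' points, collect its M' ...
server.one.vm.action(session, "terminate", worker_id) # release the resources
\end{verbatim}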
\subsection{UI relevant steps}
%c) Do the same as in exercise 4: write the relevant steps that you must follow in the UI to run the new version of the master (yes, assume again that you get a program with this version of the master’s work) and workers; again, 1 master and 3 workers
Using \text{XML-RPC}, we can avoid to use the UI to create the virtual machines. Thus, we can create the master and the workers programmatically. Obviously, we need a template to do so, but that could also be created without the UI.
\end{document} | {
"alphanum_fraction": 0.763718437,
"avg_line_length": 58.106557377,
"ext": "tex",
"hexsha": "a8a06163c38cca1fd0838cc9cc913bf233a74296",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "fd1e792e7e5199c65f324f9256a60004558e3208",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "tfederico/HPC_Assignments",
"max_forks_repo_path": "Assignment 4 - Cloud computing/main.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "fd1e792e7e5199c65f324f9256a60004558e3208",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "tfederico/HPC_Assignments",
"max_issues_repo_path": "Assignment 4 - Cloud computing/main.tex",
"max_line_length": 744,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "fd1e792e7e5199c65f324f9256a60004558e3208",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "tfederico/HPC_Assignments",
"max_stars_repo_path": "Assignment 4 - Cloud computing/main.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1833,
"size": 7089
} |
\filetitle{jforecast}{Forecast with judgmental adjustments (conditional forecasts)}{model/jforecast}
\paragraph{Syntax}\label{syntax}
\begin{verbatim}
F = jforecast(M,D,Range,...)
\end{verbatim}
\paragraph{Input arguments}\label{input-arguments}
\begin{itemize}
\item
\texttt{M} {[} model {]} - Solved model object.
\item
\texttt{D} {[} struct {]} - Input data from which the initial
condition is taken.
\item
\texttt{Range} {[} numeric {]} - Forecast range.
\end{itemize}
\paragraph{Output arguments}\label{output-arguments}
\begin{itemize}
\itemsep1pt\parskip0pt\parsep0pt
\item
\texttt{F} {[} struct {]} - Output struct with the judgmentally
adjusted forecast.
\end{itemize}
\paragraph{Options}\label{options}
\begin{itemize}
\item
\texttt{'anticipate='} {[} \emph{\texttt{true}} \textbar{}
\texttt{false} {]} - If true, real future shocks are anticipated,
imaginary are unanticipated; vice versa if false.
\item
\texttt{'currentOnly='} {[} \emph{\texttt{true}} \textbar{}
\texttt{false} {]} - If \texttt{true}, MSE matrices will be computed
only for the current-dated variables, not for their lags or leads.
\item
\texttt{'deviation='} {[} \texttt{true} \textbar{}
\emph{\texttt{false}} {]} - Treat input and output data as deviations
from balanced-growth path.
\item
\texttt{'dtrends='} {[} \emph{\texttt{@auto}} \textbar{} \texttt{true}
\textbar{} \texttt{false} {]} - Measurement data contain deterministic
trends.
\item
\texttt{'initCond='} {[} \emph{\texttt{'data'}} \textbar{}
\texttt{'fixed'} {]} - Use the MSE for the initial conditions if found
  in the input data or treat the initial conditions as fixed.
\item
\texttt{'meanOnly='} {[} \texttt{true} \textbar{}
\emph{\texttt{false}} {]} - Return only mean data, i.e.~point
estimates.
\item
\texttt{'plan='} {[} plan {]} - Simulation plan specifying the
exogenised variables and endogenised shocks.
\item
\texttt{'vary='} {[} struct \textbar{} \emph{empty} {]} - Database
with time-varying std deviations or cross-correlations of shocks.
\end{itemize}
\paragraph{Description}\label{description}
When adjusting the mean and/or std devs of shocks, you can use real and
imaginary numbers to distinguish between anticipated and unanticipated
shocks:
\begin{itemize}
\item
any shock entered as an imaginary number is treated as an anticipated
change in the mean of the shock distribution;
\item
any std dev of a shock entered as an imaginary number indicates that
the shock will be treated as anticipated when conditioning the
forecast on the reduced-form tunes.
\item
  the same shock or its std dev can have both a real and an imaginary
  part.
\end{itemize}
\paragraph{Example}\label{example}
| {
"alphanum_fraction": 0.7164874552,
"avg_line_length": 30.6593406593,
"ext": "tex",
"hexsha": "a61e7dcbe9c470268c7a973d0347fc2d5013f9d2",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2022-01-17T07:06:39.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-01-17T07:06:39.000Z",
"max_forks_repo_head_hexsha": "682ea1960229dc701e446137623b120688953cef",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "OGResearch/IRIS-Toolbox-For-Octave",
"max_forks_repo_path": "-help/model/jforecast.tex",
"max_issues_count": 4,
"max_issues_repo_head_hexsha": "682ea1960229dc701e446137623b120688953cef",
"max_issues_repo_issues_event_max_datetime": "2020-09-02T10:40:25.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-03-28T08:13:20.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "OGResearch/IRIS-Toolbox-For-Octave",
"max_issues_repo_path": "-help/model/jforecast.tex",
"max_line_length": 104,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "682ea1960229dc701e446137623b120688953cef",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "OGResearch/IRIS-Toolbox-For-Octave",
"max_stars_repo_path": "-help/model/jforecast.tex",
"max_stars_repo_stars_event_max_datetime": "2017-12-06T13:38:38.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-12-06T13:38:38.000Z",
"num_tokens": 813,
"size": 2790
} |
\documentclass[letterpaper,final,12pt,reqno]{amsart}
\usepackage[total={6.3in,9.2in},top=1.1in,left=1.1in]{geometry}
\usepackage{times,bm,bbm,empheq,verbatim,fancyvrb,graphicx}
\usepackage[dvipsnames]{xcolor}
\usepackage[kw]{pseudo}
\pseudoset{left-margin=15mm,topsep=5mm,idfont=\texttt}
\usepackage{tikz}
\usetikzlibrary{decorations.pathreplacing}
% hyperref should be the last package we load
\usepackage[pdftex,
colorlinks=true,
plainpages=false, % only if colorlinks=true
linkcolor=blue, % ...
citecolor=Red, % ...
urlcolor=black % ...
]{hyperref}
\DefineVerbatimEnvironment{cline}{Verbatim}{fontsize=\small,xleftmargin=5mm}
\renewcommand{\baselinestretch}{1.05}
\newtheorem{lemma}{Lemma}
\newcommand{\Matlab}{\textsc{Matlab}\xspace}
\newcommand{\eps}{\epsilon}
\newcommand{\RR}{\mathbb{R}}
\newcommand{\grad}{\nabla}
\newcommand{\Div}{\nabla\cdot}
\newcommand{\trace}{\operatorname{tr}}
\newcommand{\hbn}{\hat{\mathbf{n}}}
\newcommand{\bb}{\mathbf{b}}
\newcommand{\be}{\mathbf{e}}
\newcommand{\bbf}{\mathbf{f}}
\newcommand{\bg}{\mathbf{g}}
\newcommand{\bn}{\mathbf{n}}
\newcommand{\br}{\mathbf{r}}
\newcommand{\bu}{\mathbf{u}}
\newcommand{\bv}{\mathbf{v}}
\newcommand{\bw}{\mathbf{w}}
\newcommand{\bx}{\mathbf{x}}
\newcommand{\bV}{\mathbf{V}}
\newcommand{\bX}{\mathbf{X}}
\newcommand{\bxi}{\bm{\xi}}
\newcommand{\bzero}{\bm{0}}
\newcommand{\rhoi}{\rho_{\text{i}}}
\newcommand{\ip}[2]{\left<#1,#2\right>}
\newcommand{\Rpr}{R_{\text{pr}}}
\newcommand{\Rin}{R_{\text{in}}}
\newcommand{\Rfw}{R_{\text{fw}}}
\begin{document}
\title[The FAS multigrid scheme]{The full approximation storage multigrid scheme: \\ A 1D finite element example}
\author{Ed Bueler}
\begin{abstract} This note describes the full approximation storage (FAS) multigrid scheme for an easy one-dimensional nonlinear boundary value problem. The problem is discretized by a simple finite element (FE) scheme. We apply both FAS V-cycles and F-cycles, with a nonlinear Gauss-Seidel smoother, to solve the resulting finite-dimensional problem. The mathematics of the FAS restriction and prolongation operators, in the FE case, are explained. A self-contained Python program implements the scheme. Optimal performance, i.e.~work proportional to the number of unknowns, is demonstrated for both kinds of cycles, including convergence nearly to discretization error in a single F-cycle. \end{abstract}
\thanks{Version 2. This note is expository, and submission for publication is not foreseen. Thanks to Matt Knepley for thoughtful comments.}
\maketitle
\tableofcontents
\thispagestyle{empty}
\bigskip
\section{Introduction} \label{sec:intro}
We consider the full approximation storage (FAS) scheme, originally described by Brandt \cite{Brandt1977}, for an easy nonlinear elliptic equation. Like other multigrid schemes it exhibits optimal solver complexity \cite{Bueler2021} when correctly applied, as we demonstrate at the end. Helpful write-ups of FAS can be found in well-known textbooks \cite{BrandtLivne2011,Briggsetal2000,Trottenbergetal2001}, but we describe the scheme from a finite element point of view, compatible with the multigrid approaches used for obstacle problems \cite{GraeserKornhuber2009} for example, and we provide an easy-to-digest Python implementation.
Our problem is an ordinary differential equation (ODE) boundary value problem, the nonlinear Liouville-Bratu equation \cite{Bratu1914,Liouville1853}:
\begin{equation}
-u'' - \lambda\, e^u = g, \qquad u(0) = u(1) = 0. \label{liouvillebratu}
\end{equation}
In this problem $\lambda$ is a real constant, $g(x)$ is given, and we seek $u(x)$. This equation arises in the theory of combustion \cite{FrankKameneckij1955} and the stability of stars.
Our goal is to solve \eqref{liouvillebratu} in optimal $O(m)$ time on a mesh of $m$ elements. A Python implementation of FAS, \texttt{fas1.py} in directory \texttt{fas/py/},\footnote{Clone the Git repository\, \href{https://github.com/bueler/mg-glaciers}{\texttt{github.com/bueler/mg-glaciers}}\, and look in the \texttt{fas/py/} directory.} accomplishes such optimal-time solutions both by V-cycle and F-cycle strategies (section \ref{sec:cycles}). (This note serves as its documentation.) While optimal-time solutions of 1D problems are not unusual, FAS and other multigrid strategies for many nonlinear 2D and 3D partial differential equations (PDEs) are also optimal. This makes them the highest-performing class of solver algorithms for such problems.
By default the program \texttt{fas1.py} solves \eqref{liouvillebratu} with $g=0$. A runtime option \texttt{-mms}, the ``method of manufactured solutions'' \cite{Bueler2021}, facilitates testing by specifying a problem with known exact solution and nonzero source term. In detail, the solution is $u(x)=\sin(3\pi x)$, and by differentiation $g(x)=9\pi^2 \sin(3\pi x) - \lambda e^{\sin(3\pi x)}$.
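As a quick check on this manufactured solution, the following short script, a sketch which is independent of \texttt{fas1.py}, verifies symbolically that $-u''-\lambda e^u$ reproduces the stated $g$:

\begin{verbatim}
# Sketch: verify g(x) = 9 pi^2 sin(3 pi x) - lambda e^{sin(3 pi x)} for -mms.
import sympy as sp

x, lam = sp.symbols('x lambda')
u = sp.sin(3 * sp.pi * x)
g = -sp.diff(u, x, 2) - lam * sp.exp(u)     # apply the differential operator to u
claimed = 9 * sp.pi**2 * sp.sin(3*sp.pi*x) - lam * sp.exp(sp.sin(3*sp.pi*x))
print(sp.simplify(g - claimed))             # prints 0
\end{verbatim}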
\section{The finite element method} \label{sec:femethod}
To solve the problem using the finite element (FE) method \cite{Braess2007,Bueler2021,Elmanetal2014}, we rewrite \eqref{liouvillebratu} in weak form. Let $F$ be the nonlinear operator
\begin{equation}
F(u)[v] = \int_0^1 u'(x) v'(x) - \lambda e^{u(x)} v(x)\, dx, \label{operator}
\end{equation}
acting on $u$ and $v$ from the space of functions $\mathcal{H}=H_0^1[0,1]$, a Sobolev space \cite{Evans2010}. (These functions have zero boundary values and one square-integrable derivative.) Note $F(u)[v]$ is linear in $v$ but not in $u$. We also define a linear functional from the right-hand function $g$ in \eqref{liouvillebratu}:
\begin{equation}
\ell[v] = \ip{g}{v} = \int_0^1 g(x) v(x) dx. \label{rhsfunctional}
\end{equation}
Both $F(u)[\cdot]$ and $\ell[\cdot]$ are (continuous) linear functionals, acting on functions $v$ in $\mathcal{H}$, thus they are in the dual space $\mathcal{H}'$. One derives the weak form
\begin{equation}
F(u)[v] = \ell[v] \qquad \text{for all $v$ in $\mathcal{H}$} \label{weakform}
\end{equation}
by multiplying equation \eqref{liouvillebratu} by a test function $v$ and integrating by parts.
From now on we address problem \eqref{weakform}, despite its abstract form. In an FE context a clear separation is desirable between functions, like the solution $u(x)$, and the equations themselves, which are, essentially, functionals. As in linear algebra, where one indexes the equations by row indices, \eqref{weakform} states ``$v$th equation''; the equations are indexed by the test functions. The FE method will reduce the problem to a finite number of unknowns by writing $u(x)$ in a basis of a finite-dimensional subspace of $\mathcal{H}$. One gets finitely-many equations by using test functions $v$ from the same basis.
We apply the simplest possible mesh setup, namely an equally-spaced mesh on $[0,1]$ of $m$ elements (subintervals) of lengths $h=1/m$. The interior nodes (points) are $x_p=ph$ for $p=1,\dots,m-1$. This mesh supports a finite-dimensional vector subspace of $\mathcal{H}$:
\begin{equation}
\mathcal{S}^h = \left\{v(x)\,\big|\,v \text{ is continuous, linear on each subinterval, and } v(0)=v(1)=0\right\}. \label{fespace}
\end{equation}
This space has a basis of ``hat'' functions $\{\psi_p(x)\}$, one for each interior node (Figure \ref{fig:onehat}). Such a hat function $\psi_p$ is defined by two properties: $\psi_p$ is in $\mathcal{S}^h$ and $\psi_p(x_q)=\delta_{pq}$ for all $q$. Note that the $L^2$ norm of $\psi_p$ depends on the mesh resolution $h$, and that $\ip{\psi_p}{\psi_q}\ne 0$ for only three indices $q=p-1,p,p+1$. The basis of hat functions, while well-conditioned, is not orthonormal.
\begin{figure}
\includegraphics[width=0.6\textwidth]{figs/onehat.pdf}
\caption{A piecewise-linear hat function $\psi_p(x)$ lives at each interior node $x_p$.}
\label{fig:onehat}
\end{figure}
The numerical solution $u^h$ has the expansion
\begin{equation}
u^h(x) = \sum_{p=1}^{m-1} u[p] \psi_p(x) \label{fesolution}
\end{equation}
with coefficients $u[p]$ equal to the point values $u^h(x_p)$. That is, because the hat functions form a ``nodal basis'' \cite{Elmanetal2014}, $u^h$ may be represented as a vector $\bu$ in $\RR^{m-1}$ either by its coefficients in the basis $\{\psi_p\}$ or its point values:
\begin{equation}
\bu =\{u[p]\} = \{u^h(x_p)\}. \label{fevector}
\end{equation}
The FE approximation $F^h$ of the nonlinear operator $F$ in \eqref{operator} acts on functions in $\mathcal{S}^h$. Its values $F^h(w^h)[\psi_p]$ are easily computed if the transcendental integral is approximated, for example by using the trapezoid rule, as follows. Noting that the support of $\psi_p(x)$ is $[x_{p-1},x_{p+1}]$, and that the derivative of $\psi_p$ is $\pm 1/h$, we have:
\begin{align}
F(w^h)[\psi_p] &= \int_0^1 (w^h)'(x) \psi_p'(x) - \lambda e^{w^h(x)} \psi_p(x)\, dx \label{feoperator} \\
&= \int_{x_{p-1}}^{x_{p+1}} (w^h)'(x) (\pm 1/h)\,dx - \lambda \int_{x_{p-1}}^{x_{p+1}} e^{w^h(x)} \psi_p(x)\, dx \notag \\
&\approx \frac{w[p]-w[p-1]}{h} - \frac{w[p+1]-w[p]}{h} - h \lambda e^{w[p]} \notag \\
&= \frac{1}{h}\left(2w[p]-w[p-1]-w[p+1]\right) - h \lambda e^{w[p]} \notag \\
&= F^h(w^h)[\psi_p] \notag
\end{align}
Note that $F^h$ is a rescaled version of a well-known $O(h^2)$ finite difference expression. Function \texttt{FF()} in \texttt{fas1.py} computes this formula.
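A direct transcription of this formula into Python might look like the sketch below. It is only an illustration of what \texttt{FF()} computes, with the zero Dirichlet boundary values handled by padding, and is not necessarily identical to the code in \texttt{fas1.py}.

\begin{verbatim}
# Sketch of the discrete operator: return F^h(w)[psi_p] for p = 1,...,m-1.
import numpy as np

def FF(w, h, lam):
    # w: interior nodal values w[p], with w = 0 assumed at both boundary nodes
    wfull = np.concatenate(([0.0], w, [0.0]))
    win = wfull[1:-1]
    return (2.0 * win - wfull[:-2] - wfull[2:]) / h - h * lam * np.exp(win)
\end{verbatim}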
Now consider the right-hand-side functional $\ell[v]$ in \eqref{weakform}, which we will approximate by $\ell^h[v]$ acting on $\mathcal{S}^h$. We again apply the trapezoid rule to compute the integral $\ip{g}{\psi_p}$, and we get the simple formula
\begin{equation}
\ell^h[\psi_p] = h\, g(x_p). \label{ferhs}
\end{equation}
The linear functional $\ell^h$ and the function $g$ are different objects, though they only differ by a factor of the mesh size $h$.
The finite element weak form can now be stated:
\begin{equation}
F^h(u^h)[v] = \ell^h[v] \qquad \text{for all } v \text{ in } \mathcal{S}^h. \label{feweakform}
\end{equation}
To solve \eqref{feweakform} we seek an iterate $w^h$ so that the \emph{residual}
\begin{equation}
r^h(w^h)[v] = \ell^h[v] - F^h(w^h)[v] \label{feresidual}
\end{equation}
is small for all $v$ in $\mathcal{S}^h$. Again $r^h(w^h)$ is a linear functional acting on functions in $\mathcal{S}^h$, so it suffices to apply it to a basis of test functions $v=\psi_p$:
\begin{equation}
r^h(w^h)[\psi_p] = \ell^h[\psi_p] - \frac{1}{h}\left(2w[p]-w[p-1]-w[p+1]\right) + h \lambda e^{w[p]}. \label{feresidualdetail}
\end{equation}
Solving the finite-dimensional nonlinear system, i.e.~the FE approximation of \eqref{weakform}, is equivalent to finding $w^h$ in $\mathcal{S}^h$ so that $r^h(w^h)[\psi_p]=0$ for $p=1,\dots,m-1$.
A function in \texttt{fas1.py} computes \eqref{feresidualdetail} for any source functional $\ell^h$. On the original mesh, soon to be called the ``fine mesh'', we will use formula \eqref{ferhs}. However, the FAS algorithm (sections \ref{sec:fastwolevel} and \ref{sec:cycles}) is a systematic way to introduce a new source functional on each coarser mesh.
The function $u^h(x)$ in $\mathcal{S}^h$, equivalently $\bu$ in $\RR^{m-1}$ given by \eqref{fevector}, exactly solves a finite-dimensional nonlinear system \eqref{feweakform}. In practice, however, at each stage we only possess an iterate $w^h(x)$, for which the ``algebraic error'' is
\begin{equation}
e^h = w^h - u^h. \label{feerror}
\end{equation}
On the other hand, $u^h$ is not the continuum solution either; the ``discretization error'' $u^h-u$, where $u$ is the exact solution of the continuum problem \eqref{weakform}, is also nonzero in general. The theory of an FE method will show that discretization error goes to zero as $h\to 0$, at a particular rate determined by the FE space and the smoothness of the continuum problem \cite{Elmanetal2014}, but such a theory assumes we have exactly-solved the finite-dimensional system, i.e.~that we possess $u^h$ itself. The full ``numerical error'' is the difference $w^h-u$, and we have
\begin{equation}
\|w^h-u\| \le \|w^h-u^h\|+\|u^h-u\|.
\end{equation}
The numerical error, which we want to control, is bounded by the algebraic error plus the discretization error.
In the \texttt{-mms} case of \texttt{fas1.py}, where the exact solution $u$ of the continuum problem is known, the numerical error norm $\|w^h-u\|$ is computable. Normally we cannot access $u$ or $u^h$ directly, and only the residual norm $\|r^h(w^h)\|$ is computable, but the numerical error is controlled to within a matrix condition number by the residual norm.
\section{The nonlinear Gauss-Seidel iteration} \label{sec:ngs}
Next we describe an iteration which will, if carried far enough, solve the finite-dimensional nonlinear system \eqref{feweakform} to desired accuracy. This is the nonlinear Gauss-Seidel (NGS) iteration \cite{Briggsetal2000}, also called Gauss-Seidel-Newton \cite{BrandtLivne2011}. It updates the iterate $w^h$ by changing each point value at $x_p$ to make the residual at that point zero. That is, NGS solves the problem
\begin{equation}
\phi(c) = r^h(w^h + c \psi_p)[\psi_p] = 0 \label{ngspointproblem}
\end{equation}
for a scalar $c$. Once $c$ is found we update the point value (coefficient):
\begin{equation}
w^h \leftarrow w^h + c \psi_p, \label{ngspointupdate}
\end{equation}
equivalently $w[p] \leftarrow w[p] + c$.
As in the linear Gauss-Seidel iteration \cite{Greenbaum1997}, $w[p]$ is updated in a certain nodal ordering, using current values $w[q]$ when evaluating the residual in \eqref{ngspointproblem}. However, as the residual is made zero at one point it is no longer zero at the previous points. Gauss-Seidel-type methods are called ``successive'' \cite{GraeserKornhuber2009} or ``multiplicative'' \cite{Bueler2021} corrections. ``Additive'' corrections, of which the Jacobi iteration \cite{Greenbaum1997} is the best known, are also possible, but they are somewhat less efficient. Note our program only runs in serial, so the parallelizability of the Jacobi iteration cannot be exploited.
Solving the scalar problem $\phi(c)=0$ cannot be done exactly when considering a transcendental problem like \eqref{liouvillebratu}. Instead we will use a fixed number of Newton iterations \cite[Chapter 4]{Bueler2021} to generate a (scalar) sequence $\{c_k\}$ converging to $c$. Starting from $c_0=0$ we compute
\begin{equation}
\phi'(c_k)\, s_k = -\phi(c_k), \qquad c_{k+1} = c_k + s_k, \label{ngsnewton}
\end{equation}
for $k=0,1,\dots$. From \eqref{feresidualdetail} we have
\begin{align*}
\phi(c) &= \ell^h[\psi_p] - \frac{1}{h} \left(2(w[p]+c) - w[p-1] - w[p+1]\right) + h \lambda e^{w[p]+c}, \\
\phi'(c) &= -\frac{2}{h} + h \lambda e^{w[p]+c}.
\end{align*}
The vast majority of the work of our FAS algorithms will be in evaluating these expressions.
The NGS method ``sweeps'' through the mesh, zeroing $\phi(c)$ at successive nodes $x_p$, in increasing $p$ order, as in the following pseudocode which modifies $w^h$ in-place:
\begin{pseudo*}
\pr{ngssweep}(w^h,\ell^h,\id{niters}=2)\text{:} \\+
$r(w^h)[v] := \ell^h[v] - F^h(w^h)[v]$ \\
for $p=1,\dots,m-1$ \\+
$\phi(c) := r^h(w^h + c \psi_p)[\psi_p]$ \\
$c=0$ \\
for $k=1,\dots,$\id{niters} \\+
$c \gets c - \phi(c) / \phi'(c)$ \\-
$w[p] \gets w[p] + c$
\end{pseudo*}
For FAS algorithms (next section) we also define \textsc{ngssweep-back} with ``\textbf{for} $p=m-1,\dots,1$''. Function \texttt{ngssweep()} in \texttt{fas1.py} computes either order, and the \texttt{niters} default is two.
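In Python the sweep above might be sketched as follows. This illustration is close to, but not literally, \texttt{ngssweep()} in \texttt{fas1.py}; the array \texttt{ell} stores the values $\ell^h[\psi_p]$ at the interior nodes.

\begin{verbatim}
# Sketch of a nonlinear Gauss-Seidel sweep for the Liouville-Bratu problem.
import numpy as np

def ngssweep(w, ell, h, lam, niters=2, forward=True):
    # in-place sweep; w and ell hold interior values only (zero Dirichlet b.c.)
    n = len(w)
    indices = range(n) if forward else range(n - 1, -1, -1)
    for p in indices:
        wleft = w[p-1] if p > 0 else 0.0
        wright = w[p+1] if p < n - 1 else 0.0
        c = 0.0
        for _ in range(niters):       # Newton iterations on phi(c) = 0
            phi = ell[p] - (2.0*(w[p]+c) - wleft - wright)/h + h*lam*np.exp(w[p]+c)
            dphi = -2.0/h + h*lam*np.exp(w[p]+c)
            c -= phi / dphi
        w[p] += c
\end{verbatim}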
For a linear differential equation the Gauss-Seidel iteration is known to converge subject to matrix assumptions which correspond to ellipticity of the original problem \cite[for example]{Greenbaum1997}. We expect that for weak nonlinearities, e.g.~small $\lambda$ in \eqref{liouvillebratu}, our method will therefore converge as a solution method for \eqref{feweakform}, and we will demonstrate that this occurs in practice (section \ref{sec:convergence}). However, one observes in practice that, after substantial progress in the first few sweeps during which the residual becomes very smooth, soon NGS stagnates. Following Brandt \cite{Brandt1977,BrandtLivne2011}, who asserts that such a stalling scheme must be ``wrong'', we adopt the multigrid approach next.
\section{The FAS equation for two levels} \label{sec:fastwolevel}
The fundamental goal of any multigrid scheme is to do a minimal amount of work (smoothing) on a given mesh and then to switch to an inexpensive coarser mesh to do the rest of the work. By transferring (restricting) a version of the problem to the coarser mesh one can nearly solve for the error. The coarse-mesh approximation of the error is then added-back (prolonged) to correct the solution on the finer mesh. Being a multigrid scheme, full approximation storage (FAS) \cite{Brandt1977,Briggsetal2000} must therefore include the following elements:
\renewcommand{\labelenumi}{(\roman{enumi})}
\begin{enumerate}
\item a hierarchy of meshes, with restriction and prolongation operators between levels,
\item a ``smoother'' for each level, and
\item a meaningful way to transfer the problem to a coarser mesh.
\end{enumerate}
Regarding (i), we describe only two levels at first, but a full mesh hierarchy is used in section \ref{sec:cycles}. Here our coarse mesh has spacing $2h$ and $m/2$ elements (subintervals); all quantities on the coarse mesh have superscript ``$2h$''. The program \texttt{fas1.py} only refines by factors of two, but the ideas generalize for other refinement factors.
For (ii), a small fixed number of NGS sweeps is our smoother on the fine mesh. Each sweep, given by algorithm \textsc{ngssweep} above, is an $O(m)$ operation with a small constant. (The constant is determined by the number of Newton iterations and the expense of evaluating nonlinearities at each point, e.g.~$\lambda e^u$ in \eqref{liouvillebratu}.) A few NGS sweeps produce the result that the fine-mesh residual $r^h(w^h)$ and algebraic error $e^h = w^h - u^h$ become smooth, but they do not necessarily become small. Using more sweeps of NGS would eventually make the error small, and solve problem \eqref{feweakform}, but inefficiently in the sense that many sweeps would be needed, generally giving an $O(m^q)$ method for $q\gg 1$. However, NGS sweeps on a coarser mesh will see the coarse-mesh interpolant of the fine-mesh residual as less smooth, so the coarser-mesh NGS can quickly eliminate a large fraction of the error. Descending to yet coarser meshes, in a V-cycle as described in section \ref{sec:cycles}, leads to a coarsest mesh on which the error can be eliminated entirely by applying NGS at only a few interior points. (In the default settings for \texttt{fas1.py}, the coarsest mesh has two subintervals and one interior point.)
For item (iii), what is the coarse-mesh version of the problem? To derive this equation, namely to explain Brandt's FAS equation \cite{Brandt1977}, we start from the FE weak form \eqref{feweakform}. The fine-mesh solution $u^h$ is generally unknown. For an iterate $w^h$ we subtract $F^h(w^h)[v]$ from both sides to get the residual \eqref{feresidual} on the right:
\begin{equation}
F^h(u^h)[v] - F^h(w^h)[v] = r^h(w^h)[v]. \label{fasproto}
\end{equation}
It is not yet the FAS equation, but three key observations apply to equation \eqref{fasproto}:
\begin{itemize}
\item Both $w^h$ and $r^h(w^h)$ are known and/or computable.
\item If NGS sweeps have been applied to $w^h$ then $e^h=w^h-u^h$ and $r^h(w^h)$ are smooth.
\item If $F^h$ were linear in $w^h$ then we could rewrite the equation in terms of the error:
$$\qquad\qquad\qquad\qquad F^h(e^h)[v] = -r^h(w^h)[v] \qquad\qquad (\text{\emph{if $F^h$ is linear}}).$$
(One could even write the error equation using a matrix, i.e.~$A\be=-\br$.)
\end{itemize}
Based on these observations, Brandt proposed a new nonlinear equation on the coarse mesh. It is derived from \eqref{fasproto} by replacing terms using restriction operators on the computable quantities and by re-discretizing the nonlinear operator to get $F^{2h}$ acting on $\mathcal{S}^{2h}$. Because the problem is nonlinear we must store a coarse-mesh approximation to the solution, namely $u^{2h}$ in $\mathcal{S}^{2h}$, not just the error. Denoting the restriction operators by $R'$ and $R$, which are addressed in the next section, the following is the FAS equation:
\begin{equation}
F^{2h}(u^{2h})[v] - F^{2h}(R w^h)[v] = R' (r^h(w^h))[v], \label{faspreequation}
\end{equation}
for all $v$ in $\mathcal{S}^{2h}$. We can simplify the appearance by trivial rearrangement,
\begin{equation}
F^{2h}(u^{2h})[v] = \ell^{2h}[v], \label{fasequation}
\end{equation}
where
\begin{equation}
\ell^{2h}[v] = R' (r^h(w^h))[v] + F^{2h}(R w^h)[v]. \label{fasell}
\end{equation}
The key idea behind the FAS equation \eqref{fasequation}, which has the same form as the fine-mesh weak form \eqref{feweakform}, is that the smoothness of the error and residual have allowed us to accurately transfer the problem to the coarser mesh. Note that if $w^h=u^h$, that is, if $w^h$ is the exact solution to the fine-mesh problem \eqref{feweakform}, then $r^h(w^h)=0$ so $\ell^{2h}$ simplifies to $F^{2h}(R w^h)[v]$, and the solution of \eqref{fasequation} would be $u^{2h} = R w^h$ by well-posedness.
Next, in stating the two-level FAS method we will suppose \eqref{fasequation} is solved exactly, so $u^{2h}$ and the coarse-mesh error $u^{2h}-Rw^h$ are known. We will update the iterate on the finer mesh by adding a fine-mesh version the error:
\begin{equation}
w^h \gets w^h + P(u^{2h} - R w^h) \label{fasupdate}
\end{equation}
Here $P$ is a prolongation operator (next section); it extends a function in $\mathcal{S}^{2h}$ to a function in $\mathcal{S}^h$. Supposing that the smoother and the restriction/prolongation operators $R',R,P$ are all determined, formulas \eqref{fasequation}, \eqref{fasell}, and \eqref{fasupdate} define the following in-place algorithm in which $F^h$ and $F^{2h}$ denote discretizations of $F$ on the two meshes:
\label{fastwolevel}
\begin{pseudo*}
\pr{fas-twolevel}(w^h,\ell^h,\id{down}=1,\id{up}=1)\text{:} \\+
for $j=1,\dots,$\id{down} \\+
\pr{ngssweep}(w^h,\ell^h) \\-
$\ell^{2h}[v] := R' (\ell^h-F^h(w^h))[v] + F^{2h}(R w^h)[v]$ \\
$w^{2h} = \pr{copy}(R w^h)$ \\
\pr{coarsesolve}(w^{2h},\ell^{2h}) \\
$w^h \gets w^h + P(w^{2h} - R w^h)$ \\
for $j=1,\dots,$\id{up} \\+
\pr{ngssweep-back}(w^h,\ell^h)
\end{pseudo*}
We allow smoothing before and after the coarse-mesh correction. Specifically, \texttt{down} forward NGS sweeps modify $w^h$ before the coarse-mesh correction and \texttt{up} backward sweeps after.
While it is common in linear multigrid \cite{Briggsetal2000,Bueler2021,Trottenbergetal2001} to apply a direct solver like LU decomposition as the coarse-mesh solver, our problem is nonlinear so no finite-time direct solver is available. Instead we do enough NGS sweeps to solve the coarse-mesh problem accurately:
\begin{pseudo*}
\pr{coarsesolve}(w,\ell,\id{coarse}=1)\text{:} \\+
for $j=1,\dots,$\id{coarse} \\+
\pr{ngssweep}(w,\ell)
\end{pseudo*}
In order to implement FAS we must define the action of operators $R'$, $R$, and $P$ in \eqref{fasell} and \eqref{fasupdate}, which is done next. In section \ref{sec:cycles} we will define an FAS V-cycle by replacing \textsc{coarsesolve} with the recursive application of the FAS solver itself.
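Assembled from the pieces above, one two-level FAS correction might be sketched in Python as follows. The helpers \texttt{ngssweep()} and \texttt{FF()} are the earlier sketches, while \texttt{R()}, \texttt{Rprime()}, and \texttt{P()} stand for the restriction and prolongation routines of the next section; this is an illustration, not the code of \texttt{fas1.py}.

\begin{verbatim}
# Sketch of fas-twolevel: w, ell hold fine-mesh interior values.
def fas_twolevel(w, ell, h, lam, down=1, up=1, coarse=1):
    for _ in range(down):
        ngssweep(w, ell, h, lam)              # pre-smoothing
    rfine = ell - FF(w, h, lam)               # residual ell - F^h(w)
    wR = R(w)                                 # restricted iterate R w^h
    ellc = Rprime(rfine) + FF(wR, 2.0*h, lam) # FAS right-hand side ell^{2h}
    wc = wR.copy()
    for _ in range(coarse):
        ngssweep(wc, ellc, 2.0*h, lam)        # coarse solve by NGS sweeps
    w += P(wc - wR)                           # add the coarse-mesh correction
    for _ in range(up):
        ngssweep(w, ell, h, lam, forward=False)   # post-smoothing
\end{verbatim}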
\section{Restriction and prolongation operators} \label{sec:restrictionprolongation}
To explain the two different restriction operators $R'$ and $R$ in \eqref{fasequation}, plus the prolongation $P$ in \eqref{fasupdate}, first note that functions $w^h$ in $\mathcal{S}^h$ are distinct objects from linear functionals like the residual $r^h(w^h)$. Denoting such linear functionals by $(\mathcal{S}^h)'$, the three operators are already distinguished by their domain and range spaces:
\begin{align}
R' &: (\mathcal{S}^h)' \to (\mathcal{S}^{2h})', \label{rpoperators} \\
R &: \mathcal{S}^h \to \mathcal{S}^{2h}, \notag \\
P &: \mathcal{S}^{2h} \to \mathcal{S}^h. \notag
\end{align}
On the other hand, both functions in $\mathcal{S}^h$ and linear functionals in $(\mathcal{S}^h)'$ are representable by vectors in $\RR^{m-1}$. One stores a function $w^h$ via coefficients $w[p]$ with respect to an expansion in the hat function basis $\{\psi_p\}$, as in \eqref{fesolution} for example, while one stores a functional $\ell^h$ by its values $\ell^h[\psi_p]$. Though it makes sense to represent $w^h$ as a column vector and $\ell^h$ as a row vector \cite{TrefethenBau1997}, in Python one may use ``flat'' one-dimensional NumPy arrays for both purposes. For our problem an iterate $w^h$ has zero boundary values, and likewise $\ell^h$ acts on $v$ with zero boundary values, thus only interior-point hat functions are needed in these representations.
But how do $R'$, $R$, and $P$ actually operate in the finite element (FE) case? The key calculation relates the coarse-mesh hat functions $\psi_q^{2h}(x)$ to the fine mesh hats $\psi_p^h(x)$ (Figure \ref{fig:hatcombination}):
\begin{equation}
\psi_q^{2h}(x) = \frac{1}{2} \psi_{2q-1}^h(x) + \psi_{2q}^h(x) + \frac{1}{2} \psi_{2q+1}^h(x), \label{hatrelation}
\end{equation}
for $q=1,2,\dots,M-1$. Recall that $M=m/2$, and that we are assuming $m$ is even.
\begin{figure}
\includegraphics[width=0.6\textwidth]{figs/hatcombination.pdf}
\caption{Formula \eqref{hatrelation} writes a coarse-mesh hat $\psi_q^{2h}(x)$ (solid) as a linear combination of fine-mesh hats $\psi_p^h(x)$ (dotted) for $p=2q-1,2q,2q+1$.}
\label{fig:hatcombination}
\end{figure}
First consider the prolongation $P$. Because a piecewise-linear function on the coarse mesh is also a piecewise-linear function on the fine mesh, $P$ is defined as the injection of $\mathcal{S}^{2h}$ into $\mathcal{S}^h$, without changing the function. Suppose $w^{2h}(x)$ is in $\mathcal{S}^{2h}$, so $w^{2h}(x) = \sum_{q=1}^{M-1} w[q] \psi_q^{2h}(x)$. Then we use \eqref{hatrelation} to compute $P w^{2h}$ in terms of fine-mesh hat functions:
\begin{align}
(P w^{2h})(x) &= \sum_{q=1}^{M-1} w[q] \left(\frac{1}{2} \psi_{2q-1}^h(x) + \psi_{2q}^h(x) + \frac{1}{2} \psi_{2q+1}^h(x)\right) \label{pformula} \\
&= \frac{1}{2} w[1] \psi_1^h(x) + w[1] \psi_2^h(x) + \left(\frac{1}{2} w[1] + \frac{1}{2} w[2]\right) \psi_3^h(x) + w[2] \psi_4^h(x) \notag \\
&\qquad + \left(\frac{1}{2} w[2] + \frac{1}{2} w[3]\right) \psi_5^h(x) + \dots + w[M\!-\!1] \psi_{m-2}^h(x) \notag \\
&\qquad + \frac{1}{2} w[M\!-\!1] \psi_{m-1}^h(x) \notag
\end{align}
As a matrix, $P:\RR^{M-1} \to \RR^{m-1}$ acts on vectors; it has $M-1$ columns and $m-1$ rows:
\begin{equation}
P = \begin{bmatrix}
1/2 & & & \\
1 & & & \\
1/2 & 1/2 & & \\
& 1 & & \\
& 1/2 & 1/2 & \\
& & & \ddots
\end{bmatrix} \label{pmatrix}
\end{equation}
The columns of $P$ are linearly independent and the column sums equal two by \eqref{hatrelation}. The row sums equal one except for the first and last rows.
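In terms of the stored nodal values, formula \eqref{pformula} might be sketched as follows (an illustration, not necessarily the implementation in \texttt{fas1.py}):

\begin{verbatim}
# Sketch of the prolongation P: coarse interior values (length M-1) in,
# fine interior values (length m-1 = 2M-1) out.
import numpy as np

def P(wc):
    M = len(wc) + 1
    wf = np.zeros(2 * M - 1)
    wf[1::2] = wc                          # fine node 2q coincides with coarse node q
    wf[2:-1:2] = 0.5 * (wc[:-1] + wc[1:])  # in-between nodes: average of neighbours
    wf[0] = 0.5 * wc[0]                    # neighbours of the boundary (coarse value 0)
    wf[-1] = 0.5 * wc[-1]
    return wf
\end{verbatim}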
Next, the restriction $R'$ acts on fine-mesh linear functionals $\ell:\mathcal{S}^h \to \RR$. It is called ``canonical restriction'' \cite{GraeserKornhuber2009} because its output, the functional $R'\ell:\mathcal{S}^{2h}\to \RR$, acts on coarse-mesh functions the same way as $\ell$ itself acts on those functions, so defining $R'$ involves no choices. We may state this using $P$:
\begin{equation}
(R'\ell)[v] = \ell[Pv], \label{rprimedefinition}
\end{equation}
for $v$ in $\mathcal{S}^{2h}$. As noted earlier, $\ell$ is represented by a vector in $\RR^{m-1}$ of the values $\ell[\psi_p^h]$, so one computes the values of $R'\ell$ using \eqref{hatrelation}:
\begin{align}
(R'\ell)[\psi_q^{2h}] &= \ell[\psi_q^{2h}] = \ell\left[\frac{1}{2} \psi_{2q-1}^h + \psi_{2q}^h + \frac{1}{2} \psi_{2q+1}^h\right] \label{rprimeformula} \\
&= \frac{1}{2} \ell[\psi_{2q-1}^h] + \ell[\psi_{2q}^h] + \frac{1}{2} \ell[\psi_{2q+1}^h]. \notag
\end{align}
As a matrix $R'$ is the matrix transpose of $P$, with $M-1$ rows and $m-1$ columns:
\begin{equation}
R' = \begin{bmatrix}
1/2 & 1 & 1/2 & & & \\
& & 1/2 & 1 & 1/2 & \\
& & & & 1/2 & \\
& & & & & \ddots
\end{bmatrix} \label{rprimematrix}
\end{equation}
Finally we consider the restriction $R:\mathcal{S}^h\to\mathcal{S}^{2h}$ acting on functions, a more interesting map because it loses information. (By contrast, $P$ and $R'$ essentially preserve the input object, without loss, via reinterpretation on the output mesh.) Consider a fine-mesh function $w^h = \sum_{p=1}^{m-1} w[p] \psi_p^{h}$. The result $R w^h$ is linear across those fine-mesh nodes which are not in the coarse mesh, and so the values at those in-between nodes are not recoverable.
There are three well-known versions of the restriction $R$:
\begin{itemize}
\item $\Rpr$ is defined as projection, by the property
\begin{equation}
\ip{\Rpr w^h}{v} = \ip{w^h}{v} \label{rprdefinition}
\end{equation}
for all $v\in \mathcal{S}^{2h}$. Computing the entries of $\Rpr$ requires solving a linear system. To show this system we define the invertible, sparse, symmetric mass matrices \cite{Elmanetal2014}, namely $Q_{jk}^{h} = \ip{\psi_j^{h}}{\psi_k^{h}}$ for the fine mesh and $Q_{jk}^{2h} = \ip{\psi_j^{2h}}{\psi_k^{2h}}$ for the coarse. Then one solves a matrix equation for $\Rpr$:
\begin{equation}
Q^{2h} \Rpr = R' Q^{h}, \label{rprequation}
\end{equation}
or equivalently $\Rpr = (Q^{2h})^{-1} R' Q^{h}$. Equation \eqref{rprequation} is justified by using $v=\psi_s^{2h}$ in definition \eqref{rprdefinition}, and then applying \eqref{hatrelation}, as follows. Write $z = \Rpr w^h = \sum_{q=1}^{M-1} z[q] \psi_q^{2h}$ and expand both sides:
\begin{align*}
\ip{z}{\psi_s^{2h}} &= \ip{w^h}{\psi_s^{2h}} \\
\sum_{q=1}^{M-1} z[q] \ip{\psi_q^{2h}}{\psi_s^{2h}} &= \sum_{p=1}^{m-1} w[p] \ip{\psi_p^{h}}{\frac{1}{2} \psi_{2s-1}^{h} + \psi_{2s}^{h} + \frac{1}{2} \psi_{2s+1}^{h}} \\
\sum_{q=1}^{M-1} Q_{sq}^{2h} z[q] &= \sum_{p=1}^{m-1} \left(\frac{1}{2} Q_{2s-1,p}^{h} + Q_{2s,p}^{h} + \frac{1}{2} Q_{2s+1,p}^{h}\right) w[p] \\
(Q^{2h} \Rpr w^h)[s] &= (R' Q^h w^h)[s]
\end{align*}
(Note $w^h$ in $\mathcal{S}^h$ and index $s$ are arbitrary.) In 1D the mass matrices $Q^{2h},Q^h$ are tridiagonal, thus each column of $\Rpr$ can be found by solving equation \eqref{rprequation} using an $O(M)$ algorithm \cite{TrefethenBau1997}, implying $O(M^2)$ work. While this is possible, and the result could even be found by hand in this case, the alternatives below are easier to implement.
\item $\Rin$ is defined as pointwise injection. Supposing $w^h = \sum_{p=1}^{m-1} w[p] \psi_p^{h}$,
\begin{equation}
\Rin w^h = \sum_{q=1}^{M-1} w[2q] \psi_q^{2h}, \label{rindefinition}
\end{equation}
so $(\Rin w^h)(x_q) = w^h(x_q) = w[2q]$ for each point $x_q$. In other words, to compute $\Rin w^h$ we simply drop the nodal values at those fine-mesh nodes which are not in the coarse mesh. As a matrix this is
\begin{equation}
\Rin = \begin{bmatrix}
0 & 1 & & & & &\\
& & 0 & 1 & & & \\
& & & & 0 & 1 & \\
& & & & & & \ddots
\end{bmatrix}. \label{rinmatrix}
\end{equation}
This restriction is very simple but it may lose track of the magnitude of $w^h$, or badly mis-represent it, \emph{if} the input is not smooth. For example, sampling a sawtooth function at the coarse-mesh nodes would capture only the peaks or only the troughs.
\item $\Rfw$, the ``full-weighting'' restriction \cite{Briggsetal2000}, averages nodal values onto the coarse mesh:
\begin{equation}
\Rfw w^h = \sum_{q=1}^{M-1} \left(\frac{1}{4} w[2q-1] + \frac{1}{2} w[2q] + \frac{1}{4} w[2q+1]\right) \psi_q^{2h}. \label{rfwdefinition}
\end{equation}
This computes each coarse-mesh nodal value of $z=\Rfw w^h$ as a weighted average of the value of $w^h$ at the three closest fine-mesh nodes. The matrix is thus a multiple of the canonical restriction matrix in \eqref{rprimematrix}:
\begin{equation}
\Rfw = \begin{bmatrix}
1/4 & 1/2 & 1/4 & & & \\
& & 1/4 & 1/2 & 1/4 & \\
& & & & 1/4 & \\
& & & & & \ddots
\end{bmatrix} = \frac{1}{2} R'. \label{rfwmatrix}
\end{equation}
\end{itemize}
\medskip
Which restriction do we choose? Because of their simplicity, we will implement and compare $\Rfw$ and $\Rin$ in \texttt{fas1.py}.
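As a sketch, and under the same 0-based interior-value conventions as the earlier snippets, each of these two restrictions of a fine-mesh nodal vector is a single line of NumPy; neither requires solving a linear system.
\begin{verbatim}
def restrict_injection(wf):
    # R_in: keep only the values at the coarse-mesh nodes
    return wf[1::2].copy()

def restrict_fullweighting(wf):
    # R_fw = R'/2: weighted average of the three nearest fine-mesh values
    return 0.25*wf[0:-2:2] + 0.5*wf[1:-1:2] + 0.25*wf[2::2]
\end{verbatim}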
\section{Cycles} \label{sec:cycles}
The main principles of the FAS scheme are already contained in the \textsc{fas-twolevel} algorithm in section \ref{sec:fastwolevel}, from which it is a small step to solve the coarse-mesh problem by the same scheme, creating a so-called ``V-cycle''. To define this precisely we need an indexed hierarchy of mesh levels. Start with a coarsest mesh with $m_0$ elements of length $h_0=1/m_0$. (By default in \texttt{fas1.py} we have $m_0=2$.) For $k=1,\dots,K$ we refine by factors of two so that the $k$th mesh has $m_k=2^k m_0$ elements of length $h_k=h_0/2^k$. The final $K$th mesh is now called the ``fine mesh''. Instead of the superscripts $h$ and $2h$ used in section \ref{sec:fastwolevel}, now we use a ``$k$'' superscript to indicate the mesh on which a quantity lives.
On this hierarchy an FAS V-cycle is the following in-place recursive algorithm:
\begin{pseudo*}
\pr{fas-vcycle}(k,w^k,\ell^k,\id{down}=1,\id{up}=1)\text{:} \\+
if $k=0$ \\+
\pr{coarsesolve}(w^0,\ell^0) \\-
else \\+
for $j=1,\dots,$\id{down} \\+
\pr{ngssweep}(w^k,\ell^k) \\-
$w^{k-1} = \pr{copy}(R w^k)$ \\
$\ell^{k-1}[v] := R' (\ell^k-F^k(w^k))[v] + F^{k-1}(R w^k)[v]$ \\
\pr{fas-vcycle}(k-1,w^{k-1},\ell^{k-1}) \\
$w^k \gets w^k + P(w^{k-1} - R w^k)$ \\
for $j=1,\dots,$\id{up} \\+
\pr{ngssweep-back}(w^k,\ell^k) \\-
\end{pseudo*}
Observe that the meaning of ``$\ell^k$'' depends on the mesh level. On the fine level it is $\ell^K[v] = \ip{g}{v}$, as in \eqref{ferhs}, but on coarser levels it is determined by the nontrivial FAS formula \eqref{fasell}. Also note that \textsc{fas-vcycle} does in-place modification of the coarse-mesh iterate $w^{k-1}$. A V-cycle with $K=3$ is shown in Figure \ref{fig:cycles}.
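A compact Python transcription may clarify the recursion. All helper names below, namely \texttt{ngssweep}, \texttt{ngssweep\_back}, \texttt{coarsesolve}, \texttt{F}, and the transfer operators from the earlier sketches, are hypothetical stand-ins, not necessarily the interfaces used in \texttt{cycles.py}.
\begin{verbatim}
def fas_vcycle(k, w, ell, down=1, up=1):
    if k == 0:
        coarsesolve(w, ell)
        return
    for _ in range(down):
        ngssweep(k, w, ell)                    # pre-smoothing
    Rw = restrict_fullweighting(w)             # R w^k
    wc = Rw.copy()
    ellc = restrict_canonical(ell - F(k, w)) + F(k - 1, Rw)  # FAS functional
    fas_vcycle(k - 1, wc, ellc, down, up)
    w += prolong(wc - Rw)                      # coarse-mesh correction, in place
    for _ in range(up):
        ngssweep_back(k, w, ell)               # post-smoothing
\end{verbatim}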
\begin{figure}
\input{tikz/cycles.tex}
\caption{An FAS V-cycle (left) and F-cycle (right) on a mesh hierarchy with four levels ($K=3$). Solid dots are \texttt{down} sweeps of NGS, open circles are \texttt{up} sweeps, and squares are \textsc{coarsesolve}. Thick grey edges show $\hat P$.}
\label{fig:cycles}
\end{figure}
V-cycles can be iterated to solve problem \eqref{feweakform} to desired accuracy. We put this in a pseudocode for clarity:
\begin{pseudo*}
\pr{fas-solver}(w^K,\id{rtol}=10^{-4},\id{cyclemax}=100)\text{:} \\+
$\ell^K[v] = \ip{g}{v}$ \\
$r_0 = \|\ell^K - F^K(w^K)\|$ \\
for $s=1,\dots,\id{cyclemax}$ \\+
\pr{fas-vcycle}(K,w^K,\ell^K) \\
if $\|\ell^K-F^K(w^K)\| < \id{rtol}\,r_0$ \\+
break \\--
return $w^K$
\end{pseudo*}
Our Python code \texttt{fas1.py} implements \pr{fas-vcycle} and \pr{fas-solver}, and options \texttt{-rtol}, \texttt{-cyclemax} override the defaults for the latter. As is easily seen by experimentation, and as we will demonstrate in the next two sections, 7 to 12 V-cycles, using the default settings in \textsc{fas-vcycle} including \texttt{down} $=1$ and \texttt{up} $=1$ smoother applications, make a very effective solver on any mesh.
But we can add a different multilevel idea to get a new kind of cycle. It is based on the observation that an iterative equation solver, linear or nonlinear, often depends critically on the quality of its initial iterate. Indeed, choosing initial iterate $w^K=0$ and calling \textsc{fas-solver} may not yield a convergent method. However, one finds in practice that coarse meshes are more forgiving with respect to the initial iterate than are finer meshes. Now the new idea is to start on the coarsest mesh in the hierarchy, where a blind guess like $w^0=0$ is most likely to succeed, and then work upward through the levels. At each mesh level one computes an initial iterate by prolongation of a nearly-converged iterate on the previous level, and then one does a V-cycle. At the finest mesh level we may do repeated V-cycles.
The resulting algorithm is called an FAS multigrid ``F-cycle'' because the pattern in Figure \ref{fig:cycles} (right) looks vaguely like an ``F'' on its back; it is the following algorithm:
\begin{pseudo*}
\pr{fas-fcycle}(K,\id{down}=1,\id{up}=1)\text{:} \\+
$w^0 = 0$ \\
$\ell^0[v] = \ip{g}{v}$ \\
\pr{coarsesolve}(w^0,\ell^0) \\
for $k=1,\dots,K$ \\+
$w^k = \hat P w^{k-1}$ \\
$\ell^k[v] = \ip{g}{v}$ \\
\pr{fas-vcycle}(k,w^k,\ell^k) \\-
return $w^K$
\end{pseudo*}
Note that parameters \id{down} and \id{up} are passed into the V-cycle.
This algorithm is also called a ``full multigrid'' (FMG) cycle \cite{BrandtLivne2011,Briggsetal2000}, but the meaning of ``full'' is fundamentally different in FAS versus FMG terminology. One may run \pr{fas-fcycle} to generate the initial iterate for \pr{fas-solver}, but, as we will see in section \ref{sec:performance}, the result of one F-cycle is already a very good solution.
It is important to avoid introducing high-frequency error when generating the first iterate on the finer mesh. Thus a coarse-mesh solution is prolonged onto the next level by a possibly-different operator:
\begin{equation}
w^k = \hat P w^{k-1} \label{enhancedprolongation}
\end{equation}
It is common for a better interpolation scheme to be used for $\hat P$ than for $P$ \cite{Trottenbergetal2001}. Our choice for $\hat P$ first applies $P$ to generate a fine-mesh function, and then sweeps once through the \emph{new} fine-mesh nodes, applying NGS there without altering values at the nodes already present in the coarse mesh. This $\hat P$ is half of a smoother, and is counted as such; see section \ref{sec:performance}.
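Putting the pieces together, an F-cycle is a short loop over the levels. The sketch below again uses hypothetical helper names; \texttt{enhanced\_prolong} stands for $\hat P$, i.e.\ prolongation by $P$ followed by one NGS pass over the new nodes only, and \texttt{assemble\_rhs} stands for forming $\ell^k[v]=\ip{g}{v}$ on level $k$.
\begin{verbatim}
def fas_fcycle(K, down=1, up=1):
    w = np.zeros(m0 - 1)            # coarsest-mesh interior values (m0 elements assumed)
    ell = assemble_rhs(0)           # ell^0[v] = <g,v> on the coarsest mesh
    coarsesolve(w, ell)
    for k in range(1, K + 1):
        w = enhanced_prolong(w)     # hat-P: prolong, then smooth only the new nodes
        ell = assemble_rhs(k)       # ell^k[v] = <g,v> on mesh level k
        fas_vcycle(k, w, ell, down, up)
    return w
\end{verbatim}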
\section{Convergence} \label{sec:convergence}
The Python program \texttt{fas1.py} accompanying this note applies \pr{fas-solver} by default, with zero initial iterate, to solve equation \eqref{liouvillebratu}. The program depends only on the widely-available NumPy library \cite{Harrisetal2020}. It imports local modules \texttt{meshlevel.py}, \texttt{problems.py}, and \texttt{cycles.py} from the same directory.
To get started, clone the Git repository and run the program:
\begin{cline}
$ git clone https://github.com/bueler/mg-glaciers.git
$ cd mg-glaciers/fas/py/
$ ./fas1.py
m=8 mesh, 6 V(1,1) cycles (19.50 WU): |u|_2=0.102443
\end{cline}
%$
Various allowed options to \texttt{fas1.py} are shown by usage help:\footnote{Also, a small suite of software (regression) tests of \texttt{fas1.py} is run with \,\texttt{make test}.}
\begin{cline}
$ ./fas1.py -h
\end{cline}
%$
For example, choosing a mesh with $m=2^{K+1}=16$ elements and a problem with known exact solution (section \ref{sec:intro}), yields Figure \ref{fig:show}:
\begin{cline}
$ ./fas1.py -K 3 -mms -show
m=16 mesh, 6 V(1,1) cycles (21.75 WU): ... |u-u_ex|_2=2.1315e-02
\end{cline}
%$
The V-cycles in this run, exactly as shown in Figure \ref{fig:cycles}, are reported as ``\texttt{V(1,1)}'' because the defaults correspond to \texttt{down} $=1$ and \texttt{up} $=1$ NGS sweeps on each level. Note that runs with option \texttt{-mms} report the final numerical error $\|w^h-u\|_2$.
\begin{figure}
\includegraphics[width=0.8\textwidth]{figs/show.pdf}
\caption{Results from a \texttt{-mms} run of \texttt{fas1.py} on $m=16$ elements.}
\label{fig:show}
\end{figure}
By using the \texttt{-mms} case we can demonstrate convergence of our implemented FE method, and thereby verify \texttt{fas1.py}. The numerical errors from runs with 12 V-cycles, i.e.~with options \texttt{-rtol 0 -cyclemax 12}, and $K=3,4,\dots,14$, corresponding to $16\le m \le 32768$ elements, are shown in Figure \ref{fig:converge}. Because our problem is so simple, with a very smooth solution, the error converges at exactly the expected rate $O(h^2)$ \cite{Elmanetal2014}.
However, if instead of a small, fixed number of V-cycles we try a large number of NGS sweeps, e.g.~applying the algorithm below with \texttt{-rtol 0 -cyclemax 10000} and a zero initial iterate, then the results are disappointing.
\begin{pseudo*}
\pr{ngsonly}(w^K,\id{rtol}=10^{-4},\id{cyclemax}=100)\text{:} \\+
$\ell^K[v] = \ip{g}{v}$ \\
$r_0 = \|\ell^K - F^K(w^K)\|$ \\
for $s=1,\dots,\id{cyclemax}$ \\+
\pr{ngssweep}(w^K,\ell^K) \\
if $\|\ell^K-F^K(w^K)\| < \id{rtol}\,r_0$ \\+
break \\--
return $w^K$
\end{pseudo*}
As shown in Figure \ref{fig:converge}, such runs generate convergence to discretization error only on meshes with $m=16,32,64,128$. For slightly finer meshes ($m=256,512$) the same number of sweeps is no longer sufficient, and continuing to yet finer meshes with the same number of sweeps would make essentially no progress (not shown). The reason for this behavior is that almost all of the algebraic error (section \ref{sec:femethod}) is in low-frequency modes which the NGS sweeps are barely able to reduce. This is the situation which multigrid schemes are designed to address \cite{BrandtLivne2011,Briggsetal2000}: by moving the problem between meshes, the same smoother efficiently reduces all frequencies present in the error. Both the smoother and the coarse-level solver components of our FAS algorithms consist entirely of NGS sweeps, but by adding a multilevel mesh infrastructure we have arranged that the sweeps are always making progress.
\begin{figure}
\includegraphics[width=0.7\textwidth]{figs/converge.pdf}
\caption{For a fixed number of V-cycles the numerical error $\|u-u_{\text{ex}}\|_2$ converges to zero at the expected rate $O(h^2)$. Even $10^4$ NGS sweeps fail to converge at higher resolutions.}
\label{fig:converge}
\end{figure}
\section{Performance} \label{sec:performance}
Having verified our method, our first performance test compares three solver algorithms:
\begin{itemize}
\item \textsc{fas-fcycle}, defined in section \ref{sec:cycles}.
\item \textsc{fas-solver}, which does V-cycles, defined in section \ref{sec:cycles}.
\item \textsc{ngsonly}, defined in section \ref{sec:convergence}.
\end{itemize}
The two FAS algorithms actually represent many different algorithms according to their option settings. While making no attempt to systematically explore the parameter space, we observe that 7 to 12 V(1,1) cycles suffice to approach discretization error in the \texttt{-mms} problem. For F-cycles we must choose how many V-cycles to take once the finest level is reached, and 2 or 3 certainly suffice. Experimentation in minimizing the work units (below), while maintaining convergence, yields a choice of three V(1,0) cycles.
The three chosen algorithms become the following specific \texttt{fas1.py} options on meshes with $m=2^{K+1}$ elements for $K=3,4,\dots,17,18$:
\medskip
\begin{tabular}{ll}
\textsf{F-cycle$+$3$\times$V(1,0)} \,: &\texttt{-mms -fcycle -rtol 0 -cyclemax 4 -up 0 -K }$K$ \\
\textsf{12 V(1,1) cycles} \,: &\texttt{-mms -rtol 0 -cyclemax 12 -K }$K$ \\
\textsf{NGS sweeps} \,: &\texttt{-mms -rtol 0 -cyclemax $Z$ -ngsonly -K }$K$
\end{tabular}
\medskip
In order to achieve convergence for NGS sweeps alone, we must choose rapidly increasing $Z$ as $K$ increases. For the comparison below we simply double $Z$ until the reported numerical error is within a factor of two of discretization error (as reported by the FAS algorithms), but at $K=7$ the time is 100 seconds and we stop testing.
The results for run time on the author's laptop are in Figure \ref{fig:optimal}. For all the coarser meshes, e.g.~$m=16,\dots,256$, the FAS algorithms run in about 0.3 seconds. This is the minimum time to start and run any Python program on this machine, so the actual computational time is not really detected. For $m \ge 10^3$ both FAS algorithms enter into a regime where the run time is greater than one second, and then it becomes proportional to $m$. That is, their solver complexity is $O(m^1)$. These are \emph{optimal} solvers \cite[Chapter 7]{Bueler2021}.
By contrast, the \pr{ngsonly} algorithm is far from optimal, and not capable of solving on fine meshes. Fitting the three finest-mesh completed cases suggests its time is $O(m^{3.5})$.
\begin{figure}
\includegraphics[width=0.7\textwidth]{figs/optimal.pdf}
\caption{Run time to reach discretization error is optimal $O(m)$ for both V-cycles and F-cycles. Run time explodes for NGS sweeps.}
\label{fig:optimal}
\end{figure}
A standard way to compare multigrid-type solver algorithms uses the concept of a \emph{work unit} (WU). One WU is the number of operations needed to do one smoother sweep on the finest mesh, which takes $O(m)$ arithmetic (floating point) operations. For WUs in a 1D multilevel scheme, note that a smoother sweep on the second-finest mesh is $\frac{1}{2}$WU, and so on downward in the hierarchy, so the total of WU for a multigrid algorithm is a finite geometric sum \cite{Briggsetal2000} which depends on the number of levels $K$. For simplicity we do not count the arithmetic work in restriction and prolongation, other than in the enhanced prolongation $\hat P$ in \eqref{enhancedprolongation}, which uses $\frac{1}{2}$WU when passing to the finest mesh. Also we ignore non-arithmetic work entirely, for example vector copies.
Consider the $K\to\infty$ limit of WU calculations for the three algorithms above:
\begin{align*}
\text{WU}\big(\text{\textsf{F-cycle$+Z\times$V(1,0)}}\big) &\approx 3+2Z \\
\text{WU}\big(\text{\textsf{$Z$ V(1,1) cycles}}\big) &\approx 4Z \\
\text{WU}\big(\text{\textsf{$Z$ NGS sweeps}}\big) &= Z
\end{align*}
(Note that counting work units for NGS sweeps is trivial.) To confirm this we have added WU counting to \texttt{fas1.py}. On a $K=10$ mesh with $m=2^{11}=2048$ elements, for example, we observe that \textsf{F-cycle$+$3$\times$V(1,0)} requires a measured 8.98 WU while \textsf{12 V(1,1) cycles} use 47.96 WU.
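The asymptotic counts above are geometric sums over the levels. A quick check in Python, ignoring transfer-operator and coarse-solve work just as the text does:
\begin{verbatim}
def vcycle_wu(down, up, K):
    # one sweep on level k costs 2^-(K-k) WU; a V-cycle sweeps every level
    return (down + up) * sum(0.5**j for j in range(K + 1))

print(vcycle_wu(1, 1, 20))   # ~4, so Z V(1,1) cycles cost about 4Z WU
print(vcycle_wu(1, 0, 20))   # ~2, the 2Z term for the V(1,0) cycles
\end{verbatim}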
In fact a single F-cycle, without any additional V-cycles, nearly reaches discretization error. Consider three single-F-cycle schemes. The first is ``F(1,1)'', which uses the default settings \id{down}=1 and \id{up}=1. The other two are ``F(1,0)'', using \id{up}=0, and ``F(1,0)+$\Rin$'', which changes from the default full-weighting restriction ($\Rfw$) to injection ($\Rin$). These three solvers use 9, 5, and 5 WU, respectively, in the $K\to\infty$ limit of many levels.
Figure \ref{fig:tme} shows that on $K=7,\dots,18$ meshes, with up to $m=2^{19} \approx 5 \times 10^5$ elements, the measured numerical error is within a factor of two of discretization error. Note that the F(1,0) cycles actually generate smaller errors, and there is no significant difference between the two restriction methods. On the finest mesh it seems the discretization error itself, of order $10^{-11}$, was corrupted by rounding errors, and so all the measured numerical errors are closer together. Some multigrid authors \cite[for example]{BrownSmithAhmadia2013} say a solver achieves ``textbook multigrid efficiency'' if it reaches discretization error in fewer than 10 WU; by that standard, our F-cycles exhibit textbook multigrid efficiency.
\begin{figure}
\includegraphics[width=0.7\textwidth]{figs/tme.pdf}
\caption{Computed numerical error, relative to discretization error, from three versions of a single F-cycle.}
\label{fig:tme}
\end{figure}
\section{Extensions} \label{sec:extensions}
Our program \texttt{fas1.py} is deliberately basic in many senses. Here are three possible extensions which the reader might want to implement:
\renewcommand{\labelenumi}{\textbf{\Roman{enumi}.}}
\begin{enumerate}
\item The default value of the parameter $\lambda$ in \eqref{liouvillebratu} is \texttt{-lam 1.0}, but one can check that the $g=0$ problem becomes unstable at a critical value $\lambda_c \approx 3.5$. Interestingly, the solution changes very little as $\lambda \nearrow \lambda_c$; things are boring until failure occurs. (The most-common numerical symptom is overflow of $e^u$.) Equation \eqref{liouvillebratu} is a very simple model for combustion of a chemical mixture, and this instability corresponds to a chemical explosion \cite{FrankKameneckij1955}. However, finding $\lambda_c$ precisely is not easy because \texttt{fas1.py} always initializes at the distant location $w^0=0$. The behavior of FAS F-cycles is especially nontrivial near the critical $\lambda$ because the critical value is different on coarse grids. (And apparently sometimes smaller!) A better strategy for solutions near the critical value, and for parameter studies generally, is ``continuation''. For example, one might use a saved fine-mesh solution as the initial value in a run with a slightly different $\lambda$ value. The new run would then only need a few V-cycles. A sketch of such a continuation loop follows this list.
\item Equation \eqref{liouvillebratu} is a ``semilinear'' ODE because its nonlinearity occurs in the zeroth-derivative term \cite{Evans2010}. One might instead solve a ``quasilinear'' equation where the nonlinearity is in the coefficient to the highest-order derivative. For example, one might try a $p$-Laplacian \cite{Evans2010} extension to the Liouville-Bratu equation:
\begin{equation}
-\left(|u'|^{p-2} u'\right)' - \lambda e^u = g. \label{pbratu}
\end{equation}
This equation is the same as \eqref{liouvillebratu} when $p=2$, but for other values of $p$ in $(1,\infty)$ the solution is less well-behaved because the coefficient of $u''$ can degenerate or explode. However, a literature at least exists for the corresponding Poisson problem with $\lambda=0$ \cite{BarrettLiu1993,Bueler2021}. A basic technique is to regularize the leading coefficient with a numerical parameter $\eps>0$: replace $|u'|^{p-2}$ with $\left(|u'|^2+\eps\right)^{(p-2)/2}$. With such a change, continuation (item \textbf{I}) will be both important and more complicated.
\item The most significant extension of \texttt{fas1.py} would be to ``merely'' change from 1D to 2D or 3D. That is, to change from solving ODEs to solving elliptic PDEs like $-\grad^2 u - \lambda e^u=g$, where $\grad^2$ is the Laplacian operator. However, doing this in the style of \texttt{fas1.py}, using only NumPy vectors for infrastructure, is not recommended. Instead, it would be wise to apply an FE library like Firedrake \cite{Rathgeberetal2016} or Fenics \cite{Loggetal2012}, on top of an advanced solver library like PETSc \cite{Balayetal2021,Bueler2021}. Such libraries involve a substantial learning curve, and their support for FAS multigrid methods is incomplete, but they allow experimentation with higher-order FE spaces and many other benefits.
\end{enumerate}
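As a sketch of the continuation idea from item \textbf{I}, the loop below reuses each converged solution as the initial iterate for the next, slightly larger $\lambda$. The solver call and its signature are hypothetical and do not match the actual interface of \texttt{fas1.py}.
\begin{verbatim}
lam_values = np.linspace(1.0, 3.4, 13)   # stay below the approximate critical value
w = np.zeros(m - 1)                      # zero initial iterate for the first lambda only
for lam in lam_values:
    w = fas_solver(w, lam=lam, rtol=1.0e-10)  # warm start from the previous solution
    print(lam, np.max(np.abs(w)))
\end{verbatim}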
\section{Conclusion} \label{sec:conclusion}
Regarding the performance of the solvers in the last few sections, we summarize as follows:
\begin{quotation}
\emph{On any mesh of $m$ elements, problem \eqref{feweakform} can be solved nearly to the discretization error of our piecewise-linear FE method by using a single FAS F-cycle, or a few FAS V-cycles, and the work of these methods is $O(m)$ with a small constant; they are optimal. The faster F-cycle gives textbook multigrid efficiency. These facts hold for all $m$ up until rounding errors overwhelm the discretization at around $m=10^6$. By contrast, single-level NGS requires rapidly increasing numbers of sweeps because the work scales as $O(m^q)$ for $q\gg 1$. For example, more than $10^3$ sweeps are required if $m>10^2$, and if $m>10^3$ then discretization error cannot be achieved by single-level NGS sweeps in reasonable time.}
\end{quotation}
\small
\bigskip
\bibliography{fas}
\bibliographystyle{siam}
\end{document}
\documentclass[conference]{IEEEtran}
% \IEEEoverridecommandlockouts
% The preceding line is only needed to identify funding in the first footnote. If that is unneeded, please comment it out.
\usepackage{cite}
\usepackage{amsmath,amssymb,amsfonts}
\usepackage{hyperref}
%\usepackage[ngerman]{cleveref}
\usepackage[english]{cleveref}
% \usepackage{algorithmic}
\usepackage{graphicx}
\usepackage{textcomp}
\usepackage{xcolor}
\def\BibTeX{{\rm B\kern-.05em{\sc i\kern-.025em b}\kern-.08em
T\kern-.1667em\lower.7ex\hbox{E}\kern-.125emX}}
\begin{document}
\title{Text Analytica: cloud-based document analysis\\}
\author{\IEEEauthorblockN{Florian Bauer}
\IEEEauthorblockA{\textit{Department of Computer Science} \\
\textit{University of Bristol}\\
Bristol, United Kingdom \\
[email protected]} \\
97133
\and
\IEEEauthorblockN{Nathalie Pett}
\IEEEauthorblockA{\textit{Department of Computer Science} \\
\textit{University of Bristol}\\
Bristol, United Kingdom \\
[email protected]}\\
97059
}
\maketitle
\begin{abstract}
Text Analytica is a cloud-based web service for document analysis. The prototype introduced and discussed in this report currently accepts txt file uploads and extracts the three most common words. It is hosted on \textit{Microsoft Azure} and makes use of several cloud computing technologies. Text Analytica runs on Docker containers orchestrated by a Kubernetes cluster. The data generated from the text analysis is stored in an \textit{Azure Cosmos DB}. All technologies used were chosen for their positive impact on scalability. The scalability of Text Analytica was examined by means of a load test. The source code is available at https://github.com/darkcookie298/CloudComputing. The application can be run online at http://textanalytica.lukaspman.io/.
\end{abstract}
\section{Introduction}
\label{sec:intro}
Text Analytica is a cloud-based web service aiming to support the analysis of text documents. For the purpose of this coursework a prototype has been developed, deployed and evaluated using different cloud computing technologies. In the remainder of this section, the concept of Text Analytica is discussed as well as the limitations of the implemented prototype. \Cref{sec:platform-choice} explores the reasons for choosing Microsoft's cloud services over Oracle's. Afterwards, in \Cref{sec:system-architecture}, the system architecture and technologies used for the project are explained in more detail. Different aspects of the service's scalability are addressed in \Cref{sec:scalability}, including evidence of the performance of the service under load. Lastly, future improvements of Text Analytica are proposed in \Cref{sec:future-improvements}.
\begin{figure*}[ht!]
\includegraphics[width=150mm]{img/architectur.png}
\caption{Text Analytica's system architecture: The main components of the system are a Kubernetes cluster based on AKS, orchestrating the containers running Text Analytica and a \textit{Microsoft Cosmos DB}, storing the results of the analysis. Each container is encapsulated by its own Kubernetes Pod. The images to build new containers are pulled from the \textit{Docker Hub} container registry. Azure monitoring is used to monitor the health of the cluster and its components. The Kubernetes cluster is exposed for external access through an \textit{Azure Load Balancer}.}
\label{img:architecture}
\end{figure*}
\subsection{Vision}
University students are often faced with an abundance of resources regarding specific units or even certain topics within a unit. These range from lecture notes or slides to personal notes and additional scientific papers as well as e-books or extracts thereof. The first step in the exploration process is for students to familiarise themselves with these materials by identifying the documents' key aspects and discovering links between different sources. This is what Text Analytica ultimately aims to facilitate.
More specifically the functionalities of Text Analytica could include tagging, keyword search, suggestions of related documents based on textual analysis and eventually the generation of short summaries, all based on user-supplied PDF documents. These functionalities render Text Analytica a useful tool for many scenarios in which people are confronted with a large number of different and possibly complex text sources, e.g. in the context of management decisions in industry or business / commerce.
\subsection{Limitations of the submitted prototype}
\label{subsec:limits}
The focus of this coursework assignment was to deploy an application using different cloud services and explore its scalability, while remaining within the proposed time frame. Therefore, the functionality of the submitted prototype was stripped down to a minimum.
To skip the step of extracting machine-readable text from PDFs by applying OCR techniques, currently users are only able to upload simple txt documents. These files are parsed and analysed. At this point the analysis merely tags the system entries with the three most common words in the text. The content of the text file is stored as a string as part of the analysis, therefore currently there is no reason to store the actual files within the application and they are discarded after being processed. Additionally, user account and login functionality has not yet been implemented.
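A minimal sketch of this analysis step in Python is given below; the helper name and the naive whitespace tokenisation are assumptions for illustration, and the deployed code may differ.
\begin{verbatim}
from collections import Counter

def top_three_words(text):
    # naive whitespace tokenisation;
    # punctuation handling is omitted
    words = text.lower().split()
    return [w for w, _ in
            Counter(words).most_common(3)]
\end{verbatim}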
\section{Platform choice}
\label{sec:platform-choice}
The considerations detailed below eventually led to choosing \textit{Microsoft Azure} instead of the services introduced in the lectures by Oracle to remain within the proposed time scope for the coursework.
\subsection{Setup}
\label{subsec:setup}
Given the tutorials in class as well as the $\pounds 3,500$ in credits granted to us for \textit{Oracle Cloud}, we initially intended to build our application based on their services. We started trying to set up a Kubernetes cluster with the \textit{Terraform} \cite{Terraform} command line tool, as was recommended to us. However, after having spent a significant amount of time on installing all necessary tools, e.g. \textit{Terraform} itself, the \textit{Terraform Provider OCI}\cite{TerraformProviderOCI} and the \textit{Terraform Kubernetes Installer for Oracle Cloud} \cite{TerrafromK8sInstaller}, we failed to set up an easy example cluster due to reaching service limits. These limits restrict how many instances of a specific resource in a zone one can use. To increase the limit a ticket with Oracle support must be opened, a process which is very slow and inconvenient. While \textit{Microsoft Azure} does also have such limits \cite{AzureLimits}, they are far more generous and therefore allowed us to work more efficiently, without depending on the reply of third parties.
Another inconvenience of \textit{Oracle Cloud} was the unstructured and inconsistent user interface of the control panel. This became especially apparent when comparing it to the user interfaces of Amazon, as demonstrated in the lecture, and Microsoft, which was eventually used to configure the cluster and database for Text Analytica.
In addition to this, \textit{Azure} made it very easy to work with third party SDKs as it provides good integration of these services within its own ecosystem.
\subsection{Documentation and support}
\label{subsec:docandsupport}
Working with one of ``the big three'' cloud computing providers, that is with \textit{Google Cloud}, \textit{Amazon Web Services} or \textit{Microsoft Azure}, brings considerable advantages in documentation and support. Not only are there large numbers of tutorials, answered questions on \textit{Stackoverflow} and blog articles on their technologies, but these providers also offer very comprehensive documentation. The number of tutorials using Oracle's cloud services, however, was relatively small, making it difficult for beginners to get started. When running example code we encountered quite a few errors and bugs, but searching for solutions to these issues, for example with \textit{Terraform} and \textit{Oracle Cloud}, was often unsuccessful.
\subsection{Scalability}
In addition to the advantages mentioned in \Cref{subsec:setup} and \Cref{subsec:docandsupport} there is one more reason why we decided to migrate the project to \textit{Azure}. It offers the possibility to easily create \textit{virtual nodes} based on \textit{Virtual Kubelets}\cite{VirtualKubelet}, enabling faster scaling. A more detailed explanation of this technology is given in \Cref{sec:system-architecture}.
\section{System architecture and service implementation}
\label{sec:system-architecture}
\Cref{img:architecture} shows the architecture of the implemented system, which is discussed in the first two parts of this section in more detail. Text Analytica's two main components are a Kubernetes cluster, managed by the \textit{Azure Kubernetes Service} (AKS) \cite{AKS} and an \textit{Azure Cosmos DB} \cite{CosmosDB}, storing the results of the analysis. In the last part of this section, the implementation of the service and the continuous integration pipeline used to deploy the application are presented.
\subsection{Infrastructure}
\label{subsec:infra}
Text Analytica's front-end, back-end and analysis service are currently run within a single Docker container. Container orchestration is handled by a Kubernetes cluster managed by AKS, where each container is encapsulated by a Kubernetes Pod. When a new Pod is started up the corresponding container is created from a container image pulled from the container registry \textit{Docker Hub} \cite{DockerHub}.
The Kubernetes master node, responsible for the cluster operation, is fully managed by AKS. In addition to the master node the cluster consists of two worker nodes. The first one is based on a general purpose Linux VM, while the second is implemented as a \textit{virtual node} based on \textit{Azure Container Instances} (ACIs) \cite{AzureContainerInstances} and the \textit{Virtual Kubelet} open source project \cite{VirtualKubelet, VirtualKubeletGithub}. Kubernetes treats the ACIs comprising the virtual node like standard nodes, so new Pods can simply be provisioned on them. Based on container images, ACIs are ready to use in a few seconds, since no virtual machines need to be started up and managed by the user. In terms of service level, they could be described as ``containers-as-a-service''.
The cluster is monitored using \textit{Azure Monitor} \cite{AzureMonitor}, a collection of tools to monitor, query and log services and infrastructure running on \textit{Azure}. It monitors the health of the cluster itself, its nodes and the running services and containers.
To make the Pods containing the containerised application available to the public, several network measures are in place \cite{AKSNetworks, AzureExposeKubernetesCluster}. First, a Kubernetes Service of type \textit{NodePort} has been created to allow access to the Pods via IP address or DNS name and port. To expose the services of Text Analytica for external access, another Kubernetes Service, a so-called ingress Service, was used. In addition to application-level load balancing, which is not yet necessary for Text Analytica because all of its services still run within a single container, ingress can be used for SSL/TLS termination, for example. Alongside the ingress Service, an NGINX ingress controller \cite{IngressController} is deployed on a Pod on each node. The ingress Service is of type \textit{LoadBalancer}, which leads \textit{Azure} to create and configure an \textit{Azure Load Balancer} resource with a corresponding external IP address.
\subsection{Data storage}
As mentioned in \Cref{subsec:limits}, currently the only user data stored by Text Analytica is the results of the analysis and related metadata. This data is combined into a JSON object and sent to the \textit{Cosmos DB}.
\textit{Cosmos DB} is a ``globally-distributed, multi-model database'' \cite{CosmosDB}. It was chosen for this project, because it is very easy to set up from the \textit{Azure} portal, is fully managed and scaled by \textit{Azure} and can be treated like a \textit{MongoDB} in development using an API \cite{CosmosMongoDB}.
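For illustration, a sketch of writing one analysis result through the MongoDB-compatible API with \texttt{pymongo} follows; the connection string, database and collection names are placeholders rather than the project's actual configuration.
\begin{verbatim}
from pymongo import MongoClient

# placeholder connection string
client = MongoClient("<cosmos-connection-string>")
analyses = client["textanalytica"]["analyses"]
analyses.insert_one({
    "filename": "lecture1.txt",
    "top_words": ["cloud", "service", "data"]})
\end{verbatim}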
To provide persistent storage not affected by dying and restarting containers, the database is decoupled from the Kubernetes cluster.
The data in the database is shielded from unwanted access, as it is only accessible from the virtual network in which the Kubernetes cluster is hosted.
\subsection{Service implementation}
Text Analytica's back-end and the analysis functionality are programmed in Python. The back-end uses the \textit{Flask} framework, which is a web development microframework \cite{Flask}. While the implementation of the back-end is based on several online resources such as tutorials and videos \cite{FlaskOnKubernetes, FlaskVue, FlaskMongo}, the front-end is based on a template using \textit{Vue.js} and \textit{Bootstrap} \cite{Bootstrap}.
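As an illustration of this style, a minimal Flask route that accepts an uploaded file and returns the three most common words (reusing the \texttt{top\_three\_words} helper sketched earlier) is shown below; the route name, form field and response shape are assumptions, not the project's actual code.
\begin{verbatim}
from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route("/upload", methods=["POST"])
def upload():
    f = request.files["document"]
    text = f.read().decode("utf-8")
    return jsonify(
        {"top_words": top_three_words(text)})
\end{verbatim}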
To enhance the deployment of the webservice, a continuous delivery pipeline, in this case an \textit{Azure Pipeline} \cite{AzurePipeline}, was created. Whenever new code is pushed to Text Analytica's GitHub repository, a new Docker container image is built automatically using the Dockerfile and pushed to the \textit{Docker Hub} registry. These new images are then pulled by the AKS cluster to start updated containers to run the service. The service used to build this \textit{Azure Pipeline} is part of \textit{Azure DevOps}.
\begin{figure*}[ht!]
\includegraphics[width=170mm]{img/loadtest_01.png}
\caption{Results of a load test of the Kubernetes cluster. Most importantly, the \textit{Performance} chart shows that the average response time grows only moderately as the user load increases; there is no significant loss in performance.}
\label{img:loadtesting}
\end{figure*}
\section{Scalability}
\label{sec:scalability}
In a production environment, the number of Text Analytica users is expected to grow. As discussed in the lecture, sudden bursts in usage can sometimes not be foreseen \cite{Animoto}. Therefore, it was crucial to pay attention to the scalability of the application, to ensure a smooth and fast user experience independent of the number of parallel accesses. The application was built to handle an unknown number of users uploading and analysing files in parallel without running into errors or significant performance bottlenecks. In the remainder of this section it is discussed how this issue was addressed and results of a load test are shown.
\subsection{Infrastructure}
As mentioned in \Cref{sec:system-architecture} the web service is managed by a Kubernetes cluster. The main advantages of using a Kubernetes cluster are easy scalability and load balancing. More specifically, if usage of existing resources is high, the cluster will automatically scale \cite{KubernetesScaling, KubernetesAutoscaler}. In Text Analytica's case this might happen at peak times when a lot of people are using the Text Analytica web service. The cluster will then spawn more Pods on the existing \textit{Azure VM Node}, to handle the additional workload without sacrificing performance \cite{MicrosoftAzureKubernetesService}.
When the \textit{Azure VM Node} is used to capacity, new Pods based on \textit{Virtual Kubelets} are provisioned on the virtual node. On the one hand this allows the application to start up new Pods faster, as there is no need to wait for new VMs to boot up, as already explained in \Cref{subsec:infra}; on the other hand this enables ``infinite'' scaling, as there is no capacity limit for a virtual node based on ACIs. Due to higher prices this option should, however, not be used permanently, and in case of lasting high usage, more traditional VM-based nodes should be provisioned. While the provisioning of new VM-based nodes could be automated, in the current implementation of Text Analytica they would still have to be added manually, based on monitoring observations.
\subsection{Data storage}
Text Analytica uses an instance of \textit{Azure's} \textit{Cosmos DB} for storage. The following part of this section discusses how scalability could be addressed in terms of the data storage.
\textit{Cosmos DB} supports two kinds of partitioning: logical and physical partitions. A logical partition groups all items in a container that share the value of a designated property, the partition key. In this case the data itself and the data throughput are automatically and horizontally partitioned and scaled by \textit{Cosmos DB}. A number of logical partitions are assigned to one physical partition, which guarantees persistence and consistency of the data \cite{CosmosDBHorScal}. However, due to time constraints, Text Analytica's database entries have not yet been configured with a partition key for logical partitioning. This is an improvement that should eventually be made.
\subsection{Monitoring and load testing}
To measure how well Text Analytica scales with an increasing workload, respectively more parallel users, we conducted a load test with the \textit{Azure DevOps} load testing solution \cite{AzureLoadTest}.
\Cref{img:loadtesting} displays the results of one of our load tests. The four diagrams show data for \textit{Performance}, \textit{Throughput}, \textit{Errors} and \textit{Tests}. The test lasted ten minutes and involved seven hundred virtual users accessing the website at peak time. At this point we have not yet implemented a test case where actual files are uploaded to the application. From the \textit{Errors} diagram we observe that no errors occurred, which means our application did not fail even under high usage. The \textit{Performance} part contrasts the user load, the average response time and the average page time. Interpreting this result, we conclude that Text Analytica scales well, as the response time scales nearly linearly, with no major increase in latency. The \textit{Throughput} figure shows the pages/sec and requests/sec with increasing user load, and \textit{Tests} simply shows the tests/sec and average test time.
A useful tool for monitoring the Kubernetes cluster is the \textit{Kubernetes Dashboard}. It shows all important resources, e.g. the number of Pods running, CPU and memory usage. This tool was especially helpful for debugging.
\section{Future improvements}
\label{sec:future-improvements}
As mentioned before, because of the time frame proposed for the development of this coursework project and especially due to initial issues with Oracle's cloud services, the submitted version of Text Analytica is just a prototype. Therefore there are some improvements on different levels, which are discussed in this section.
\subsection{Infrastructure}
At the moment, since Text Analytica has little functionality, all of it runs in one container. To allow more specific scaling of certain parts of the application, the services should be split up into microservices, each running in its own container. Based on the current implementation, the functionality could be split up into back-end, front-end and analysis. Not only would this separation allow for more targeted scaling of the services, but it would also make it possible to implement ingress rules, so that the ingress can be used as an application-layer load balancer.
Another way to optimise Text Analytica would be improved management of the Kubernetes cluster itself. There are several popular open source projects, such as \textit{Istio}, which can be used to efficiently manage a mesh of microservices \cite{Istio}.
A more drastic change to the system architecture would be the shift to serverless, implementing the different services as \textit{Azure functions} for example \cite{AzureFunctions}. Instead of the current IaaS approach, where some set up and maintenance has to be done by the user, this would mean using \textit{Azure's} services on a PaaS level, allowing developers to focus on the application's functionality, while provisioning of resources and scaling is handled completely automatically by \textit{Azure}.
\subsection{Service implementation}
As the focus of this coursework assignment was the application of cloud technologies and optimising the application for scaling, there are quite a few improvements on the service implementation level to be made in the future.
Firstly, there are some small bugs in the current implementation, such as the missing creation of an actual ID for the uploaded and analysed documents, which would need to be fixed. Secondly, there is still some functionality missing to make Text Analytica a useful tool. Such functionality includes, but is not limited to, login and user account management, PDF upload and processing, more advanced analysis as well as the option to store, view and retrieve files within or from the application.
To turn Text Analytica into a production-level application it must comply with certain security standards. While developing the prototype there was no focus on security, and therefore only minimal or default security measures are currently in place.
Lastly, the existing continuous delivery pipeline could be extended, for example by adding testing stages.
\section{Conclusion}
\label{sec:conclusion}
In this report, Text Analytica, a cloud-based web service for text analysis, was presented. The application is hosted on \textit{Microsoft Azure} and utilises a range of cloud computing technologies on IaaS and PaaS level.
The development of the project was described from the first conceptual vision to the implementation and testing of a fully functional prototype, based on Docker containers orchestrated by a Kubernetes cluster and an \textit{Azure Cosmos DB}. The discussion included elaborating on the platform choice, system architecture and scalability aspects. The results of a relatively simple load test, measuring the performance of the application under a high number of parallel site accesses, suggest Text Analytica is well suited for scaling.
Improvements to enhance the submitted prototype, and eventually to develop it into a production-grade application, have been suggested.
\bibliographystyle{IEEEtran}
\bibliography{cw1-97059-97133}
\end{document}
\section{assigning from incompatible type}\label{sec:incompatible-assignment}
\begin{figure}
\begin{lstlisting}
#include <stdio.h>
int main(int argc, const char* argv[]) {
double n = 5;
double *pN;
pN = n;
  printf("Number: %f\n", *pN);
return 0;
}
\end{lstlisting}
\errmsg{assigning to 'double *' from incompatible type 'double'; take the address with \&}
\label{ex:incompatible-assignment}
\end{figure}
This error means that you are attempting to assign a variable of one type with an expression of a different type.
In this case, the problem is that you forgot to get the address of \code{n} with the address-of operator \code{\&}, so the code is trying to assign a \code{double} to a \code{double*} variable; writing \code{pN = \&n;} fixes it.
This error is similar to an incompatible initialization (Section \ref{sec:incompatible-initialization}) except in that case you are trying to initialize a variable rather than assign it a new value.
\newpage | {
"alphanum_fraction": 0.7491856678,
"avg_line_length": 38.375,
"ext": "tex",
"hexsha": "c43000d5de291b7a854f5e2a2925e571edff00f5",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "8775c6fd83b13ad96cc5dfa5e366cd44a627f95d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "jdalyuml/compile-error-book",
"max_forks_repo_path": "c-book/compile-errors-cpp/errors/incompatible-assignment.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "8775c6fd83b13ad96cc5dfa5e366cd44a627f95d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "jdalyuml/compile-error-book",
"max_issues_repo_path": "c-book/compile-errors-cpp/errors/incompatible-assignment.tex",
"max_line_length": 198,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "8775c6fd83b13ad96cc5dfa5e366cd44a627f95d",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "jdalyuml/compile-error-book",
"max_stars_repo_path": "c-book/compile-errors-cpp/errors/incompatible-assignment.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 238,
"size": 921
} |
\section{Internals}
\frame{\tableofcontents[currentsection]}
\begin{frame}
\frametitle{Elixir Processes}
\structure{Other Languages}
\begin{itemize}
\item Use threads sparingly
\item Thread creation is costly affair
\item Mitigated by using thread pools
\item Synchronization required
\item No language support
\end{itemize}
\vskip5mm
\structure{Elixir}
\begin{itemize}
\item Create as many processes as you want
\item No synchronization problems
\item Processes are cheap
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Internals}
\begin{center}
\begin{tikzpicture}[scale=0.9,transform shape,
os process/.style={drop shadow,fill=red!50},
os process header/.style={opacity=0.5,font=\huge\sc},
os thread/.style={drop shadow,fill=blue!50},
os thread header/.style={opacity=0.5,font=\large\sc},
elixir process/.style={drop shadow,fill=green!50}]
\draw[os process] (0,0) rectangle (10, 8);
\node[os process header,anchor=north] at (5,8) {os process};
\foreach \x/\y in {0.5/0.5,5.5/0.5,0.5/4,5.5/4} {
\coordinate (p) at (\x,\y);
\draw[os thread] (p) rectangle ++(4, 3);
\node[os thread header,anchor=north] at ($ (p) + (2,3) $) {os thread};
\foreach[evaluate={\i*0.5-0.25} as \dx] \i in {1,...,8} {
\foreach[evaluate={\j*0.5-0.2} as \dy] \j in {1,...,5} {
\draw[elixir process] (\x+\dx,\y+\dy) circle[radius=0.1cm];
}
}
}
\only<2>{
\node[note] at (5,4) {
Each dot = Elixir process
};
}
\end{tikzpicture}
\end{center}
\end{frame}
\begin{frame}
\frametitle{Internals}
\begin{itemize}
\item BEAM Virtual Machine lives in single OS process
\item It creates $N$ threads on a machine with $N$ cores
\item Each threads runs a scheduler
\item Processes are distributed among these schedulers
\item Schedulers determine which processes runs
    \item Cooperative ``multi-threading'' makes it efficient
\end{itemize}
\end{frame}
% This front matter template includes only the information you need to
% give us. We will take care of the rest of the front matter.
\title{Title of Your Book}
\author{Your Name}
\thanks{Department of Computer Science\\
Brown University\\
Providence, RI}
\maketitle
\chapter*{Abstract}
In quantum computing, where algorithms
exist that can solve computational problems
more efficiently than any known classical
algorithms, the elimination of errors
that result from external disturbances
or from imperfect gates has become the
``holy grail,'' and a worldwide quest for
a large scale fault-tolerant and computationally
superior quantum computer is currently
taking place. Optimists rely on the premise
that, under a certain threshold of errors,
an arbitrarily long fault-tolerant quantum
computation can be achieved with only
moderate (i.e., at most polynomial) overhead in computational cost.
Pessimists, on the other hand, object
that there are in principle (as opposed
to merely technological) reasons why such
machines are still nonexistent, and that
no matter what gadgets are used, large
scale quantum computers will never be
computationally superior to classical
ones. Lacking a complete empirical characterization
of quantum noise, the debate on the physical
possibility of such machines invites philosophical
scrutiny. Making this debate more precise
by suggesting a novel statistical mechanical
perspective thereof is the goal of this project.
\section*{Keywords}
computational complexity, decoherence,
error-correction, fault-tolerance, Landauer's
Principle, Maxwell's Demon, quantum computing,
statistical mechanics, thermodynamics
%%% Dedication
\tableofcontents
\listoffigures
\listoftables
\input{preface}
\section{One to one and onto transformations}
\begin{outcome}
\begin{enumerate}
\item Determine if a linear transformation is onto or one to one.
\end{enumerate}
\end{outcome}
Let $T: \R^n \to \R^m$ be a linear transformation. We define the \textbf{range}\index{linear transformation!range} or \textbf{image}\index{linear transformation!image} of $T$ as the set of vectors of $\R^{m}$ which are of the form
$T (\vect{x})$ (equivalently, $A\vect{x}$, where $A$ is the matrix that induces $T$) for some $\vect{x}\in \R^{n}$. It is common
to write $T\R^{n}$, $T(\R^{n})$, or
$\func{Im}(T) $ to denote these vectors.
\begin{lemma}{Range of a matrix transformation}{Ax}
Let $A$ be an $m\times n$-matrix where $A_{1},\ldots, A_{n}$ denote the columns of
$A$\index{range of matrix transformation}. Then, for a vector $\vect{x}=\begin{mymatrix}{c}
x_{1} \\
\vdots \\
x_{n}
\end{mymatrix}$ in $\R^n$,
\begin{equation*}
A\vect{x}=\sum_{k=1}^{n}x_{k}A_{k}
\end{equation*}
Therefore, $A (\R^n)$ is the collection of all
linear combinations of these products.
\end{lemma}
\begin{proof}
This follows from the definition of matrix multiplication.
\end{proof}
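For example, for a particular $2\times 2$-matrix this expansion reads
\begin{equation*}
\begin{mymatrix}{rr}
1 & 3 \\
2 & 4
\end{mymatrix} \begin{mymatrix}{r}
x_{1} \\
x_{2}
\end{mymatrix} = x_{1}\begin{mymatrix}{r}
1 \\
2
\end{mymatrix} + x_{2}\begin{mymatrix}{r}
3 \\
4
\end{mymatrix}
\end{equation*}
so the image of the corresponding transformation consists of all linear combinations of the two columns.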
This section is devoted to studying two important characterizations of linear transformations, called one to one and onto. We define them now.
\begin{definition}{One to one}{one-to-one}
Suppose $\vect{x}_1$ and $\vect{x}_2$ are vectors in $\R^n$. A linear transformation $T: \R^n \to \R^m$ is called \textbf{one to one}\index{one to one} (often written as $1-1$) if whenever
$\vect{x}_1 \neq \vect{x}_2$ it follows that:
\begin{equation*}
T(\vect{x}_1) \neq T (\vect{x}_2)
\end{equation*}
Equivalently, if $T(\vect{x}_1) =T(\vect{x}_2)$,
then $\vect{x}_1 = \vect{x}_2$. Thus, $T$ is one to one if it never takes two different
vectors to the same vector.
\end{definition}
The second important characterization is called onto.
\begin{definition}{Onto}{onto}
Let $T: \R^n \to \R^m$ be a linear transformation. Then $T$ is called \textbf{onto}\index{onto} if whenever $\vect{x}_2 \in \R^{m}$ there exists
$\vect{x}_1 \in \R^{n}$ such that $T(\vect{x}_1) = \vect{x}_2$.
\end{definition}
We often call a linear transformation which is one-to-one an \textbf{injection}\index{injection}. Similarly, a linear transformation which is onto is often called a \textbf{surjection}\index{surjection}.
The following proposition is an important result.
\begin{proposition}{One to one}{one-to-one-matrices}
Let $T:\R^n \to \R^m$ be a linear transformation. Then $T$ is one to one if
and only if $T(\vect{x}) = \vect{0}$ implies $\vect{x}=\vect{0}$.
\end{proposition}
\begin{proof}
We need to prove two things here. First, we will prove that if $T$ is one to one, then
$T(\vect{x}) = \vect{0}$ implies that $\vect{x}=\vect{0}$. Second, we will show that if $T(\vect{x})=\vect{0}$ implies that $\vect{x}=\vect{0}$, then
it follows that $T$ is one to one. Recall that a linear transformation has the property that $T(\vect{0}) = \vect{0}$.
%%Note that since $T$ is linear, it is induced by an $m \times n$-matrix $A$. Therefore we can rewrite the statement ``$T_A(\vect{x}) = \vect{0}$ implies $\vect{x}=\vect{0}$'' in terms of the matrix $A$ as ``$A\vect{x}=\vect{0}$ implies $\vect{x}=\vect{0}$''. Therefore we can prove this theorem using $A$.
%%
%%Observe that $A\vect{0}=A(\vect{0}+\vect{0}) =A\vect{0} +A\vect{0}$ and so $A\vect{0}=\vect{0}$.
%%
%%Now suppose $A$ is one to one and $A\vect{x}=\vect{0}$. We need to show that this implies $\vect{x}=\vect{0}$. Since $A$ is one to one, by Definition~\ref{def:one-to-one} $A$ can only map one vector to the zero vector $\vect{0}$. Now $A\vect{x}=\vect{0}$ and $A\vect{0}=\vect{0}$, so it follows that $\vect{x}=\vect{0}$. Thus if $A$ is one to one and $A\vect{x}=\vect{0}$, then $\vect{x}=\vect{0}$.
%%
%%Next assume that $A\vect{x}=\vect{0}$ implies $\vect{x}=\vect{0}$. We need to show that $A$ is one to one. Suppose $A\vect{x}=A\vect{y}$. Then $A\vect{x} - A\vect{y} = \vect{0}$.
%%Hence $A\vect{x}-A\vect{y} = A(\vect{x}-\vect{y}) = \vect{0}$. However, we have assumed that $A\vect{x}=\vect{0}$ implies $\vect{x}=\vect{0}$. This means that
%%whenever $A$ times a vector equals $\vect{0}$, that vector is also equal to $\vect{0}$. Therefore, $\vect{x}-\vect{y} = \vect{0}$ and so $\vect{x}=\vect{y}$.
%%Thus $A$ is one to one by Definition~\ref{def:one-to-one}.
%%\end{proof}
Suppose first that $T$ is one to one and consider $T(\vect{0})$.
\begin{equation*}
T(\vect{0})=T(\vect{0}+\vect{0}) =T(\vect{0})+T(\vect{0})
\end{equation*}
and so, adding the additive inverse of $T(\vect{0})$ to both sides, one sees
that $T(\vect{0})=\vect{0}$. If $T(\vect{x})=\vect{0}$ it must be the
case that $\vect{x}=\vect{0}$ because it was just shown that $T(\vect{0})=\vect{0}$ and $T$ is assumed to be one to one.
Now assume that if $T(\vect{x})=\vect{0}$, then it follows that $\vect{x}=\vect{0}$. If $T(\vect{v})=T(\vect{u})$, then
\[
T(\vect{v})-T(\vect{u})=T(\vect{v}-\vect{u}) =\vect{0}
\]
which shows that $\vect{v}-\vect{u}=\vect{0}$. In other words, $\vect{v}=\vect{u}$, and $T$ is one to one.
\end{proof}
Note that this proposition says that if $A=\begin{mymatrix}{ccc}
A_{1} & \cdots & A_{n}
\end{mymatrix} $ then $A$ is one to one if and only if whenever
\begin{equation*}
\vect{0} = \sum_{k=1}^{n}c_{k}A_{k}
\end{equation*}
it follows that each scalar $c_{k}=0$. In other words, $A$ is one to one if and only if the columns of $A$ are linearly independent.
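For example, if
\begin{equation*}
A=\begin{mymatrix}{rr}
1 & 2 \\
2 & 4
\end{mymatrix}
\end{equation*}
then $2A_{1}+(-1)A_{2}=\vect{0}$ even though the scalars $2$ and $-1$ are not zero. Hence the transformation induced by $A$ is not one to one; indeed, $A\vect{x}=\vect{0}$ for the non-zero vector $\vect{x}=\begin{mymatrix}{r}
2 \\
-1
\end{mymatrix}$.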
We will now take a look at an example of a one to one and onto linear transformation.
\begin{example}{A one to one and onto linear transformation}{one-to-one-onto-linear-transformation}
Suppose
\begin{equation*}
T\begin{mymatrix}{c}
x \\
y
\end{mymatrix} =\begin{mymatrix}{rr}
1 & 1 \\
1 & 2
\end{mymatrix} \begin{mymatrix}{r}
x \\
y
\end{mymatrix}
\end{equation*}
Then, $T:\R^{2}\rightarrow \R^{2}$ is a linear
transformation. Is $T$ onto? Is it one to one?
\end{example}
\begin{solution} Recall that because $T$ can be expressed as matrix
multiplication, we know that $T$ is a linear transformation. We will
start by looking at onto. So suppose $\begin{mymatrix}{c}
a \\
b
\end{mymatrix} \in \R^{2}$. Does there exist $\begin{mymatrix}{c}
x \\
y
\end{mymatrix} \in \R^2 $ such that $T\begin{mymatrix}{c}
x \\
y
\end{mymatrix} =\begin{mymatrix}{c}
a \\
b
\end{mymatrix}$? If so, then since $\begin{mymatrix}{c}
a \\
b
\end{mymatrix} $ is an arbitrary vector in $\R^{2}$, it will follow that $T$
is onto.
This question is familiar to you. It is asking whether
there is a solution to the equation
\begin{equation*}
\begin{mymatrix}{cc}
1 & 1 \\
1 & 2
\end{mymatrix} \begin{mymatrix}{c}
x \\
y
\end{mymatrix} =\begin{mymatrix}{c}
a \\
b
\end{mymatrix}
\end{equation*}
This is the same thing as asking for a solution to the following system of
equations.
\begin{equation*}
\begin{array}{c}
x+y=a \\
x+2y=b
\end{array}
\end{equation*}
Set up the augmented matrix and row reduce.
\begin{equation}
\begin{mymatrix}{rr|r}
1 & 1 & a \\
1 & 2 & b
\end{mymatrix} \rightarrow \begin{mymatrix}{rr|r}
1 & 0 & 2a-b \\
0 & 1 & b-a
\end{mymatrix}
\label{onto-matrix}
\end{equation}
You can see from the reduced form that the system has a solution for every choice of $a$ and $b$. Therefore,
we have shown that for any $a, b$, there is a $
\begin{mymatrix}{c}
x \\
y
\end{mymatrix}$ such that $T\begin{mymatrix}{c}
x \\
y
\end{mymatrix} =\begin{mymatrix}{c}
a \\
b
\end{mymatrix}$.
Thus $T$ is onto.
Now we want to know if $T$ is one to one.
By Proposition~\ref{prop:one-to-one-matrices}, it is enough to show that $A\vect{x}=\vect{0}$ implies $\vect{x}=\vect{0}$.
Consider the system $A\vect{x}=\vect{0}$ given by:
\begin{equation*}
\begin{mymatrix}{cc}
1 & 1 \\
1 & 2\\
\end{mymatrix}
\begin{mymatrix}{c}
x\\
y
\end{mymatrix}
=
\begin{mymatrix}{c}
0 \\
0
\end{mymatrix}
\end{equation*}
This is the same as the system given by
\begin{equation*}
\begin{array}{c}
x + y = 0 \\
x + 2y = 0
\end{array}
\end{equation*}
We need to show that the only solution to this system is $x = 0$ and $y = 0$. By setting up the augmented matrix and row reducing, we end up with
\begin{equation*} \begin{mymatrix}{rr|r}
1 & 0 & 0 \\
0 & 1 & 0
\end{mymatrix}
\end{equation*}
This tells us that $x = 0$ and $y = 0$. Returning to the original system, this says that if
\begin{equation*}
\begin{mymatrix}{cc}
1 & 1 \\
1 & 2\\
\end{mymatrix}
\begin{mymatrix}{c}
x\\
y
\end{mymatrix}
=
\begin{mymatrix}{c}
0 \\
0
\end{mymatrix}
\end{equation*}
then
\begin{equation*}
\begin{mymatrix}{c}
x \\
y
\end{mymatrix}
=
\begin{mymatrix}{c}
0 \\
0
\end{mymatrix}
\end{equation*}
In other words, $A\vect{x}=\vect{0}$ implies that $\vect{x}=\vect{0}$. By
Proposition~\ref{prop:one-to-one-matrices}, $A$ is one to one, and so $T$ is also one to one.
We could also have seen that $T$ is one to one from our solution to the onto question above. By looking at the matrix given
by {\eqref{onto-matrix}}, you can see that there is a \textbf{unique} solution, given
by $x=2a-b$ and $y=b-a$. Therefore, there
is only one vector, specifically
$\begin{mymatrix}{c}
x \\
y
\end{mymatrix}
=
\begin{mymatrix}{c}
2a-b\\
b-a
\end{mymatrix} $ such that $T\begin{mymatrix}{c}
x \\
y
\end{mymatrix} =\begin{mymatrix}{c}
a \\
b
\end{mymatrix}$. Hence by Definition~\ref{def:one-to-one}, $T$ is one to one.
\end{solution}
\begin{example}{An onto transformation}{onto-transformation}
Let $T: \R^4 \to \R^2$ be a linear transformation defined by
\[
T \begin{mymatrix}{c}
a \\
b \\
c \\
d
\end{mymatrix} =
\begin{mymatrix}{c}
a + d \\
b + c
\end{mymatrix}
\mbox{ for all } \begin{mymatrix}{c}
a \\
b \\
c \\
d
\end{mymatrix} \in \R^4
\]
Prove that $T$ is onto but not one to one.
\end{example}
\begin{solution}
You can prove that $T$ is in fact linear.
To show that $T$ is onto, let $\begin{mymatrix}{c}
x \\
y
\end{mymatrix}$ be an arbitrary vector in $\R^2$. Taking the vector $\begin{mymatrix}{c}
x \\
y \\
0 \\
0
\end{mymatrix} \in \R^4$ we have
\[
T \begin{mymatrix}{c}
x \\
y \\
0 \\
0
\end{mymatrix} =
\begin{mymatrix}{c}
x + 0 \\
y + 0
\end{mymatrix}
= \begin{mymatrix}{c}
x \\
y
\end{mymatrix}
\]
This shows that $T$ is onto.
By Proposition~\ref{prop:one-to-one-matrices} $T$ is one to one if and only if $T(\vect{x}) = \vect{0}$ implies that $\vect{x} = \vect{0}$. Observe that
\[
T \begin{mymatrix}{r}
1 \\
0 \\
0 \\
-1
\end{mymatrix} =
\begin{mymatrix}{c}
1 + (-1) \\
0 + 0
\end{mymatrix}
= \begin{mymatrix}{c}
0 \\
0
\end{mymatrix}
\]
There exists a non-zero vector $\vect{x}$ in $\R^4$ such that $T(\vect{x}) = \vect{0}$. It follows that $T$ is not one to one.
\end{solution}
The above examples demonstrate a method to determine if a linear transformation $T$ is one to one or onto. It turns out that the matrix $A$ of $T$ can provide this information.
\begin{theorem}{Matrix of a one to one or onto transformation}{matrix-one-to-one-onto}
Let $T: \R^n \to \R^m$ be a linear transformation induced by the $m \times n$-matrix $A$. Then $T$ is one to one if and only if the rank of $A$ is $n$. $T$ is onto if and only if the rank of $A$ is $m$.
\end{theorem}
Consider Example~\ref{exa:onto-transformation}. Above we showed that $T$ was onto but not one to one. We can now use this theorem to determine this fact about $T$.
\begin{example}{An onto transformation}{onto-transformation-matrix}
Let $T: \R^4 \to \R^2$ be a linear transformation defined by
\[
T \begin{mymatrix}{c}
a \\
b \\
c \\
d
\end{mymatrix} =
\begin{mymatrix}{c}
a + d \\
b + c
\end{mymatrix}
\mbox{ for all } \begin{mymatrix}{c}
a \\
b \\
c \\
d
\end{mymatrix} \in \R^4
\]
Prove that $T$ is onto but not one to one.
\end{example}
\begin{solution}
Using Theorem~\ref{thm:matrix-one-to-one-onto} we can show that $T$ is onto but not one to one from the matrix of $T$. Recall that to find the matrix $A$ of $T$, we apply $T$ to each of the standard basis vectors $\vect{e}_i$ of $\R^4$. The result is the $2 \times 4$-matrix $A$ given by
\[
A = \begin{mymatrix}{rrrr}
1 & 0 & 0 & 1 \\
0 & 1 & 1 & 0
\end{mymatrix}
\]
Fortunately, this matrix is already in {\rref}. The rank of $A$ is $2$, which equals $m = 2$ but is smaller than $n = 4$. Therefore, by the above theorem, $T$ is onto but not one to one.
\end{solution}
Recall that if $S$ and $T$ are linear transformations, we can discuss their composite denoted $S \circ T$. The following examines what happens if both $S$ and $T$ are onto.
\begin{example}{Composite of onto transformations}{composite-onto}
Let $T: \R^k \to \R^n$ and $S: \R^n \to \R^m$ be linear transformations.
If $T$ and $S$ are onto, then $S \circ T$ is onto.
\end{example}
\begin{solution}
Let $\vect{z}\in \R^m$.
Since $S$ is onto, there exists a vector $\vect{y}\in \R^n$
such that $S(\vect{y})=\vect{z}$.
Furthermore, since $T$ is onto, there exists a vector $\vect{x}\in \R^k$
such that $T(\vect{x})=\vect{y}$.
Thus
\[ \vect{z} = S(\vect{y}) = S(T(\vect{x})) = (S \circ T)(\vect{x}),\]
showing that for each $\vect{z}\in \R^m$ there exists an $\vect{x}\in \R^k$
such that $(S \circ T)(\vect{x})=\vect{z}$.
Therefore, $S \circ T$ is onto.
\end{solution}
The next example shows the same concept with regards to one-to-one transformations.
\begin{example}{Composite of one to one transformations}{composite-one-to-one}
Let $T: \R^k \to \R^n$ and $S: \R^n \to \R^m$ be linear transformations.
Prove that if $T$ and $S$ are one to one, then $S \circ T$
is one to one.
\end{example}
\begin{solution}
Since $S \circ T$ is a linear transformation, by Proposition~\ref{prop:one-to-one-matrices} it is enough to show that if $S(T (\vect{v})) = \vect{0}$, then $\vect{v} = \vect{0}$.
Suppose that $S(T (\vect{v})) = \vect{0}$. Since $S$ is one to one, it follows that $T (\vect{v}) = \vect{0}$. Similarly, since $T$ is one to one, it follows that $\vect{v} = \vect{0}$. Hence $S \circ T$ is one to one.
\end{solution}
\documentclass[11pt]{amsart}
\usepackage{geometry} % See geometry.pdf to learn the layout options. There are lots.
\geometry{letterpaper} % ... or a4paper or a5paper or ...
%\geometry{landscape} % Activate for for rotated page geometry
%\usepackage[parfill]{parskip} % Activate to begin paragraphs with an empty line rather than an indent
\usepackage{graphicx}
\usepackage{amssymb}
\usepackage{epstopdf}
\usepackage{dirtytalk}
\usepackage{fuzz}
\DeclareGraphicsRule{.tif}{png}{.png}{`convert #1 `dirname #1`/`basename #1 .tif`.png}
\title{Notes on Entropy}
\author{Arthur Ryman}
\date{\today} % Activate to display a given date or no date
\begin{document}
\begin{abstract}
The concept of entropy crops up in many seemingly unrelated areas, ranging from physics to information theory to
machine learning.
This article summarizes the mathematical foundations of entropy, providing precise definitions and proofs.
\end{abstract}
\maketitle
\section{Introduction}
Carnot discovered the concept of entropy while analyzing the maximum theoretical efficiency of heat engines.
Boltzmann gave entropy a statistical definition in terms of the number of microscopic states available to a macroscopic state.
Shannon showed how entropy was relevant to the rate at which information could be transmitted over a communications channel.
Bekenstein proposed that black holes have entropy and Hawking defined their temperature and predicted that they would radiate.
And today, machine learning algorithms regularly use the concepts of entropy and information gain to analyze large datasets.
This is certainly a fascinating and important circle of ideas!
Unfortunately, it is difficult to find clear explanations of these concepts.
Typical textbooks on thermodynamics do not lay out the material using precise mathematical language.
My goal in this article is to collect together all the related definitions and give them a precise mathematical treatment.
The target audience for this article is me since I am personally dissatisfied with my grasp of the subject.
However, if I manage to explain this material clearly and simply enough, then it may be of interest to the machine learning community.
\section{Microscopic and Macroscopic States}
When Carnot discovered entropy, the atomic theory of matter was not widely accepted as fact by the scientific community.
Carnot analyzed heat engines using their macroscopic properties, such as temperature, volume, and pressure.
When the atomic theory was being developed, physicists sought to derive the macroscopic properties of matter from
the statistical properties of their microscopic constituents.
This approach was called {\it statistical mechanics} or, more generally, {\it statistical physics}.
In fact, the success of statistical mechanics in explaining thermodynamics was viewed as strong supporting evidence for the
validity of the atomic theory of matter.
\subsection{Boltzmann Entropy}
Although the thermodynamic definition of entropy came first,
I am going to start with the statistical definition since it explains the thermodynamic definition and
it applies more directly to machine learning.
The concept of entropy arises when we forget information about the states of a system.
Consider a system and let $A$ be the set of states that the system can be in.
For example, consider a classical system that consists of $N$ identical point particles, each of mass $m$, moving in a box of fixed volume
that has perfectly reflecting walls.
The state of this system is determined by the position and momentum of each particle.
We refer to $A$ as the set of {\it microscopic states} of the system or, more briefly, its {\it microstates}.
Suppose further that $B$ is a related set of more coarse-grained states referred to as the {\it macroscopic states} or {\it macrostates}.
Every macrostate corresponds to one or more microstates.
Continuing with the example of particles in a box, the macrostate might be defined simply by the system's temperature.
Let the correspondence between the microstates and macrostates be given by a {\it forgetful} mapping $f$ of $A$ onto $B$:
\begin{equation}
f: A \fun B
\end{equation}
For particles in a box, the temperature is determined by the particles' average kinetic energy.
The Boltzmann definition of entropy applies directly to the case where, for each macrostate $b$, there is a finite number $\Omega_f(b)$ of microstates
that map to $b$ under $f$:
\begin{equation}
\Omega_f : B \fun \nat_1
\end{equation}
where
\begin{equation}
\Omega_f(b) = \# \{~ a: A | f(a) = b ~\}
\end{equation}
In this case the entropy $S_f(b)$ of the macrostate $b$ is defined to be:
\begin{equation}
S_f : B \fun \mathbb{R}
\end{equation}
where
\begin{equation}
S_f(b) = k \ln \Omega_f(b)
\end{equation}
where $k$ is Boltzmann's constant.
In the system of particles in a box there are infinitely many microstates for each macrostate, so we'll need to adapt Boltzmann's definition of entropy a little.
Before doing that, let's examine the situation for a finite system.
\subsection{The Entropy of Finite Systems}
Consider a particle that can exist in one of some finite number $d$ of states.
Let $\Phi$ denote the set of these single-particle states:
\begin{equation}
\# \Phi = d
\end{equation}
Let $\phi_1, \ldots, \phi_d$ denote the $d$ states in $\Phi$:
\begin{equation}
\Phi = \{ \phi_1, \ldots, \phi_d \}
\end{equation}
Now consider a system of $N$ of these particles.
Each microstate of this system is given by an ordered $N$-tuple of single-particle states:
\begin{equation}
A = \Phi^N
\end{equation}
Recall the definition of the Kronecker $\delta$-function:
\begin{equation}
\delta(x,y) =
\left\{
\begin{array}{ll}
1 & \mbox{if $x = y$} \\
0 & \mbox{if $x \neq y$}
\end{array}
\right.
\end{equation}
Given an $N$-particle microstate $a = (a_1, \ldots, a_N)$ and a single-particle state $\phi \in \Phi$ let $n(a, \phi)$ be the number of particles in
$a$ that are in the single-particle state $\phi$:
\begin{equation}
n : A \cross \Phi \fun \nat
\end{equation}
where
\begin{equation}
n(a, \phi) = \sum_{i=1}^N \delta(a_i, \phi)
\end{equation}
The number of particles in a single-particle state is referred to as the {\it occupancy number} of that state.
Note that for any multi-particle state $a$ we have:
\begin{equation}
\sum_{j=1}^d n(a, \phi_j) = N
\end{equation}
Define the macrostates of the system to be the set of all possible $N$-particle occupancy number assignments $b$ on $\Phi$:
\begin{equation}
B = \{~ b: \Phi \fun \nat | \sum_{j=1}^d b(\phi_j) = N ~\}
\end{equation}
The forgetful mapping $f : A \fun B$ sends an $N$-particle state to its occupancy numbers on $\Phi$:
\begin{equation}
f(a)(\phi) = n(a, \phi)
\end{equation}
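As a small example, take $N = 2$ particles and $d = 2$ single-particle states, so that $\# A = d^N = 4$.
The four microstates are $(\phi_1, \phi_1)$, $(\phi_1, \phi_2)$, $(\phi_2, \phi_1)$ and $(\phi_2, \phi_2)$,
and there are three macrostates, given by the occupancy numbers $(b(\phi_1), b(\phi_2)) = (2,0)$, $(1,1)$ and $(0,2)$.
The forgetful mapping sends both $(\phi_1, \phi_2)$ and $(\phi_2, \phi_1)$ to the macrostate $(1,1)$,
so $\Omega_f$ takes the values $1$, $2$ and $1$ on the three macrostates.
The balanced macrostate $(1,1)$ therefore has entropy $k \ln 2$, while the other two have entropy $k \ln 1 = 0$.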
TO DO: Compute the entropy of a state b in B. Use Stirling's formula to approximate it. Show how Shannon's entropy emerges.
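A sketch of how this computation might go, using the occupancy-number description of macrostates above:
the number of $N$-particle microstates with occupancy numbers $b$ is the multinomial coefficient
\begin{equation}
\Omega_f(b) = \frac{N!}{\prod_{j=1}^d b(\phi_j)!}
\end{equation}
so that
\begin{equation}
S_f(b) = k \ln N! - k \sum_{j=1}^d \ln b(\phi_j)!
\end{equation}
Writing $p_j = b(\phi_j)/N$ for the fraction of particles in the single-particle state $\phi_j$
and applying Stirling's approximation $\ln n! \approx n \ln n - n$ to each factorial,
the term $-N$ coming from $\ln N!$ cancels against $+\sum_j b(\phi_j) = N$, leaving
\begin{equation}
S_f(b) \approx -kN \sum_{j=1}^d p_j \ln p_j
\end{equation}
which, up to the constant factor $kN$, is Shannon's entropy of the probability distribution $(p_1, \ldots, p_d)$.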
%\subsection{}
\end{document}
"alphanum_fraction": 0.7498565691,
"avg_line_length": 39.6136363636,
"ext": "tex",
"hexsha": "a1bef10a2a46cac67ad9b8baefbfefbea4e78d3d",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "ca2deaf0042230b6860a5d60a24cca38be6ac511",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "agryman/entropy",
"max_forks_repo_path": "entropy.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "ca2deaf0042230b6860a5d60a24cca38be6ac511",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "agryman/entropy",
"max_issues_repo_path": "entropy.tex",
"max_line_length": 166,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "ca2deaf0042230b6860a5d60a24cca38be6ac511",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "agryman/entropy",
"max_stars_repo_path": "entropy.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1762,
"size": 6972
} |
{% extends "simple_event_list/tex/base.tex" %}
{% block document %}
\section*{Calendar Entries for {{ target_year }} }
{% for month in entries %}
\subsection*{ {{ month.0.timestamp|date:"F" }} }
\begin{enumerate}
{% for entry in month %}
{% include "simple_event_list/tex/single_entry_line.tex" with event=entry %}
{% endfor %}
\end{enumerate}
{% endfor %}
{% endblock document %}
%!TEX root = ../report.tex
%
% Related work
%
% (~17pgs)
\section{Related Work}
\label{sec:relatedWork}
This section describes the related work and is organized into three main parts. First, in order to better motivate our proposal, we discuss the existing security issues of a class of applications that can take advantage of a trusted output functionality: mobile health applications. In the second part of this section, we overview the state of the art of general-purpose mobile security mechanisms used to implement most security-sensitive applications, such as those described in the first part. We also explain why such mechanisms fall short in supporting secure output to these apps. Finally, the third part describes a specific class of security systems which, similarly to \emph{ViewZone}, leverage TrustZone to implement a variety of security solutions for mobile platforms.
\subsection{Studies in Mobile Health Security}
\label{sec:mhealth}
Medical data is highly profitable for malicious agents who can use it for medical identity theft, as it can be more valuable than credit card information. Generally, vulnerabilities which lead to sensitive data leakage stem from negligent development of healthcare systems. For this reason, regulatory laws such as the \ac{HIPAA} have been established. These laws comprise the standard for electronic healthcare transactions and must be followed by all developers when managing sensitive health data.
In the mobile context, data is even more exposed and vulnerable for several reasons: \emph{(1)} inherent portability of these devices, \emph{(2)} sharing of information with third-party advertisers by device manufacturers or mobile app developers, \emph{(3)} unregulated management of sensitive medical information, especially because regulatory laws such as \ac{HIPAA} do not account for the mobile sector, and \emph{(4)} existence of security flaws in consumer device software, which can be exploited by malware. Such an exposure to attacks motivates the need for a security system capable of allowing applications to securely display and manage sensitive data.
The following subsections describe work done in assessing the security properties of commercial mobile health applications as to better understand the need for a secure content display capability. The first subsection describes a threat taxonomy for mHealth applications by Kotz~\cite{kotz2011threat}. The second and third subsections describe work done by He et al.~\cite{he2014security} through the analysis of three studies which answer the following questions: \emph{what are the potential attack surfaces} and \emph{how widespread and how serious are these security threats}.
\subsubsection{Threat Taxonomy for Mobile Health Applications}
Kotz~\cite{kotz2011threat} defines a threat taxonomy for mHealth and categorizes threats in three main categories: \emph{identity threats}, \emph{access threats} and \emph{disclosure threats}.
\emph{Identity threats} are described as a misuse of patient identities and include scenarios where a patient may lose (or share) their identity credentials, allowing malicious agents to access their respective \ac{PHR}. \emph{Insiders} (authorized \ac{PHR} users, staff of the \ac{PHR} organization or staff of other mHealth support systems) may also use patient identities for medical fraud or medical identity theft. And \emph{outsiders} may be able to observe patient identity or location from communications.
% verb tense of ``their''? not sure
\emph{Access threats} are described as unauthorized accesses to \ac{PHR} and include scenarios where the patient controlling the data allows broader-than-intended access or disclosure of information. Additionally, \emph{insiders} may also snoop or modify patient data with malicious intent, and \emph{outsiders} may leak or modify this data by breaking into patient records.
\emph{Disclosure threats} include scenarios where an adversary captures network packets in order to obtain sensitive health data. This problem can be mitigated by using strong encryption methods. But even if the network traffic is encrypted, it is possible to analyse the traffic to determine its characteristics~\cite{wright2006inferring}. The adversary may also use physical or link layer fingerprinting methods to identify the device type and inject or selectively interfere with wireless frames.
Avancha et al.~\cite{avancha2012privacy} developed a privacy framework for sensitive mobile health systems. A developer should use the list of privacy properties provided by this article as a check-list to be considered in any design. But even these security properties are not sufficient to withstand an attack leveraged by a compromised operating system, which is an attack not explicitly described in the above taxonomy. For this reason, there is a need for a security solution capable of withstanding such attacks whilst protecting sensitive information.
\subsubsection{Potential Attack Surface of Commercial Mobile Health Applications}
He et al.~\cite{he2014security} analysed several mHealth applications available in Google Play (Android's app store) to contribute to the understanding of security and privacy risk on the Android platform. In the first of three studies described, 160 apps were analysed to find evidence of security threats. From surveying previous literature, seven attack surfaces are determined to be in need of protection. These attack surfaces are shown in Table~\ref{tab:attacksurfaces}. This table provides an overview of the problems disclosed by the apps studied. From the table, we can observe that applications send unencrypted information or use incorrectly configured Internet protocols. These apps also store data in third-party servers, where data is not assured to be secure. Additionally, these apps inadvertently log sensitive data to the system or export private app components, making them accessible to other applications.
\begin{table}[t]
\caption {Description of attack surfaces (taken from He et al. \cite{he2014security}).}
\label{tab:attacksurfaces}
\begin{tabular}{|>{\raggedright}p{2cm}|>{\raggedright\arraybackslash}p{10cm}|}
\hline
\textbf{Attack Surface} & \textbf{Description} \\ \hline
Internet & Sensitive information is sent over the Internet with insecure protocols (e.g. HTTP), misconfigured HTTPS, etc. \\ \hline
Third-Party & Sensitive information is stored in third-party servers. \\ \hline
Bluetooth & Sensitive information collected by Bluetooth-enabled health devices can be sniffed or injected. \\ \hline
Logging & Sensitive information is put into system logs where it is not secured. \\ \hline
SD Card Storage & Sensitive information is stored in unencrypted files on the SD card, publicly accessible by any other app. \\ \hline
Exported Components & Android app components, intended to be private, are set as exported, making them accessible by other apps. \\ \hline
Side Channels & Sensitive information can be inferred by a malicious app with side channels, e.g. network packet size, sequence, timing, etc. \\ \hline
\end{tabular}
\end{table}
However, Table~\ref{tab:attacksurfaces} is missing an important attack surface, which is the OS. Operating systems have a large and complex code base, thus increasing the incidence of coding vulnerabilities. These vulnerabilities may be exploited by malware, which can then control system resources and the overlying applications. Resources, such as the framebuffers or touchscreen device, might be exploited by a malicious agent in order to disclose data, such as sensitive medical information.
He et al. also document that most apps targeted for patients (60\%) are in the Life Management category, which comprises nutrition and fitness apps. These are followed by apps that manage and synchronize user health information (\ac{PHR} Management), which occupy nearly half (46.88\%) of the domain. These numbers are a good indicator of the data handled by most commercial mHealth apps available, as well as which health apps are more used in the mobile health sector. From these numbers we can infer that a personal health record viewer can serve as a representative use case to demonstrate a trusted output solution.
\subsubsection{Severity of Attacks to Mobile Health Applications}
In the second study by He et al.~\cite{he2014security}, 27 of the top 1080 free apps from the Medical and Health \& Fitness categories on Google Play were analysed in order to assess the most commonly observed vulnerabilities. From this analysis, three attack surfaces are identified as the most important ones: \emph{Internet}, \emph{Third Party Services} and \emph{Logging} (see Table~\ref{tab:attacksurfaces}). Only 7 of the 27 apps use the Internet to effectively send medical information over to remote servers. Some of these apps send unencrypted content, which generally include emails, usernames and passwords. This study also concludes that most of these apps are hosted and store the recorded data on third party servers. This is an economical and scalable solution for mobile applications, but storing sensitive health records on third party servers can have serious implications. This is mostly because users are not capable of assessing whether the data is encrypted, and thereby preventing hosting companies from accessing it.
In the third study by He et al.~\cite{he2014security}, another 22 apps, which send information over the Internet, are randomly selected from the same top 1080 apps. These apps were then audited to understand what information is effectively being sent, thus inferring the seriousness of the threats. The conclusion is that, when used as intended, these apps gather, store and transmit a variety of sensitive user data. This data includes personal profiles, health sensor data, lifestyle data, medical browsing history and third-party app data (e.g., Facebook account information).
The consequences of disclosing or tampering with sensitive health data depend on the type, sensitivity and volume of data breached. However, it is clear that profiling, medical identity theft and healthcare decision-making errors are all possible. This is why He et al. suggest the use of encryption for communication and storage, and encourage developers to create a set of standard privacy guidelines that offer a baseline for protection. Nonetheless, these measures are not sufficient to protect sensitive data against more complex attacks, such as those which involve a malware-compromised OS. In a system where a malicious entity controls system resources, encryption techniques and good development guidelines might not be enough. This is because the system can bypass these mechanisms and disclose data when it is in its raw displayable form, i.e., unencrypted.\\
Security measures and regulatory laws are implemented for custom healthcare information systems in order to mitigate attacks which may disclose sensitive medical data. However, in the mobile sector, these security measures may not be possible or practical to implement, and regulatory laws, such as \ac{HIPAA}, do not account for the mobile market. For this reason, the research community analysed and categorized mHealth threats, developed a privacy framework and studied commercially available apps in order to achieve a standardized set of rules to mitigate negligent development. But these studies consider the whole operating system as the trusted computing base for mHealth applications. Because this \ac{TCB} is so complex and unreliable, malware may take control of system resources and consequently leak sensitive data. Ultimately, what the mHealth market needs is a security solution which provides secure primitives that both developers and users desire, such as secure display. And this secure display must work without having to rely on a full-featured operating system, which inherently has a large \ac{TCB}.
\subsection{General-Purpose Mobile Security Mechanisms}
Like most existing mobile applications, mHealth apps tend to depend on the security services provided by the mobile platform. This section provides an overview of the security mechanisms that have been developed for Android.
Since many security mechanisms have been proposed, we divide them into groups to reflect the different ways in which sensitive information is protected: \emph{(a) access control mechanisms}, which enforce security policies that prevent access to sensitive resources by a given application, \emph{(b) application communication monitoring}, which ensures that sensitive data that was read by a given application can be securely exchanged with co-located apps, \emph{(c) privacy enhancement systems}, which aim at preventing privacy breaches by applications that read sensitive data, and need to share it with Internet services, and \emph{(d) trusted execution systems} (TES), which provide restricted environments in which sensitive data can be processed while trusting in a smaller code base than that of a rich OS. Figure~\ref{fig:relwork} maps these four groups to a representation of a possible mobile system.
\begin{figure}[t!]
\centering
\includegraphics[width=0.50\textwidth]{img/relwork.pdf}
\caption{Security mechanisms map; A-application, R-system resource, TES-Trusted Execution System.}
\label{fig:relwork}
\end{figure}
\subsubsection{Access Control Mechanisms}
\label{sec:accesscontrol}
% ACCESS CONTROL MECHANISMS
Access control mechanisms implement security models in which subjects (e.g. user, processes, threads, etc.) are constrained by a security policy to perform certain actions on the system, namely accesses to resources, typically called objects (e.g. files, sensors, etc.).
Android inherits a \ac{DAC} mechanism from its Linux based kernel, but some system resources, such as the IPC Binder mechanism, are accessed via \ac{MAC} policies. In a \ac{DAC} system the data owner is responsible for the data and, as such, determines who can access it. In Android, an application can create and store files in the filesystem, thus becoming the sole owner of such files, and it can allow access to them for any other application. In \ac{MAC}, subjects are much more restricted in determining specific permissions because the restrictions on these resources are defined by a global system policy. In the Android operating system, once a subject attempts to access an object, it triggers a policy evaluation by the kernel, which assesses whether the access is granted. The advantage of this strict system is its robustness and restrictiveness, because subjects cannot override or modify the security policy. In Android, applications must specify in their manifest the permissions they require at runtime, and after the installation neither applications nor users have any control over the access policies.
Because \ac{MAC} is more restrictive, several systems were created over the years to extend \ac{MAC}'s access control model to other Android resources. SEAndroid~\cite{smalley2013security} solves problems related to resources complying with the \ac{DAC} mechanism. The authors ported SELinux~\cite{peter2001integrating} to provide \ac{MAC} at the kernel layer. The kernel was then modified to support a new \ac{MAC} policy (e.g., filesystem, IPC). A new middleware layer (MMAC) was also created to extend \ac{MAC} to Android's Binder IPC. TrustDroid~\cite{bugiel2011practical} extends the \ac{MAC} mechanism to all the platform's resources in order to isolate different domains' sensitive information.
\paragraph{\textbf{Permission Refinement}}
In spite of its robustness, the Android permission mechanism is very restrictive. At install time, the system displays on screen the list of application-specific permissions that must be accepted or denied by the user. The user is forced into a binary decision, either granting all permissions or quitting the installation. Furthermore, some of these applications may even request more permissions than those effectively needed. This is an inflexible solution, which makes it impossible for users to have full control over the permissions an application effectively requires at runtime. This inflexibility allows an app to use the device's resources without the user's knowledge, possibly with malicious intent.
Over the years, many systems aimed at improving the state of affairs through permission refinement. APEX~\cite{nauman2010apex} modified Android's permission model to allow users to specify the assignment of permissions to each app, both at install and runtime. Permission Tracker~\cite{kern2012permission} allows users to be informed of how permissions are used at runtime and offers the possibility of revoking those permissions. Furthermore, a user can specify which permissions are of interest so they can be notified of every permission access and decide whether to grant or deny that access. These systems improve the original Android permission model, but require manual configuration by the user. % verb tense of ``they''?
A more useful solution would be to use a context-aware system to handle the permissions at runtime. This way, it may be possible to automatically restrict permissions to all applications running alongside a security-sensitive app, thus isolating this application and avoiding possible leaks through shared resources.
There are several context-aware permission refinement systems developed by the research community. Trusted third parties can use CRePE~\cite{conti2011crepe} to enforce security policies on other devices. For example, an employer may enforce a security policy on the employees' mobile devices when inside the company. Similarly, MOSES~\cite{russello2012moses} enforces domain isolation through the concept of security profiles, allowing it to switch profiles based on pre-established conditions (e.g., GPS coordinates and time). Additionally, MOSES leverages TaintDroid~\cite{enck2014taintdroid} to prevent apps from one profile from accessing data belonging to another. Both CRePE and MOSES suffer from a device control issue where a third party defines a policy that cannot be revoked by the user. Moreover, a user has no way to deny the enforcement of a third-party policy.
\paragraph{\textbf{Access Control Hook APIs}}
Most security extensions, such as CRePE~\cite{conti2011crepe} or MOSES~\cite{russello2012moses}, require modifying and adding components to the kernel and middleware layers in order to implement new security models. Some frameworks, such as \ac{ASM}~\cite{heuser2014asm} and \ac{ASF}~\cite{backes2014android}, were built in order to ease this development process. These frameworks allow developers to easily create new security models as ordinary Android applications whilst benefiting from a full callback system, which allows apps to be notified of accesses to resources of interest.
These frameworks comprise a set of hooks distributed across the kernel and middleware layers, which can be registered by a secure application. When a hook is activated, it triggers a callback from the Hook API module, which in turn is forwarded to the app for verification. The app then decides if the operation that triggered the hook activation may or may not proceed. The main advantage of these frameworks is the flexibility and freedom given to developers in choosing which resources to manage.
\paragraph{\textbf{Memory Instrumentation}}
An alternative approach to implementing access control policies is based on memory instrumentation. Memory instrumentation leverages application code analysis techniques to restrict access from those applications to the corresponding resources. Memory instrumentation can be divided into two groups: \emph{static memory instrumentation} and \emph{dynamic memory instrumentation}. While static memory instrumentation changes pre-compiled bytecode, dynamic instrumentation patches running processes, and for this reason it supports the enforcement of new security models. DeepDroid~\cite{wang2015deepdroid} relies on dynamic memory instrumentation to enforce fine-grained context-aware security policies for enterprise and does this by patching several system services and tracing system calls to resources of interest.
\paragraph{\textbf{Digital Rights Management}} is a specific access control technology which allows data owners to restrict if and how their data can be copied and also how it can be handled once transferred to another device.
The \ac{DRM} ecosystem is composed of the following entities:
\begin{itemize}
\item[$\bullet$] \emph{User} - human user of the DRM Content;
\item[$\bullet$] \emph{Content Issuer} - entity that delivers the content;
\item[$\bullet$] \emph{Rights Issuer} - entity responsible for assigning permissions and constraints to \ac{DRM} content;
\item[$\bullet$] \emph{Rights Object} - XML document generated by a Rights Issuer expressing the restrictions associated to the content;
\item[$\bullet$] \emph{\ac{DRM} Agent} - trusted entity responsible for enforcing permissions and constraints upon the \ac{DRM} content.
\end{itemize}
In order to define the format of the content delivered to DRM Agents and how this content can be transferred from the Content Issuer to the DRM Agent, the \ac{OMA} developed the DRM standard~\cite{drm}. Android provides an extensible DRM framework, called Android DRM Framework~\cite{android_drm}. This framework allows application developers to enable their apps to manage rights-protected content by complying with one of the supported DRM schemes (mechanisms, enforced by DRM Agents, to handle particular types of files).
To understand how \ac{DRM} could be employed in the context of mHealth, consider the simple example of a \ac{PHR} mobile health application. In this scenario, the healthcare provider (e.g., a hospital) would be the \emph{content issuer}, and it would use a \emph{rights issuer} to assign the restrictions imposed upon the \ac{DRM} content. These restrictions are imposed upon the content, i.e., the personal health record, when this content is transferred to the patient's device. When using \ac{DRM}, the patient can only access the content through a \emph{\ac{DRM} Agent}. Although this solution seems to fit the security properties a critical application requires, once again the whole operating system belongs to the trusted computing base. Therefore, resources shared by different applications, such as the framebuffers and display devices, may leak sensitive information if the system becomes compromised.
\subsubsection{Application Communication Monitoring}
\begin{figure}[t!]
\centering
\includegraphics[width=0.95\textwidth]{img/communicationattacks.png}
\caption{Confused deputy attack (a); Collusion attack (b) - (taken from Duarte \cite{nunoduarte}).}
\label{fig:communicationattacks}
\end{figure}
Along with access control enforcement, some concerns have been raised about the security of Android's standard \ac{IPC} mechanism. This mechanism allows different processes, i.e., applications, to communicate with each other, and controls how apps access system components. However, this mechanism can be abused by applications, as they can leverage it to access unauthorized resources or data. In this section we describe two attacks on Android's IPC mechanism followed by systems developed to mitigate such attacks. The attacks described are called \emph{Confused Deputy Attacks} and \emph{Collusion Attacks} and are represented in Figure~\ref{fig:communicationattacks}.
Confused deputy attacks basically consist of unprivileged applications taking advantage of other applications’ publicly accessible APIs to perform an unauthorized action. If App 2 publicly allows other apps to use the Internet through its API, then the user is unaware that App 1 can use the Internet without explicitly requesting it in its manifest. Collusion attacks consist of an app that, although not granted the permission to perform an action, can nevertheless perform it. This is possible if there exists another app, belonging to the same developer and installed on the user’s device, with the permission to perform said operation. This happens because Android’s permission system is based on UIDs: applications signed by the same developer can share a UID and thereby pool their granted permissions.
Confused deputy attacks allow applications to use resources without explicitly specifying the necessary permission to do so. To mitigate such attacks the research community developed Saint~\cite{ongtang2012semantically} and QUIRE~\cite{dietz2011quire}. Saint was created to specify which apps can access the public APIs of another app. QUIRE denies access to an API if the message exchanged between apps stems from an unauthorized source. QUIRE does this by analysing the full call chain context of the message, which contains the source of the request.
Collusion attacks are based on a malicious developer building a legitimate application and persuading the user to install a second app with different permissions. Both apps can collude to leak sensitive information by cooperating with each other. XManDroid~\cite{bugiel2011xmandroid} extends the Android permission model in order to support policies that could constrain the way apps interact with each other. This system prevents data leakage or other types of collusion attacks by populating a graph at runtime with a representation of the apps’ interactions, which is then used to regulate inter-app communication.
Although these systems can be used to avoid promiscuous communication between applications, they do not support the development of apps with the goal of displaying sensitive content securely.
\subsubsection{Privacy Enhancement Systems}
The mechanisms reviewed so far cover the security issues involved when an application \emph{1)} reads data from a given resource, or \emph{2)} attempts to share sensitive data with co-located applications. In addition, applications may try to exfiltrate sensitive data to remote untrusted parties, which raises privacy concerns. In fact, the biggest concern for privacy-sensitive mobile applications is data leakage, especially with valuable data such as health records. In this section we describe systems which aim to control the flow of sensitive data in order to assess whether such data leaves the system as a consequence of a malicious action.
Systems like MockDroid~\cite{beresford2011mockdroid}, TISSA~\cite{zhou2011taming} and IdentiDroid~\cite{shebaro2014identidroid} are extensions to Android's data access control mechanism and prevent untrusted applications from accessing sensitive data. This is done by allowing users to manually specify application access rights over the system services, such as geographic location. These systems may also provide data shadowing\footnote{Data shadowing is the return of empty or incorrect information instead of the intended data.} to return plausible but incorrect results to the requesting application.
Alternative systems use a different approach to solve the sensitive data disclosure problem. Unlike data access control mechanisms, dynamic taint analysis systems, such as TaintDroid~\cite{enck2014taintdroid}, prevent data leakage by tainting data with a specific mark and then evaluate how this data is propagated through the system. If this data attempts to leave, the user is then alerted. This system suffers from limitations such as \emph{(i)} tracking a low number of data sources (mainly sensors), \emph{(ii)} performance overheads not tolerable for most mobile environments, \emph{(iii)} the existence of false positives leading to access control circumvention and \emph{(iv)} the incapacity of analysing sensitive information leakage through covert channels.
Although these systems can be leveraged in order to understand if sensitive data is leaving the system through network sinks, they fall short in assessing whether data is accessed via shared resources, such as framebuffers and display devices. This means that a compromised OS may access such resources and disclose sensitive information.
\subsubsection{Trusted Execution Systems}
\label{sec:tee}
In all previously discussed systems, the mobile applications depend entirely on the integrity and correctness of the operating system. In other words, the trusted computing base includes the full-featured operating system responsible for the app's execution. To reduce the trusted computing base, \emph{trusted execution systems} (see Figure~\ref{fig:relwork}) allow certain (in some cases all) operating system components to be disabled or deemed untrusted and provide a restricted execution environment in which sensitive application code and data can be processed safely.
\paragraph{\textbf{TrUbi}} is a system, developed by Duarte \cite{nunoduarte} and Costa \cite{miguelcosta}, built on top of ASM \cite{heuser2014asm}, which allows for flexible system-wide resource restriction. This system may be used to isolate privacy-sensitive apps by killing, freezing or revoking permissions from running applications. With TrUbi it is possible to isolate the execution of a critical application from the remaining apps installed on the system. For example, a simple \ac{PHR} app to display personal health records could be developed with the following premise. When the \ac{PHR} app is running, all the other apps are killed and all the resources blocked. The app could then download the health records from the healthcare provider and show the data to the user. When the user exits the application, their data is encrypted with a key generated from a user password, and only then are the resources released for the other applications. The system is completely isolated during the whole process and the data is stored encrypted (but the key, because it is generated from a user password, is not stored on the system).
Although TrUbi effectively reduces the TCB dynamically by disabling concurrently running processes, it still trusts the operating system's code base, which may be compromised by malware. Furthermore, TrUbi has no support for trusted UI, meaning that a compromised OS can access sensitive data by disclosing the content of resources such as graphical framebuffers and display devices. The following systems allow for a more significant reduction of the TCB by isolating the execution environment of sensitive code from that of a full-featured OS.
\paragraph{\textbf{External and Internal hardware security modules}} External hardware security modules represent the classic security solution for embedded applications, which consists of offloading trust from the OS into a dedicated piece of hardware commonly named a \emph{trusted element} (e.g., a smartcard). Typically, the trusted element is located outside of the main \ac{SoC}. The main advantage of this solution is that it allows for the encapsulation of sensitive assets inside a physical device specially designed for robust security. Internal hardware security modules are, contrary to external modules, included within the \ac{SoC}. These integrated modules usually take one of two forms: the first is a hardware block designed specifically for managing cryptographic operations and key storage, and the second is a general-purpose processing engine, which is placed alongside the main processor. This processing engine uses custom hardware logic to prevent unauthorized access to sensitive resources. This solution has the advantage of being cheaper and offering a performance improvement over external modules.
However, the main disadvantage of both external and internal security modules is that they only provide secure processing and storage functions. This means that some operations (e.g., I/O) must rely on software running outside of the security perimeter to provide the desired features. In both of these hardware-based security solutions, sensitive information from the trusted element must always go through the rich OS before it can be written to the framebuffer, opening a window for interception by malicious agents. The next system solves this problem by allowing complete isolation between different execution environments.
\paragraph{\textbf{TrustZone}} is a hardware architecture designed to allow the execution of code in isolation from the rich operating system (see Figure~\ref{fig:trustzone_architecture}). In this architecture, the full-featured operating system runs in the normal world domain, while the trusted code running in the secure world can execute without relying on a complex code base. TrustZone also mitigates performance overheads inherent to software virtualization techniques since the hypervisor mechanism, which manages the trusted and untrusted domains, is implemented natively in hardware.
\begin{figure}[t!]
\centering
\includegraphics[width=0.80\textwidth]{img/trustzone.pdf}
\caption{TrustZone software architecture (adapted from \cite{trustzone_whitepaper}).}
\label{fig:trustzone_architecture}
\end{figure}
The TrustZone hardware and software architectures are described in a whitepaper~\cite{trustzone_whitepaper} by ARM. In a TrustZone-enabled system, a physical processor provides two virtual cores, one considered non-secure and the other secure, as well as a robust context switching mechanism known as monitor mode. The NS bit sent on the main system bus identifies which of the virtual cores performed an instruction or data access. Several software architectures can be implemented on the software stack of a TrustZone-enabled processor, but the most powerful one is a dedicated operating system in the secure world, as shown in Figure~\ref{fig:trustzone_architecture}.
This design allows for concurrent execution of multiple secure world apps and services that are completely independent from the normal world environment, thus, even with a compromised normal world, the secure world executes as expected. Moreover, the kernel design can enforce the logical isolation of secure tasks from each other, preventing one secure task from tampering with the memory space of another. These advantages sparked the research community in developing several systems which leverage this technology. For this reason, TrustZone will be discussed in depth in Section~\ref{sec:trustzone}.
\subsubsection{Summary}
After describing the state of the art of general-purpose mobile security mechanisms, we discuss to what extent existing systems have the potential to provide trusted output paths.
Access control mechanisms focus on restricting access to system resources, which may be useful for critical applications if these can use it to limit the access to shared resources by less sensitive applications running in parallel. But these mechanisms rely on a full-blown operating system, with a large and complex TCB, to ensure this property. And this full-blown OS may become compromised by malware and sabotage these security measures. Moreover, these systems do not offer trusted user interfaces as they provide no explicit isolation between the operating system and resources such as display devices and framebuffers. For these reasons, access control mechanisms do not fulfil the needs of this project.
Application communication monitoring focuses on assuring that communication attacks cannot effectively target Android's IPC mechanism, which is the main communication component between applications in the Android platform. Because these systems are not meant to be used for the development of applications, but rather as a protective measure against specific attacks, these mechanisms are not suitable for solving the underlying problem of developing critical applications that securely display sensitive data.
Privacy enhancement systems focus on assessing whether sensitive data leaves the system via network sinks. While these systems are useful in controlling the data flow of traditional paths, such as Android's IPC, they fall short of controlling the data flow of unconventional paths, such as framebuffers and display devices. This means that sensitive data may be intercepted by a malicious OS without the privacy enhancement system knowing it.
Trusted execution systems focus on supporting an isolated environment which comprises a smaller TCB than that of a full-featured OS. This category comprises very different systems, with different purposes and approaches, which we summarize next. TrUbi dynamically reduces the overall system's TCB while a critical application is running. However, this is done while relying on the operating system, which may be compromised.
External and internal hardware security modules offer an isolated hardware-based environment for the execution of secure tasks. However, because both external and internal modules provide only secure processing and storage functions, the data they protect must eventually be used outside the isolated environment, for instance for display purposes. This means that these systems still rely on the untrusted OS for functionality such as trusted UI, which we want to be supported by the secure environment.
Lastly, TrustZone supports isolated environments running alongside one another by leveraging special-purpose hardware to isolate both domains, which mitigates the performance overhead of software-based isolation. Additionally, TrustZone also supports trusted user interfaces by controlling the necessary peripherals. For this reason, TrustZone will be discussed in depth in the next section, as it supports all the features necessary to implement \emph{ViewZone}.
% TRUSTZONE
\subsection{TrustZone-based Mobile Security Systems}
\label{sec:trustzone}
To improve security, mobile device manufacturers have been designing hardware architectures enhanced with \emph{trusted hardware}. Among the available security architectures there is ARM's TrustZone technology, a trusted hardware technology which allows the development of a diverse set of security systems and services, such as Samsung KNOX~\cite{knox_whitepaper} and DroidVault~\cite{li2014droidvault}. TrustZone is becoming popular as it allows code to be executed in isolation from a full-featured operating system such as Android. This enables a reduction of the trusted computing base on which critical applications depend.
One of the most important uses of TrustZone is building Trusted Execution Environments (TEE), which are compact systems running in the secure world to provide an isolated environment for critical applications. Since the formal standardization of the \ac{TEE} by the OMTP in 2007, several \ac{TEE} software stack architectures have been implemented. This standard comprises a set of security requirements on the functionality a TEE should support. The GlobalPlatform~\cite{global} organization went a step further by defining standard APIs: on the one hand, the TEE internal APIs that a trusted application can rely on and, on the other hand, the communication interfaces that rich OS software can use to interact with its trusted applications.
Table~\ref{tab:trustzonesystemscomparison} categorizes the existing systems along two main dimensions. When surveying the literature, TrustZone-based systems can be divided into two separate groups depending on whether they support general or specific application code hosting: Trusted Kernels and Trusted Services. Trusted Kernels, which comprise the TEEs, allow the execution of generic code in the secure world environment, while Trusted Services implement special-purpose applications in the secure domain and can run directly on bare metal (e.g., a secure key store, an authentication service, etc.). Orthogonally, one can classify both groups with regard to their support of trusted user interfaces (UI). A TrustZone-based system features Trusted UI if it allows secure world components to directly access the mobile interface without interference from the rich OS, thus minimizing the risk of, for instance, password logging. Since \emph{ViewZone} aims to offer a specific functionality for enabling secure display of application data, our system can be considered a Trusted Service with trusted UI, which fits the first quadrant of Table~\ref{tab:trustzonesystemscomparison}.
The remainder of this section describes in more detail the existing TrustZone-enabled systems according to all four categories: \emph{(i)} Trusted Kernels with Untrusted UI, \emph{(ii)} Trusted Services with Untrusted UI, \emph{(iii)} Trusted Kernels with Trusted UI and \emph{(iv)} Trusted Services with Trusted UI.
\begin{table}[t!]
\caption{TrustZone-based system categorization.}
\label{tab:trustzonesystemscomparison}
\centering
%\resizebox{\textwidth}{!}{%
\begin{tabular}{ c|c|c| }
\multicolumn{1}{c}{} & \multicolumn{1}{c}{Untrusted UI} & \multicolumn{1}{c}{Trusted UI} \\ \cline{2-3}
\multirow{6}{*}{T. Services} & & \\
& Android Key Store & TrustOTP \\
& DroidVault & TrustDump \\
& Brasser et al. & AdAttester \\
& & TrustUI \\
& & \\
\cline{2-3}
\multirow{7}{*}{T. Kernels} & & \\
& TLK & Genode \\
& OP-TEE & T6 \\
& Andix OS & TrustICE \\
& Nokia ObC & SierraTEE \\
& TLR & Samsung KNOX \\
& & \\
\cline{2-3}
\end{tabular}
%}
\end{table}
\subsubsection{Trusted Kernels with Untrusted UI}
Trusted Kernels have the goal of executing generic code in their isolated environment, and most of these kernels have similar architectures (similar to the one described in Section~\ref{sec:tee}). This architecture is generally composed of a small trusted kernel running in the secure world of TrustZone-enabled processors, a normal world user space client API and a kernel TEE device driver, used to communicate between worlds.
OP-TEE~\cite{op_tee}, TLK~\cite{tlk}, TLR~\cite{santos2011trusted} and AndixOS~\cite{fitzekandix} are \ac{TEE} implementations which share this general architecture. On-board Credentials (ObC)~\cite{kostiainen2012board} is another \ac{TEE} system, originally developed for Nokia mobile devices using the TI M-Shield technology and later ported to ARM's TrustZone. ObC supports the development of secure credential and authentication mechanisms. Although these systems use TrustZone hardware-based isolation to ensure that applications running inside the secure world are not modified by a compromised rich OS, they were implemented with the goal of reducing the \ac{TCB} in order to ensure a less vulnerable system. For this reason, there are some limitations regarding the features they can support.
A reduced \ac{TCB} means that most features of standard mobile operating systems are not supported. For instance, in both TLR and OP-TEE, as well as AndixOS, the secure world kernel lacks drivers for peripherals such as the touchscreen or code to control the framebuffer. Thus, these systems are not capable of supporting trusted UI. For this reason, these systems do not allow developers to easily build trusted applications for sensitive data display. Instead, these systems support an RPC-like mechanism for inter-world communication, secure persistent storage and basic cryptographic systems, allowing for the development of simple trusted services.
\subsubsection{Trusted Services with Untrusted UI}
As opposed to Trusted Kernels, which enable the execution of general-purpose application code on the secure world, Trusted Services are designed to implement specific applications in the secure world natively. Some trusted services, such as DroidVault~\cite{li2014droidvault} and Restricted Spaces~\cite{brasserregulating}, use custom trusted kernels to fully control the underlying hardware and execution environment.
A system by Brasser et al.~\cite{brasserregulating}, which will be referred to as Restricted Spaces for the remainder of this section, allows third parties (hosts) to regulate how users (guests) use their devices (e.g., manage device resources) while in a specific space. This system comprises authentication and communication mechanisms between the guest's secure world and the host's. It also supports remote memory operations, which allow for configuration changes such as uninstalling peripheral drivers. This can be done by either pointing their interfaces to NULL or to dummy drivers that just return error codes. With this, Restricted Spaces is capable of securely refining permissions using a context-aware approach.
DroidVault~\cite{li2014droidvault} introduces the notion of data vault, which is an isolated data protection manager running in the trusted domain for secure file management in Android. To achieve this, DroidVault adopts the memory manager and interrupt handler from Open Virtualization's SierraTEE~\cite{sierra_tee} and is implemented with a data protection manager, encryption library and a port of a lightweight SSL/TLS library called PolarSSL~\cite{polar}. Much like Restricted Spaces, DroidVault supports world switching through software interrupts, secure boot and even inter-world communication. With this Trusted Service a user can download a sensitive file from an authority and securely store it on the device. The sensitive file is encrypted and signed by the data protection manager before it is stored in the untrusted Android OS, in order to save space in the limited storage capacity available at the secure world.
Android Key Store~\cite{aks} is another security service in Android. This service allows cryptographic keys to be stored in a container (keystore), so that their extraction from the device becomes difficult while they can still be used for cryptographic operations. The encryption and decryption of the container is handled by the keystore service, which in turn links with a hardware abstraction layer module called ``keymaster''. The Android Open Source Project (AOSP) provides a software implementation of this module called ``softkeymaster'', but device vendors can offer support for hardware-based protected storage by using TrustZone.
% REMOVE THE DROIDVAULT PART FROM THIS LAST SENTENCE?
The main drawback of the Trusted Services mentioned is that they do not fulfil the goals of this project, as they do not support necessary features such as secure display. %On the other hand, DroidVault takes an interesting approach to secure storage support for Trusted Services.
\subsubsection{Trusted Kernels with Trusted UI}
In this section we describe Trusted Kernels, which allow the execution of generic code in the secure world environment, with support for trusted UI. As referenced in Section~\ref{sec:trustzone}, GlobalPlatform defined standard APIs for the communication between the rich OS running in the normal world and the secure OS. However, this organization also defined device specifications with which TEEs must comply in order to be certified. These device specifications include a trusted UI clause, meaning that every TEE which complies with GlobalPlatform's device specifications must support trusted UI.
SierraTEE~\cite{sierra_tee}, T6~\cite{t6_tee} and Open-TEE~\cite{mcgillion2015open} comply with the GlobalPlatform standard, and for this reason allow the development of trusted applications with secure user interfaces. Open-TEE's trusted UI feature is being developed by the community, as it was not originally supported. The Genode OS Framework~\cite{genode} is a toolkit for building highly secure special-purpose operating systems to be executed on TrustZone-enabled processors. Genode implements a framebuffer and display drivers to be used by the secure kernel, thus trusted applications running on top of Genode-based TEEs can offer trusted user interfaces.
Samsung KNOX~\cite{knox_whitepaper} is a defense-grade mobile security platform which provides strong guarantees for the protection of enterprise data. Security is achieved through several layers of data protection, which include secure boot, a TrustZone-based integrity measurement architecture (TIMA) and Security Enhancements for Android (SEAndroid~\cite{smalley2013security}, already discussed in Section~\ref{sec:accesscontrol}). Samsung KNOX offers a product called KNOX Workspace, which is a container designed to separate, isolate, encrypt, and protect work data from attackers. This enterprise-ready solution provides management tools and utilities to meet the security needs of enterprises large and small. Workspace provides this separate secure environment within the mobile device, complete with its own home screen, launcher, applications, and widgets.
Unlike the solutions previously described, such as Genode OS and Samsung KNOX, which provide isolated computing environments in the secure world, TrustICE~\cite{sun2015trustice} aims at creating Isolated Computing Environments (ICEs) in the normal world domain. For this reason, TrustICE's architecture is slightly different from those described before.
Figure~\ref{fig:trustICE_architecture} compares TrustICE's architecture with that of a traditional TrustZone TEE, where trusted applications run inside the secure world domain. TrustICE works by implementing a trusted domain controller (TDC) in the secure world, which is responsible for suspending the execution of the rich OS as well as of other ICEs in the system whenever an ICE is running. Thus, TrustICE supports CPU isolation for running ICEs. For memory isolation, a watermarking mechanism is implemented so that the rich OS cannot access secure code running in normal world memory. In order to isolate I/O devices, the secure world blocks all unnecessary external interrupts from arriving at the TDC, thus protecting the TDC from being interrupted by malicious devices, the exception being a minimal set of required interrupts to allow trusted UI.
Because these systems support secure display they are adequate for the development of trusted applications which require sensitive information to be displayed to the user. However, developing such applications is complex because the application development environments are cumbersome and error-prone.
\begin{figure}[t!]
\centering
\includegraphics[width=0.80\textwidth]{img/trustICE.pdf}
\caption{Architecture comparison between traditional TrustZone's software stack and TrustICE (adapted from~\cite{sun2015trustice}).}
\label{fig:trustICE_architecture}
\end{figure}
\subsubsection{Trusted Services with Trusted UI}
Besides Trusted Kernels, some Trusted Services offer secure display to applications implemented in the secure domain. The technical challenge is implementing drivers in the secure world without greatly increasing the TCB, and this is solved by implementing very small special-purpose display drivers. Several Trusted Services with trusted UI support have been proposed in the literature.
TrustOTP~\cite{sun2015trustotp} is a One-Time-Password (OTP) system, secured by hardware, where the OTP is generated based on time and a counter protected by TrustZone's memory management. Most trusted applications described before require inter-world communication to trigger the world-switching mechanism. This system instead leverages hardware interrupts to trigger the world-switch. This mitigates denial-of-service attacks by a malicious rich OS which may control the inter-world communication mechanism and intercept the calls (software interrupts) required to trigger the world-switch.
Providing a different service, TrustDump~\cite{sun2015reliable} is a secure memory acquisition tool that offloads the memory through micro-USB. Similarly to TrustOTP, this system relies on hardware interrupts to trigger world-switches. This solution may be adopted by systems which require no inter-world communication, but systems which need to offer seamless integration with the normal world may have to combine this approach with other development strategies. Both of these systems support trusted UI by implementing secure display and input drivers, as well as display controllers to manage the secure framebuffers.
Instead of implementing the required drivers to support trusted UI, some systems designed mechanisms to allow the reuse of untrusted drivers, implemented in the rich OS, by the secure world domain. TrustUI~\cite{li2014building} excludes the device drivers for input, display and network from the secure world, and instead reuses the existing drivers from the normal world, thus achieving a much smaller TCB than previously described systems. Because we are only interested in secure display, the following explanation discards the network delegation mode and input mechanism. To achieve trusted UI, device drivers are split into two parts: a backend running in the normal world domain and a frontend running in the secure world. Both parts have corresponding proxy modules running in both worlds, which communicate via shared memory. Whenever secure display is necessary, the frontend asks for a framebuffer from the backend driver and sets that memory region as secure only, thus isolating the framebuffer from rich OS manipulation.
This mechanism can still fall victim to framebuffer overlay attacks, where a malicious backend driver gives a false framebuffer to the secure world. For this reason, the system randomizes the background and foreground colours used in the display and uses two LEDs, controlled by the secure world, to show these same colours. A user can visually check if the colours shown in the secure LEDs match those of the display. If they match, the user is assured that the display shown is being controlled by the secure world.
These systems support secure display and, as such, they do not disclose sensitive data. However, TrustOTP and TrustDump do not offer a fully integrated environment with the Android running in the normal domain. TrustUI, on the other hand, fully integrates its environment with that of the Android operating system. Furthermore, TrustUI describes a novel mechanism for the reuse of untrusted drivers in the normal world without compromising security, which significantly reduces the secure system's TCB. The main disadvantage of TrustUI is that, similarly to TrustOTP and TrustDump, it does not support the development of generic display applications. Besides, TrustUI is not immune to denial-of-service attacks by a malicious operating system running in the normal world, which may compromise the execution of the secure system.\\
From the systems described in this section we learned the underlying strategies for developing security systems using TrustZone. For this reason, the following section describes the architecture of \emph{ViewZone}, a solution for securing the output of Android applications using TrustZone. | {
"alphanum_fraction": 0.809351862,
"avg_line_length": 189.0827586207,
"ext": "tex",
"hexsha": "4277e2174ba2cea55f486570a2e691b0fb8aafc6",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "7607d4fc53d5c7f686779c42050d67362440cba0",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "tiagolb/TESE-projecto",
"max_forks_repo_path": "sections/5-related-work.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "7607d4fc53d5c7f686779c42050d67362440cba0",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "tiagolb/TESE-projecto",
"max_issues_repo_path": "sections/5-related-work.tex",
"max_line_length": 1177,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "7607d4fc53d5c7f686779c42050d67362440cba0",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "tiagolb/TESE-projecto",
"max_stars_repo_path": "sections/5-related-work.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 10957,
"size": 54834
} |
\section{Conceptual Model}
\label{sec:performance-modeling-conceptual-model}
A conceptual model describes the target system in terms of
(i) the states it can assume over time,
(ii) the events that let it change in time and
(iii) system assumptions.
We consider the conceptual models depicted in Figures~\ref{fig:conceptual-model-1} and \ref{fig:conceptual-model-2}, for the system running off-loading algorithms 1 and 2, respectively.
In both models, we introduced the \textit{Controller (CTRL)} component within the Cloudlet to represent the decision process of the off-loading policy.
\begin{figure}
\includegraphics[width=\columnwidth]{fig/conceptual-model-1}
\caption{Conceptual model of the system with OP1.}
\label{fig:conceptual-model-1}
\end{figure}
\begin{figure}
\includegraphics[width=\columnwidth]{fig/conceptual-model-2}
\caption{Conceptual model of the system with OP2.}
\label{fig:conceptual-model-2}
\end{figure}
\paragraph{State space}
The state space $S$ of a system is a comprehensive characterization of the system at any given time.
The state space of the whole system is represented by the state space of its subsystems:
\begin{itemize}
\item \textbf{Cloudlet}: $S_{clt} := \{(n_{clt,1},n_{clt,2})\in \mathcal{N}^{2}: n_{clt,1}+n_{clt,2}\leq N\}$, where $n_{clt,i}$ is the population of tasks belonging to the $i$-th class within the Cloudlet.
\item \textbf{Cloud}: $S_{cld} := \{(n_{cld,1},n_{cld,2})\in \mathcal{N}^{2}\}$, where $n_{cld,i}$ is the population of tasks belonging to the $i$-th class within the Cloud.
\end{itemize}
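For concreteness, the joint state can be represented directly in a simulator. The following Python sketch is purely illustrative: the class name, the attribute names and the value chosen for the capacity $N$ are assumptions made here and are not prescribed by the model.
\begin{verbatim}
from dataclasses import dataclass, field

N = 20  # assumed Cloudlet capacity (illustrative value only)

@dataclass
class SystemState:
    """Joint state (S_clt, S_cld): per-class task populations."""
    n_clt: list = field(default_factory=lambda: [0, 0])  # Cloudlet: [class 1, class 2]
    n_cld: list = field(default_factory=lambda: [0, 0])  # Cloud:    [class 1, class 2]

    def cloudlet_is_feasible(self) -> bool:
        # The Cloudlet state is constrained by n_clt,1 + n_clt,2 <= N.
        return sum(self.n_clt) <= N
\end{verbatim}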
\paragraph{Events space}
An event is an occurrence that could change the state of the system at the event time, according to the event type.
We consider the following events:
\begin{itemize}
\item \textbf{arrival event $A_{clt,i}$:} a task belonging to the $i$-th class arrives at the Cloudlet.
\item \textbf{arrival event $A_{cld,i}$:} a task belonging to the $i$-th class arrives at the Cloud.
\item \textbf{completion event $C_{clt,i}$:} a task belonging to the $i$-th class is completed by the Cloudlet.
\item \textbf{completion event $C_{cld,i}$:} a task belonging to the $i$-th class is completed by the Cloud.
\item \textbf{interruption event $I_{i}$:} a task belonging to the $i$-th class is interrupted in the Cloudlet and restarted in the Cloud\footnote{Notice that the interruption event is possible only for $2^{nd}$-class tasks when Off-Loading Policy 2 is adopted.}.
\end{itemize}
\paragraph{Assumptions}
The following assumptions hold for the modeled system and ensure that we can adopt the \textit{Next-Event Simulation Model}:
\begin{itemize}
\item \textbf{Stochastic:} the system behavior is driven by some random components, such as task arrivals and service times.
\item \textbf{Dynamic:} the state of the system evolves with time over a finite observation period. Notice that even though this period may be long enough to reach a steady state, it is still finite.
\item \textbf{Discrete:} the state of the system evolves as a step-wise function. Notice that this assumption holds because both the state space and the time space are discrete, i.e. the state space is in $\mathcal{N}^{i}$ for some $i$.
\end{itemize}
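To make the next-event mechanics concrete, the sketch below outlines the core loop of such a simulator for Cloudlet arrivals and completions only. It is a deliberately simplified illustration: the exponential rates, the omission of the Cloud and of the off-loading decision, and all names are assumptions made here for readability, not the actual simulator implementation.
\begin{verbatim}
import heapq
import random

T_STOP = 1000.0  # illustrative length of the observation period

def simulate():
    clock = 0.0
    state = {"clt": [0, 0], "cld": [0, 0]}         # task populations per class
    calendar = []                                  # future event list: (time, type, class)
    for i in (0, 1):                               # schedule the first arrivals
        heapq.heappush(calendar, (random.expovariate(1.0), "A_clt", i))

    while calendar and clock < T_STOP:
        clock, etype, i = heapq.heappop(calendar)  # advance to the most imminent event
        if etype == "A_clt":                       # arrival at the Cloudlet
            state["clt"][i] += 1                   # (acceptance/off-loading decision omitted)
            heapq.heappush(calendar, (clock + random.expovariate(1.0), "C_clt", i))
            heapq.heappush(calendar, (clock + random.expovariate(1.0), "A_clt", i))
        elif etype == "C_clt":                     # completion at the Cloudlet
            state["clt"][i] -= 1
    return state
\end{verbatim}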
Other useful assumptions, even if not necessary for the next event simulation, are:
\begin{itemize}
\item \textbf{Conservative:} it is not possible to have idle resources as long as there are unprocessed tasks; alternatively speaking, as soon as a resource completes the service for a task, it immediately starts with the next eligible task (if any), with no idle time in between.
\item \textbf{Flow Balanced:} the number of completed tasks is equal to the number of arrived tasks in the observation period; alternatively speaking, given an observation period, every task that arrives at the system is served by the system within the same period. This assumption may sound unacceptable given that the Cloudlet may reject some tasks, but it holds anyway because such tasks are not dropped but sent to the Cloud, which, having infinite resources, always guarantees that they are served.
\end{itemize} | {
"alphanum_fraction": 0.7616293083,
"avg_line_length": 62.8636363636,
"ext": "tex",
"hexsha": "9ed29db8a03c173585b19074c45c7efd291edbd5",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2018-02-17T13:30:49.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-02-17T13:30:49.000Z",
"max_forks_repo_head_hexsha": "7cc526fe7cd9916ceaf8285c4e4bc4dce4028537",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "gmarciani/research",
"max_forks_repo_path": "pydes/sec/performance-modeling-conceptual-model.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "7cc526fe7cd9916ceaf8285c4e4bc4dce4028537",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "gmarciani/research",
"max_issues_repo_path": "pydes/sec/performance-modeling-conceptual-model.tex",
"max_line_length": 505,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "7cc526fe7cd9916ceaf8285c4e4bc4dce4028537",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "gmarciani/research",
"max_stars_repo_path": "pydes/sec/performance-modeling-conceptual-model.tex",
"max_stars_repo_stars_event_max_datetime": "2018-07-20T12:54:12.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-07-27T13:31:43.000Z",
"num_tokens": 1112,
"size": 4149
} |
\section{Introduction}
%Offensive language and out-of-topic discussions give the impression that humans are unable to debate in the feeling of pseudonymity.
Online news outlets are drowning in the vast quantity of user comments. While there are certainly high-quality comments that, e.g., give a new perspective to a story, there are also comments which are rather unwelcome. They range from out-of-topic discussions to the use of offensive language. A large number of websites are closing their comment sections because they cannot cope with the sheer amount of user-generated content. News organizations already have economic difficulties\footnote{\url{https://en.wikipedia.org/wiki/Decline_of_newspapers}} and they cannot afford to moderate comments. However, with the help of new Natural-Language Processing (NLP) and machine learning techniques, there is hope to automatically analyze user comments. As a consequence, this frees up resources from moderation and opens the comment sections again.
There already exist works to detect hate speech or abusive language in user-generated content \cite{hateoffensive, risch_delete_nodate, Nobata:2016:ALD:2872427.2883062, schmidt2017survey}. In this work, we want to take a different perspective on the issue. Instead of eliminating `bad' comments, we want to identify high-quality (or `good') ones. We achieve this by classifying according to fine-grained criteria that constitute those comments. Such criteria are, for instance, whether a comment is about the topic of the article or whether it supports its claims with an argument. In this master's thesis, we want to achieve higher precision and recall than reported in the scientific literature for classifications on corpora of news comments. A detailed description of which classes we want to predict on which datasets is given in further sections. The successful classification of specific fine-grained criteria allows us to (hopefully) detect high-quality comments. For the assignment of which criteria belong to `bad' comments and which to `good' ones, we rely on research in the field of media studies.
In contrast to prior work, we assume that the article and other comments are important to consider when making classifications. To illustrate our motivation, we take two human-annotated comments from a Canadian newspaper \cite{kolhatkar2018sfu}: ``Time for the elders and chiefs to stand up to the plate and take a leadership role!". This comment is labeled as constructive (or in other words: high-quality), while ``Maybe this will motivate the cabbies in TO to clean their filthy cars! That are a disgrace.'' is labeled as non-constructive. For us as humans, it is hard to make a judgment without reading the article first. Moreover, the annotators were required to read the article before deciding whether a comment is constructive or not. So we should be fair and also give the machine the possibility to obtain the context before classifying.
\begin{figure}
\vspace*{0.2in}
\hspace*{-0.1in}
\includegraphics[width=0.5\textwidth]{comments}
\caption[Caption for LOF]{An opinion article of the Guardian about the UK Prime Minister Theresa May received 2391 comments within two days.\protect\footnotemark}
\label{fig:comments}
\end{figure}
\footnotetext{\url{https://www.theguardian.com/commentisfree/2018/oct/03/the-guardian-view-on-theresa-mays-speech-getting-by-on-borrowed-time}}
In order to formally define the problem, let $C$ and $A$ be the sets of all comments and articles, respectively. In addition, we define:
\\\\
$\text{isArticle}=\{(c, a) \in C \times A \mid c\text{ is a comment of }a\}$
\\
$\text{isSurrounding}=\{(c, o, a) \in C \times C \times A \mid$
$\quad\text{isArticle}(c, a) \wedge \text{isArticle}(o, a) \wedge c \neq o\}$
\\\\
The training set $T =\{(c_n, a_n, s_n, y_n)\}^N_{n=1}$ consists of quadruples, where $c \in C$, $a \in A$, $s \subseteq \{o \in C \mid \text{isSurrounding}(c,o,a)\}$ and $(c, a) \in \text{isArticle}$. In addition, $y \in Y$ is the corresponding label, with $Y=\{1,\dots, l\}$ being the set of classes. We wish to learn a classifier $\gamma$ that maps a comment, its article, and its surrounding comments to classes: $\gamma: C \times A \times C^* \rightarrow Y.$ In other words, each training example consists of a comment, the article it was posted under, a (possibly empty) set of other comments on the same article, and a class label; the classifier predicts the label from these three inputs.
% \begin{figure}
%% \vspace*{0.2in}
%% \hspace*{-0.1in}
% \includegraphics[width=0.5\textwidth]{tree}
% \caption{df}
% \label{fig:tree}
%\end{figure}
%large number of people does not necessarily lead to justified judgments – as seen for instance in the Brexit referendum.
In addition, we present a method of harnessing user votes to derive a comparable popularity score for each comment. We assume that the popularity of a comment is a rough proxy for its quality. So, with a comparable score, we can, e.g., divide comments into high-quality and low-quality comments. Nevertheless, we are aware that a high number of up-votes does not necessarily indicate a good comment. A German right-wing internet group, `Reconquista Germanica'\footnote{\url{https://en.wikipedia.org/wiki/Social_media_disruption_by_far-right_groups_in_Germany}}, coordinates attacks on their political opponents -- among other things -- in the comment section. They flood it with racist remarks and also up-vote them. Consequently, considering those comments and votes may lead to confounding good with racist. However, we still assume that, especially in an enormous corpus, the vote of the crowd has a meaning. News aggregators such as Hacker News\footnote{\url{https://news.ycombinator.com/}} or Reddit\footnote{\url{https://www.reddit.com/}} rely on their user votes to identify high-quality content. Still, there are a lot of factors that come into play when people vote, and our method only operates on the number of votes at one specific moment in time. We neither know who casts these votes nor when. This may not be enough to come to a reasonable, objective, and comparable popularity score. We will critically investigate our method on multiple existing comment datasets before drawing conclusions.
So what is the bigger picture of user comments? Why exactly is it important to have them? Glenn Greenwald\footnote{\url{https://theintercept.com/2017/12/18/comments-coral-project/}} states:
\begin{quote}
``Journalists often tout their responsibility to hold the powerful accountable. Comments are a way to hold journalists themselves accountable.''
\end{quote}
So, one facet is about holding journalists themselves accountable. Before the Internet, people could only send letters to the editors to express their opinions. A fraction of those letters was printed, but they were obviously moderated strictly. Thus, the introduction of online news comments was an emancipatory act of depriving journalists of their role as gatekeepers. For the first time, a debate about an issue could happen in the wider public. This puts into practice the discourse ethics\footnote{\url{https://en.wikipedia.org/wiki/Discourse_ethics}} proposed by J{\"u}rgen Habermas. The principle of discourse ethics is that a group of people in a rational debate comes to a common conclusion, which manifests the moral norms. This is a stark contrast to Immanuel Kant's categorical imperative, which focuses on individuals' convictions. The theoretical concept of discourse ethics can be put into practice in the comment section, albeit in some variation. In essence, it is about the belief that the collective can come to better conclusions than a sole person. The journalist who wrote the article has a limited view on the issue. Over the course of the debate in the comment section, all participants will eventually reach a common conclusion. But to do so, they have to follow specific discourse rules -- an idealistic scenario. Nevertheless, the thoughts of J{\"u}rgen Habermas and other philosophers can help us to answer fundamental questions.
The field of journalism is tightly coupled with the technological advancement of our society. Only through the invention of the printing press by Johannes Gutenberg could humans spread information so fast. This allowed the journalistic profession to establish itself and flourish. The ongoing digital revolution also affects journalists, and there is a long tradition of supporting their work with digital technologies. It started in the late 1960s with computer-assisted reporting\footnote{\url{https://en.wikipedia.org/wiki/Computer-assisted_reporting}} and led to current ideas about automatic reporting\footnote{\url{https://en.wikipedia.org/wiki/Automated_journalism}}. Right now, there is a larger effort to support newspapers in managing their user comments. One example is the unprecedented Coral Project\footnote{\url{https://coralproject.net/}}, a cooperation among Mozilla, the New York Times and the Washington Post, that interviewed more than 400 experts in 150 newsrooms to develop an IT system to manage comments. In the following section, we give a detailed overview of related scientific work on machine-learning-based natural-language processing of news comments.
\section{Related Work}
The literature related to this work can be roughly split into two categories: one is about news comments and machine learning applied to them, and the second is about recent trends in NLP with deep learning.
\subsection{News Comments}
With the beginning of Web 2.0, there is an abundance of user-generated content. Some influential earlier work analyzed comments on Digg\footnote{\url{http://digg.com/}} \cite{Gomez:2008:SAS:1367497.1367585, Lampe:2004:SBD:985692.985761}, or predicted the popularity of online content on Youtube and Digg \cite{Szabo:2010:PPO:1787234.1787254} or Reddit \cite{Rizos:2016:PNP:2872518.2890096}. Coming from the area of human-computer interaction, Park et al. \cite{Park:2016:SCM:2858036.2858389} built a system to manage comments by incorporating traditional, feature-based machine learning.
Recent work focused on identifying moderator-picked comments on the New York Times website, so-called \textit{NYT Picks}. Nicholas Diakopoulos \cite{diakopoulos2015picking} presents nine criteria for comments that distinguish NYT Picks from non-NYT Picks. Three of these criteria can be computed. Kolhatkar and Taboada \cite{kolhatkar_using_2017} predict the NYT Picks with traditional, feature-based machine learning as well as deep learning. They achieved an F1 score of $0.84$. In their follow-up work, they constructed the \textit{SFU Opinion and Comments Corpus} \cite{kolhatkar2018sfu}, where they labeled over 1000 comments for \textit{constructiveness}, and they achieve an accuracy of $72.59\%$ \cite{kolhatkar2017constructive}. They define constructive as:
\begin{quote}
``Constructive comments intend to create a civil dialogue through remarks that are relevant to the article and not intended to merely provoke an emotional response. They are typically targeted to specific points and supported by appropriate evidence.''
\end{quote}
Napoles et al. \cite{napoles2017automatically} focused on detecting `good' discussions on a comment thread level. They define `good' discussions as ERIC: Engaging, Respectful, and/or Informative Conversation. They also created a corpus of comments, the \textit{Yahoo News annotated comments corpus} \cite{napoles2017finding}, labeled a subset of about 10k comments and 2.4k threads, and predicted ERIC threads with an F1 score of $0.73$. For German, Schabus et al. \cite{Schabus:2017:OMP:3077136.3080711} constructed a dataset of comments and annotated over 11k of them. They present several approaches, experimenting with feature-based as well as deep-learning-based methods. In their follow-up paper, Schabus and Skowron \cite{schabus_academic-industrial_nodate} describe how they resort to a feature-based machine learning method for usage in a production system. A detailed comparison of all related datasets is presented later in Section~\ref{sec:commcorp}.
The work so far did not consider the context of a comment, namely the article and also the other comments. The very recent work by Cheng et al. \cite{2018arXiv180807191C} considers the abstract of the news article as well as surrounding comments to classify comments. It adapts the text matching method of Wang et al. \cite{wang_bilateral_2017}, which uses an LSTM \cite{hochreiter1997long} with an attention mechanism. However, their work has one weakness. They interpret all comments with over 10 up-votes as positive and the rest as negative samples in a binary classification problem. This is a gross simplification for multiple reasons. For instance, earlier comments are more likely to get more up-votes, so their model may merely predict which comments appear earlier in the discussion than others. Even though they achieved an accuracy of 70.75\% and an F1 score of 0.8073, their true contribution is unclear. Nevertheless, their deep learning network architecture of combining comment, article, and other comments can be a starting point for our work.
There has been significant work on detecting hate speech and offensive language with NLP. A closer look is out of the scope of this thesis, and the interested reader is referred to a survey by Schmidt and Wiegand \cite{schmidt2017survey}. In particular, one paper by Gao and Huang \cite{Gao_2017} is worth mentioning because they work on context-aware classification. They also point out that work on comments neglects their context. They developed an architecture of three parallel LSTMs, one for the text, one for the author, and one for the article. The three LSTMs are combined into a classifier. They constructed their own dataset of annotated tweets that relate to news articles. In their experiments, they claim that their context-aware model outperforms the one without context. Unfortunately, they did not apply their method to a commonly used dataset to put their contribution into perspective.
Loosely related is the work by Qin et al. \cite{2018arXiv180503668Q}, who automatically generate high-quality comments. The problem of \textit{stance detection} is also related; this field is about detecting whether a response to a statement is affirmative or opposing. Some promising work has been carried out by Kochkina et al. \cite{kochkina2017turing}. In addition, the prediction of the helpfulness of user product reviews done by Singh et al. \cite{SINGH2017346} is relevant. Another work on product reviews and recommender systems has been done by Zheng et al. \cite{Zheng:2017:JDM:3018661.3018665}. They encoded the product description as well as the reviews with an LSTM each before combining the two sub-networks into a classifier.
Outside of the computer science community, there exist qualitative analyses of comments that should guide our work. Loosen et al. \cite{loosen_making_2017} formulated several comment quality indicators after conducting interviews with news professionals as well as developing a software mockup. Among other things, they list `comment length' and `readability' but also the `reference to the article' and `references to other comments' as indicators. Their work is built upon earlier research done by Diakopoulos et al. \cite{Diakopoulos:2011:TQD:1958824.1958844} and Noci et al. \cite{noci2012comments}. There is also an abundance of comment guidelines that outline good comments. The New York Times writes in its guidelines\footnote{\url{https://help.nytimes.com/hc/en-us/articles/115014792387-Comments}}: ``We are interested in articulate, well-informed remarks that are relevant to the article.'' Also, the community guidelines by the Guardian require commentators to ``[k]eep it relevant''\footnote{\url{https://www.theguardian.com/community-standards}}.
So the relation to the article plays a role when judging comments for their quality. Current machine learning methods are often too simplistic and neglect this feature altogether.
\subsection{Transfer Learning with Language Models}
Transfer learning describes methods for re-using (computationally intensive) computations on downstream tasks. Pre-trained word embeddings, as popularized by Word2Vec \cite{DBLP:journals/corr/abs-1301-3781}, GloVe \cite{Pennington14glove:global} or fastText \cite{bojanowski2017enriching}, are one example of transfer learning in NLP. Within the deep learning NLP community, there is a trend of abandoning word embeddings in favor of text representations derived from pre-trained language models. There are two main reasons for this: first, the word embeddings are global, which means that they are context-agnostic. This is a drawback because there might be multiple meanings for a word. For instance, `bank' can describe a financial institution or an embankment. The true meaning in a sentence can only be inferred by considering the context. Second, the meaning of a word is lost after the embedding layers: subsequent, deeper layers do not have access to the `meaning' of a word that was injected through the word embeddings.
Language Models (LM) try to predict the next word based on the previous words. This is a challenging task: to do it well, the model has to learn the nuances of a language over long sequences of text. A good thing is that they do not require annotated data, and there is an abundance of raw text freely available (e.g. from Wikipedia). After training an LM, there is the hope of transferring its capabilities to other tasks. The Elmo embeddings by Peters et al. \cite{peters2018deep} use the potential of LMs to surpass earlier work on contextual word embeddings by McCann et al. \cite{mccann2017learned} (CoVe). They use the internal state of a language model to obtain vectors as a building block that can directly replace traditional word embeddings. Similar work has been done by Akbik et al. \cite{akbik_contextual_nodate} for Flair\footnote{\url{https://github.com/zalandoresearch/flair}}. Peters et al. \cite{peters_dissecting_2018} describe LMs and their use in text representation in more detail in their recent publication.
Howard and Ruder show with Ulmfit \cite{howard_universal_2018} how to apply transfer learning to fine-tune from an ordinary LM to a downstream classification task. They achieved multiple state-of-the-art results on text classification\footnote{\url{http://nlpprogress.com/text_classification.html}} and sentiment detection\footnote{\url{http://nlpprogress.com/sentiment_analysis.html}}, e.g. on the IMDb dataset\footnote{\url{https://ai.stanford.edu/~ang/papers/acl11-WordVectorsSentimentAnalysis.pdf}}. In contrast to Elmo, Ulmfit consists of a full architecture and methodology to apply text classification to real-world data. They point out that the challenging part is how to fine-tune from the LM to the task. The OpenAI transformer by Radford et al. \cite{radford2018improving} also starts with a trained LM and \textit{transforms} it to downstream tasks. Its performance for text classification is below that of Ulmfit, but they also provide ways to transform the LM to other tasks such as question answering. Both papers highlight that there is far more to explore in the field of LM fine-tuning. There is a comparison of several approaches done by Perone et al. \cite{2018arXiv180606259P} -- unfortunately without Ulmfit or the OpenAI transformer.
Transfer learning in NLP is currently a hot topic and there might be no clear answers yet. There are also other approaches without LM, i.e., the `Universal Sentence Encoder' by Cer et al. \cite{cer2018universal}. They also create context-aware text representation but build upon their prior work about a custom artificial neural network architecture.
\section{Context-aware Classification of News Comments}
\label{sec:methods}
%In order to select comments that are worth promoting, we use a two-step process. First, we select `good' comments. Then we further filter those comments and promote only a selection to avoid duplicates.
%\subsection{Identifying `Good' Comments}
%\subsection{Clustering `Good' Comments}
%
%For the second part, we cluster those comment and present only one (most likely the first one) out of each cluster. We will use topic modeling to find the clusters [not sure which one]. Right now it is open how to rank the comments. We may need additional labeled data for it or just refrain to a time-based ordering for the good comments.
We want to draw on ideas from the recent developments in NLP to classify news comments considering their context. We propose and evaluate several deep learning architectures. We interpret the problem as a combination of text classification and text matching. We follow the recent advancements of language model fine-tuning as done by Ulmfit \cite{howard_universal_2018} and the OpenAI Transformer \cite{radford2018improving}. We will first present several context-unaware baseline approaches before describing our context-aware architectures.
\subsection{Baseline Models}
The baseline methods range from traditional feature-based approaches to well-established deep learning approaches.
None of them has access to the context of the article. For the features, the work by Kolhatkar and Taboada \cite{kolhatkar_using_2017}, with features such as average word/sentence/comment length, is a starting point. Stylometric features as described by Potthast et al. \cite{2017arXiv170205638P} should also help to distinguish certain quality criteria. Most importantly, we use well-established, e.g., attention-based, deep learning architectures. Further, we will use text classification with the original Ulmfit \cite{howard_universal_2018} solely on the comment's text. This way, we can verify that the additional context is indeed useful.
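To illustrate the kind of surface features meant here, the following snippet computes comment length, average word length and average sentence length for a raw comment string; it is a minimal sketch of possible features and not the final feature set.
\begin{verbatim}
import re

def surface_features(comment: str) -> dict:
    """Simple length-based features in the spirit of Kolhatkar and Taboada."""
    words = comment.split()
    sentences = [s for s in re.split(r"[.!?]+", comment) if s.strip()]
    return {
        "n_chars": len(comment),
        "n_words": len(words),
        "avg_word_length": sum(len(w) for w in words) / max(len(words), 1),
        "avg_sentence_length": len(words) / max(len(sentences), 1),
    }

print(surface_features("Time for the elders and chiefs to stand up to the plate!"))
\end{verbatim}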
\subsection{Context-aware Models}
We propose three different directions for context-aware models. To begin with, we limit the models to only consider the article as context and not the surrounding comments.
First, we start with simple architectures using multiple recurrent neural networks, i.e., LSTMs, in parallel: one for the comment and one for the article, before joining them in a fully connected layer. This follows the work of Gao and Huang \cite{Gao_2017}, who used such a setup to classify hate speech tweets, and of Zheng et al. \cite{Zheng:2017:JDM:3018661.3018665}, who used it for product reviews.
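A minimal PyTorch sketch of such a two-branch model is shown below. The vocabulary size, dimensions and the use of the final hidden states are illustrative assumptions and do not reproduce the exact architectures of Gao and Huang or Zheng et al.
\begin{verbatim}
import torch
import torch.nn as nn

class CommentArticleClassifier(nn.Module):
    """Two parallel LSTM encoders (comment, article) joined by a fully connected classifier."""
    def __init__(self, vocab_size=30000, emb_dim=300, hidden=128, n_classes=2):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, emb_dim, padding_idx=0)
        self.comment_lstm = nn.LSTM(emb_dim, hidden, batch_first=True)
        self.article_lstm = nn.LSTM(emb_dim, hidden, batch_first=True)
        self.classifier = nn.Sequential(
            nn.Linear(2 * hidden, hidden), nn.ReLU(), nn.Linear(hidden, n_classes))

    def forward(self, comment_ids, article_ids):
        _, (h_c, _) = self.comment_lstm(self.embedding(comment_ids))
        _, (h_a, _) = self.article_lstm(self.embedding(article_ids))
        joint = torch.cat([h_c[-1], h_a[-1]], dim=-1)  # concatenate final hidden states
        return self.classifier(joint)

model = CommentArticleClassifier()
logits = model(torch.randint(1, 30000, (4, 80)), torch.randint(1, 30000, (4, 400)))
\end{verbatim}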
\begin{figure}
\includegraphics[width=0.5\textwidth]{ma}
\caption{Overview of the model architecture.}
\label{fig:model}
\end{figure}
Second, we propose a model architecture consisting of two sub-networks: one operating only on the comment's text and one for the text matching between the article's text and the comment's text. Both sub-networks are combined into a final layer to classify the comments. For the text classification, we follow the work of Ulmfit \cite{howard_universal_2018}. For the text matching, we follow the work of the OpenAI transformer \cite{radford2018improving}. Text matching describes the general research problem of deciding whether two texts match, where the matching criteria can be defined per task; question answering is one example of text matching. So the general idea is that we first train a general language model on a large text corpus and then transform and fine-tune the trained language model to our task. An overview of the complete architecture is shown in Figure~\ref{fig:model}. The whole process of fine-tuning looks as follows:
\begin{enumerate}
\item Train a language model on a large, un-labeled corpus (or use a pre-trained one)
\item Re-train the language model on an un-labeled corpus of comments
\item Fine-tune the model on a labeled sub-set of comments
\end{enumerate}
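The following PyTorch sketch illustrates the weight-transfer idea behind these three steps. It is a strong simplification of Ulmfit -- there are no discriminative learning rates, no gradual unfreezing and no AWD-LSTM -- and all sizes and names are assumptions made for illustration only.
\begin{verbatim}
import torch.nn as nn

class LanguageModel(nn.Module):
    """Steps 1 and 2: next-word prediction, first on a large raw corpus, then on raw comments."""
    def __init__(self, vocab_size=30000, emb_dim=300, hidden=512):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, emb_dim)
        self.lstm = nn.LSTM(emb_dim, hidden, batch_first=True)
        self.decoder = nn.Linear(hidden, vocab_size)

    def forward(self, token_ids):
        out, _ = self.lstm(self.embedding(token_ids))
        return self.decoder(out)              # logits over the next token

class CommentClassifier(nn.Module):
    """Step 3: reuse the pre-trained encoder and replace the decoder with a classification head."""
    def __init__(self, lm: LanguageModel, hidden=512, n_classes=2):
        super().__init__()
        self.embedding, self.lstm = lm.embedding, lm.lstm   # transferred weights
        self.head = nn.Linear(hidden, n_classes)

    def forward(self, token_ids):
        out, _ = self.lstm(self.embedding(token_ids))
        return self.head(out[:, -1, :])       # classify from the last hidden state

lm = LanguageModel()          # trained in steps 1 and 2
clf = CommentClassifier(lm)   # fine-tuned on the labeled subset in step 3
\end{verbatim}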
Third, we want to experiment with whether the classification criterion `off-topic' can be solved as a text matching problem. To our knowledge, this has not been tried so far. This would make the complex two-step architecture proposed in the previous paragraph superfluous.
\newpage
\subsection{Further Ideas}
To further improve the model, we could incorporate external knowledge into our model. Some concrete information about people, e.g., their affiliation such as political party membership, may not be in the training data. One way would be to use a Named-Entity Recognition (NER) method and then look up the entity in external knowledge resources such as Wikipedia or Wikidata.
\section{Data}
%\tablefootnote{\url{https://github.com/sfu-discourse-lab/SOCC}},
%\tablefootnote{\url{https://github.com/cnap/ynacc}}\cite{napoles2017finding}
%\tablefootnote{\url{https://ofai.github.io/million-post-corpus/}}
In this section, we will give an overview of available partly-annotated data sources as well as a method to harness user votes.
\subsection{Corpora of News Comments}
\label{sec:commcorp}
\begin{center}
\begin{table}
\begin{tabular}{| p{2cm} | p{2.8cm} | p{2.7cm} |}
\hline
Dataset & Description & Annotations \\ \hline
SFU Opinion and Comments Corpus \cite{kolhatkar2018sfu} (SOCC), in English & 10k articles with their 663k comments from 303k comment threads, from 2012 to 2016, from Canadian newspapers, with up- and down-votes & 1,043 comments in responses to 10 articles, labeled for constructiveness and toxicity \\ \hline
Yahoo News Annotated Comments Corpus \cite{napoles2017finding} (YNACC), in English & 522k comments from 140k threads posted in response to Yahoo News articles, with up- and down-votes & 9.2k comments labeled for agreement, audience, persuasiveness, sentiment, tone, (off-)topic and 2.4k threads labeled for agreement, constructiveness, type \\ \hline
One Million Posts Corpus \cite{Schabus:2017:OMP:3077136.3080711, schabus_academic-industrial_nodate} (OMPC), in German & 12k articles, 1M comments, from an Austrian Newspaper, with up- and down-votes & 11k comments with the following labels: sentiment, off-topic, inappropriate, discriminating, feedback, personal studies, argument used \\ \hline
Tencent News Corpus (TNC) by Qin et al. \cite{2018arXiv180503668Q}, in Chinese & 200K articles and 4.5M comments, from a Chinese news website, 2017, with up-votes & 40k comments labeled for quality (from 1 to 5) \\ \hline
\end{tabular}
\caption{Overviews about annotated corpora of comments.}
\label{table:datasets}
\end{table}
\end{center}
There exist four corpora of news comments in which part of the comments are annotated for their quality. Table~\ref{table:datasets} gives an overview of them: two are in English, one in German, and one in Chinese.
For our work, the YNACC and the OMPC are especially interesting because of their fine-grained annotations. The number of annotated comments in SOCC is too small. TNC has enough data but only a single quality label. YNACC and OMPC also have some overlapping annotation criteria: off-topic and sentiment. This allows us to compare the same method with identical labels on both datasets. Since YNACC contains English and OMPC German comments, we can test our approach on two languages.
Besides, all of the corpora contain comment-level user votes. In order to make use of them, a method is presented in the next section.
\subsection{Harnessing User Votes}
\label{sec:data_votes}
News comments allow people to express their opinion about news articles. In the same breath, users are also able to vote on other users' comments. This can be interpreted as a label of the popularity of a comment. However, a problem is that one cannot take the raw number of votes to compare comments. Articles that attract more readers than others receive more comments and also more votes. Comments posted shortly after the article publication are more likely to be read and consequently receive more votes. So to make use of user votes, they first need to be preprocessed to obtain a comparable popularity score.
In our previous, unpublished work, we developed a method to create binary popularity labels out of up-votes as follows:
\begin{enumerate}
\item Remove non-root comments
\item Remove articles with few comments
\item Remove articles with few up-votes
\item Sort comments chronologically
\item Only consider first $N$ comments per article
\item Calculate relative portion of up-votes for each comment under \textit{one} article
\item For each comment rank, classify the first $X$ comments as positive and the last $X$ comments as negative, whereby $2X \leq$ number of articles
\end{enumerate}
So we normalize the up-votes first and then leave out the comments with an average score, because we assume that for these the `crowd' did not come to a clear decision. Only the very popular and the very unpopular comments were used as positive and negative samples, respectively. The constants $N$ and $X$ were derived empirically. In this master's thesis, we want to generalize this method and also determine $X$ and $N$ more scientifically. First, we split the whole process into two parts: the development of an objective popularity score and the method of turning this score into classes. Moreover, we want to adapt it to also work for down-votes. This enables us to use this approach on the datasets in Table~\ref{table:datasets} which have down-votes. For this, two additional classes are required, but how exactly the comments are assigned to a class is subject to further research.
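The following pandas sketch shows one possible implementation of these steps. The thresholds, constants and column names are illustrative assumptions and not the values we will eventually derive.
\begin{verbatim}
import pandas as pd

MIN_COMMENTS, MIN_UPVOTES, N_FIRST, X = 20, 50, 10, 3   # illustrative constants

def label_by_votes(comments: pd.DataFrame) -> pd.DataFrame:
    """Expects columns: article_id, comment_id, timestamp, upvotes, is_root."""
    df = comments[comments.is_root]                                     # 1. root comments only
    stats = df.groupby("article_id").agg(n=("comment_id", "size"),
                                         votes=("upvotes", "sum"))
    keep = stats[(stats.n >= MIN_COMMENTS) & (stats.votes >= MIN_UPVOTES)].index
    df = df[df.article_id.isin(keep)].copy()                            # 2.+3. drop sparse articles
    df = df.sort_values(["article_id", "timestamp"])                    # 4. chronological order
    df = df.groupby("article_id").head(N_FIRST).copy()                  # 5. first N comments
    df["rel_upvotes"] = df.upvotes / df.groupby("article_id").upvotes.transform("sum")  # 6.
    df["rank"] = df.groupby("article_id").cumcount()                    # position within the article

    def assign(group):                                                  # 7. per rank: top/bottom X
        g = group.sort_values("rel_upvotes", ascending=False).copy()
        g["label"] = None
        g.iloc[:X, g.columns.get_loc("label")] = 1                      # most popular -> positive
        g.iloc[-X:, g.columns.get_loc("label")] = 0                     # least popular -> negative
        return g

    return df.groupby("rank", group_keys=False).apply(assign).dropna(subset=["label"])
\end{verbatim}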
Finding a way of harnessing user votes to get meaningful labels would allow us to have a magnitude more of annotated comments. Halevy et al. \cite{halevy2009unreasonable} showed that more data, in general, helps to improve the performance of deep learning models. So applying this method to the datasets presented in Table~\ref{table:datasets} would give us more data. This gives us the possibility to train a model to detect `good' comments. The hope is that if we feed those data into a deep learning model, it picks up the general tendency and leaves out the outliers. However, these assumptions need to be evaluated, as we describe in the next section.
\section{Evaluation}
We have two different sub-sections for the evaluation because our contribution is two-fold.
\subsection{Context-Aware Classification}
To test the performance of our proposed architectures as described in Section~\ref{sec:methods}, we compare them to our baseline results and to results reported in the scientific literature. The authors of OMPC report baseline values \cite{schabus_academic-industrial_nodate} for all their classification criteria. The criterion `off-topic', with a precision of 0.2579, a recall of 0.6241 and an F1 score of 0.3516, leaves room for improvement. Because our model has access to the relation between comment and article, it ought to achieve better results. Also, the majority of the other criteria have F1 scores ranging from 0.5 to 0.7, which shows that these problems are far from being solved. (There will be a paper from Hamburg available shortly, where they also work on the OMPC and report F1 scores.) It is our goal to outperform all classifications on OMPC with respect to precision and recall (and thus F1 scores). The classification criteria are: sentiment, off-topic, inappropriate, discriminating, feedback, personal studies, argument used.
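For reference, the F1 scores we aim to improve combine precision $P$ and recall $R$ as their harmonic mean,
\[
F_1 = \frac{2 \cdot P \cdot R}{P + R},
\]
so improving either measure in isolation is not sufficient; our context-aware models have to improve the balance between the two.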
Unfortunately, there are no reported values for the comments in YNACC --- only on the thread level. For SOCC, the authors \cite{kolhatkar2017constructive} report an accuracy of 0.7259 for identifying `constructive' comments. We claim that the context of a comment is important for its classification. Since the authors' definition of constructive implies that a comment is relevant to the article, we have to outperform them to prove that our approach works.
\subsection{Harnessing User Votes}
First, to evaluate our method as proposed in Section~\ref{sec:data_votes}, we compare the resulting classification against the annotated comments in Table~\ref{table:datasets}. There are certain labels such as `constructiveness', `argument used', or `on-topic' that should occur significantly more often in our `popular' comments.
Second, we preprocess a dataset from Table~\ref{table:datasets} with our method and train a model, e.g., with a context-aware architecture. Then, we classify unseen comments. The final classification results are then evaluated by human annotators (10 articles, 100 comments, 5 people). With this, we can evaluate the whole multi-step process. We hope that the combination of our harnessing method and the deep learning model can predict quality labels of comments.
Third, depending on the course of the master's thesis, we can conduct a larger user study. The study would involve ten participants and comprise a quantitative and a qualitative part to determine whether the `popular' comments are actually superior to other comments. First, participants are required to assign quality labels to comments after they have read the corresponding article. Participants are asked to read three articles and judge ten comments per article. Second, in a semi-structured interview, we will try to elicit additional information about their decisions to get an understanding of what a good comment is in particular and how it relates to user voting on comments.
"alphanum_fraction": 0.7984552476,
"avg_line_length": 165.075,
"ext": "tex",
"hexsha": "e984df5dc12333d7138c80325dd17ce874774c60",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "39a3d9b862444507982cc4ccd98b6809cab72d82",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "jfilter/masters-thesis",
"max_forks_repo_path": "writing/expose/expose/samplebody-conf.tex",
"max_issues_count": 2,
"max_issues_repo_head_hexsha": "39a3d9b862444507982cc4ccd98b6809cab72d82",
"max_issues_repo_issues_event_max_datetime": "2019-11-05T17:17:39.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-11-05T17:17:38.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "jfilter/masters-thesis",
"max_issues_repo_path": "writing/expose/expose/samplebody-conf.tex",
"max_line_length": 1508,
"max_stars_count": 5,
"max_stars_repo_head_hexsha": "39a3d9b862444507982cc4ccd98b6809cab72d82",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "jfilter/masters-thesis",
"max_stars_repo_path": "writing/expose/expose/samplebody-conf.tex",
"max_stars_repo_stars_event_max_datetime": "2020-12-29T06:40:58.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-04-24T19:45:07.000Z",
"num_tokens": 7464,
"size": 33015
} |