% Options for packages loaded elsewhere
\PassOptionsToPackage{unicode}{hyperref}
\PassOptionsToPackage{hyphens}{url}
%
\documentclass[
]{article}
\title{A Market Segmentation and Purchase Drivers Process}
\author{T. Evgeniou}
\date{}
\usepackage{amsmath,amssymb}
\usepackage{lmodern}
\usepackage{iftex}
\ifPDFTeX
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{textcomp} % provide euro and other symbols
\else % if luatex or xetex
\usepackage{unicode-math}
\defaultfontfeatures{Scale=MatchLowercase}
\defaultfontfeatures[\rmfamily]{Ligatures=TeX,Scale=1}
\fi
% Use upquote if available, for straight quotes in verbatim environments
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
\IfFileExists{microtype.sty}{% use microtype if available
\usepackage[]{microtype}
\UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts
}{}
\makeatletter
\@ifundefined{KOMAClassName}{% if non-KOMA class
\IfFileExists{parskip.sty}{%
\usepackage{parskip}
}{% else
\setlength{\parindent}{0pt}
\setlength{\parskip}{6pt plus 2pt minus 1pt}}
}{% if KOMA class
\KOMAoptions{parskip=half}}
\makeatother
\usepackage{xcolor}
\IfFileExists{xurl.sty}{\usepackage{xurl}}{} % add URL line breaks if available
\IfFileExists{bookmark.sty}{\usepackage{bookmark}}{\usepackage{hyperref}}
\hypersetup{
pdftitle={A Market Segmentation and Purchase Drivers Process},
pdfauthor={T. Evgeniou},
hidelinks,
pdfcreator={LaTeX via pandoc}}
\urlstyle{same} % disable monospaced font for URLs
\usepackage[margin=1in]{geometry}
\usepackage{color}
\usepackage{fancyvrb}
\newcommand{\VerbBar}{|}
\newcommand{\VERB}{\Verb[commandchars=\\\{\}]}
\DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}}
% Add ',fontsize=\small' for more characters per line
\usepackage{framed}
\definecolor{shadecolor}{RGB}{248,248,248}
\newenvironment{Shaded}{\begin{snugshade}}{\end{snugshade}}
\newcommand{\AlertTok}[1]{\textcolor[rgb]{0.94,0.16,0.16}{#1}}
\newcommand{\AnnotationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\AttributeTok}[1]{\textcolor[rgb]{0.77,0.63,0.00}{#1}}
\newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}}
\newcommand{\BuiltInTok}[1]{#1}
\newcommand{\CharTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\CommentTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}}
\newcommand{\CommentVarTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\ConstantTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\ControlFlowTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}}
\newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{#1}}
\newcommand{\DecValTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}}
\newcommand{\DocumentationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\ErrorTok}[1]{\textcolor[rgb]{0.64,0.00,0.00}{\textbf{#1}}}
\newcommand{\ExtensionTok}[1]{#1}
\newcommand{\FloatTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}}
\newcommand{\FunctionTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\ImportTok}[1]{#1}
\newcommand{\InformationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}}
\newcommand{\NormalTok}[1]{#1}
\newcommand{\OperatorTok}[1]{\textcolor[rgb]{0.81,0.36,0.00}{\textbf{#1}}}
\newcommand{\OtherTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{#1}}
\newcommand{\PreprocessorTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}}
\newcommand{\RegionMarkerTok}[1]{#1}
\newcommand{\SpecialCharTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\SpecialStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\StringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\VariableTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\VerbatimStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\WarningTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\usepackage{graphicx}
\makeatletter
\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi}
\def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi}
\makeatother
% Scale images if necessary, so that they will not overflow the page
% margins by default, and it is still possible to overwrite the defaults
% using explicit options in \includegraphics[width, height, ...]{}
\setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio}
% Set default figure placement to htbp
\makeatletter
\def\fps@figure{htbp}
\makeatother
\setlength{\emergencystretch}{3em} % prevent overfull lines
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\setcounter{secnumdepth}{-\maxdimen} % remove section numbering
\usepackage{booktabs}
\usepackage{longtable}
\usepackage{adjustbox}
\usepackage{tikz}
\usetikzlibrary{backgrounds}
\makeatletter
\tikzset{%
fancy quotes/.style={
text width=\fq@width pt,
align=justify,
inner sep=1em,
anchor=north west,
minimum width=\linewidth,
},
fancy quotes width/.initial={.8\linewidth},
fancy quotes marks/.style={
scale=8,
text=white,
inner sep=0pt,
},
fancy quotes opening/.style={
fancy quotes marks,
},
fancy quotes closing/.style={
fancy quotes marks,
},
fancy quotes background/.style={
show background rectangle,
inner frame xsep=0pt,
background rectangle/.style={
fill=gray!4,
rounded corners,
},
}
}
\newenvironment{fancyquotes}[1][]{%
\noindent
\tikzpicture[fancy quotes background]
\node[fancy quotes opening,anchor=north west] (fq@ul) at (0,0) {``};
\tikz@scan@one@point\pgfutil@firstofone(fq@ul.south)
\pgfmathsetmacro{\fq@width}{\linewidth - 2*\pgf@x}
\node[fancy quotes,#1] (fq@txt) at (fq@ul.north west) \bgroup}
{\egroup;
\node[overlay,fancy quotes closing,anchor=east] at (fq@txt.south east) {''};
\endtikzpicture}
\makeatother
\renewcommand{\familydefault}{\sfdefault} % sans serif
\renewenvironment{quote}{\begin{fancyquotes}}{%
\end{fancyquotes}\ignorespacesafterend% as suggested above
}
\ifLuaTeX
\usepackage{selnolig} % disable illegal ligatures
\fi
\begin{document}
\maketitle
\begin{quote}
\textbf{IMPORTANT}: Please make sure you create a copy of this file with
a customized name, so that your work (e.g.~answers to the questions) is
not overwritten when you pull the latest content from the course
GitHub. This is a \textbf{template process for market segmentation based
on survey data}, using the
\href{http://inseaddataanalytics.github.io/INSEADAnalytics/Boats-A-prerelease.pdf}{Boats
cases A} and
\href{http://inseaddataanalytics.github.io/INSEADAnalytics/Boats-B-prerelease.pdf}{B}.
\end{quote}
All material and code are available at the INSEAD Data Science for
Business website and GitHub. Before starting, make sure you have pulled
the \href{https://github.com/InseadDataAnalytics/INSEADAnalytics}{course
files} into your GitHub repository. As always, you can use the
\texttt{help} command in RStudio to find out about any R function
(e.g.~type \texttt{help(list.files)} to learn what the R function
\texttt{list.files} does).
\textbf{Note:} you can create an HTML file by running the command
\texttt{rmarkdown::render("CourseSessions/InClassProcess/MarketSegmentationProcessInClass.Rmd")}
in your console (see also a
\href{https://github.com/InseadDataAnalytics/INSEADAnalytics/issues/75}{potential
issue with plots}).
\clearpage
\hypertarget{the-business-questions}{%
\section{The Business Questions}\label{the-business-questions}}
This process can be used as a (starting) template for projects like the
one described in the
\href{http://inseaddataanalytics.github.io/INSEADAnalytics/Boats-A-prerelease.pdf}{Boats
cases A} and
\href{http://inseaddataanalytics.github.io/INSEADAnalytics/Boats-B-prerelease.pdf}{B}.
For example (among others), some of the business questions in this case
were:
\begin{itemize}
\item
What are the main purchase drivers of the customers (and prospects) of
this company?
\item
Are there different market segments? Which ones? Do the purchase
drivers differ across segments?
\item
What (possibly market segment specific) product development or brand
positioning strategy should the company follow in order to increase
its sales?
\end{itemize}
See, for example, some of the analysis of this case in these slides:
part 1 and part 2.
\clearpage
\hypertarget{the-process}{%
\section{The Process}\label{the-process}}
The ``high level'' process template is split into 3 parts, corresponding
to the course sessions 7-8, 9-10, and an optional last part:
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\item
\emph{Part 1}: We use some of the survey questions (e.g.~in this case
the first 29 ``attitude'' questions) to find \textbf{key customer
descriptors} (``factors'') using \emph{dimensionality reduction}
techniques described in the
\href{http://inseaddataanalytics.github.io/INSEADAnalytics/CourseSessions/Sessions23/FactorAnalysisReading.html}{Dimensionality
Reduction} reading of Sessions 7-8.
\item
\emph{Part 2}: We use the selected customer descriptors to
\textbf{segment the market} using \emph{cluster analysis} techniques
described in the
\href{http://inseaddataanalytics.github.io/INSEADAnalytics/CourseSessions/Sessions45/ClusterAnalysisReading.html}{Cluster
Analysis} reading of Sessions 9-10.
\item
\emph{Part 3}: For the market segments we create, we will use
\emph{classification analysis} to classify people based on whether or
not they have purchased a product, and to identify the \textbf{key
purchase drivers per segment}. For this part we will use
\href{http://inseaddataanalytics.github.io/INSEADAnalytics/CourseSessions/ClassificationProcessCreditCardDefault.html}{classification
analysis} techniques.
\end{enumerate}
Finally, we will use the results of this analysis to make business
decisions, e.g.~about brand positioning, product development, etc.,
depending on the market segments and key purchase drivers we find at the
end of this process.
\clearpage
\hypertarget{the-data}{%
\section{The Data}\label{the-data}}
First we load the data to use (see the raw .Rmd file to change the data
file as needed):
\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{\# Please ENTER the name of the file with the data used. The file should be a .csv with one row per observation (e.g. person) and one column per attribute. Make sure the data are numeric.}
\NormalTok{datafile\_name }\OtherTok{=} \StringTok{"../Sessions23/data/Boats.csv"}
\CommentTok{\# Please ENTER the minimum number below which values are not printed {-} this makes the tables easier to read. Default values are either 10e6 (to print everything) or 0.5. Try both to see the difference.}
\NormalTok{MIN\_VALUE }\OtherTok{=} \FloatTok{0.5}
\CommentTok{\# Please enter the maximum number of observations to show in the report and slides. }
\CommentTok{\# DEFAULT is 10. If the number is large the report may be slow.}
\NormalTok{max\_data\_report }\OtherTok{=} \DecValTok{10}
\end{Highlighting}
\end{Shaded}
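For reference, the actual loading of the file happens in a hidden code
chunk of the raw .Rmd; a minimal sketch of what such a chunk typically
does (the object name \texttt{ProjectData} is illustrative) is:

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{\# Read the survey data and make sure it is a numeric matrix (a sketch)}
\NormalTok{ProjectData }\OtherTok{=} \FunctionTok{read.csv}\NormalTok{(datafile\_name)}
\NormalTok{ProjectData }\OtherTok{=} \FunctionTok{data.matrix}\NormalTok{(ProjectData)}
\end{Highlighting}
\end{Shaded}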
\clearpage
\hypertarget{part-1-key-customer-characteristics}{%
\section{Part 1: Key Customer
Characteristics}\label{part-1-key-customer-characteristics}}
The code used here is along the lines of the code in the reading
\href{https://github.com/InseadDataAnalytics/INSEADAnalytics/blob/master/CourseSessions/Sessions23/FactorAnalysisReading.Rmd}{FactorAnalysisReading.Rmd}.
We follow the process described in the
\href{http://inseaddataanalytics.github.io/INSEADAnalytics/CourseSessions/Sessions23/FactorAnalysisReading.html}{Dimensionality
Reduction} reading.
In this part we also become familiar with:
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
Some visualization tools;
\item
Principal Component Analysis and Factor Analysis;
\item
Introduction to machine learning methods.
\end{enumerate}
(All user inputs for this part should be selected in the code chunk in
the raw .Rmd file)
\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{\# Please ENTER the original raw attributes to use. }
\CommentTok{\# Please use numbers, not column names, e.g. c(1:5, 7, 8) uses columns 1,2,3,4,5,7,8}
\NormalTok{factor\_attributes\_used }\OtherTok{=} \FunctionTok{c}\NormalTok{(}\DecValTok{2}\SpecialCharTok{:}\DecValTok{30}\NormalTok{)}
\CommentTok{\# Please ENTER the selection criteria for the factors to use. }
\CommentTok{\# Choices: "eigenvalue", "variance", "manual"}
\NormalTok{factor\_selectionciterion }\OtherTok{=} \StringTok{"eigenvalue"}
\CommentTok{\# Please ENTER the desired minimum variance explained }
\CommentTok{\# (Only used in case "variance" is the factor selection criterion used). }
\NormalTok{minimum\_variance\_explained }\OtherTok{=} \DecValTok{65} \CommentTok{\# between 1 and 100}
\CommentTok{\# Please ENTER the number of factors to use }
\CommentTok{\# (Only used in case "manual" is the factor selection criterion used).}
\NormalTok{manual\_numb\_factors\_used }\OtherTok{=} \DecValTok{15}
\CommentTok{\# Please ENTER the rotation eventually used (e.g. "none", "varimax", "quartimax", "promax", "oblimin", "simplimax", and "cluster" {-} see help(principal)). Default is "varimax"}
\NormalTok{rotation\_used }\OtherTok{=} \StringTok{"varimax"}
\end{Highlighting}
\end{Shaded}
\hypertarget{steps-1-2-check-the-data}{%
\subsection{Steps 1-2: Check the Data}\label{steps-1-2-check-the-data}}
We start with some basic visual exploration of, say, the first few
observations:
\begin{center}
\begin{longtable}{lrrrrrrrrrr}
\toprule
& Obs.01 & Obs.02 & Obs.03 & Obs.04 & Obs.05 & Obs.06 & Obs.07 & Obs.08 & Obs.09 & Obs.10\\
\midrule
Q1.1 & 5 & 3 & 3 & 5 & 4 & 4 & 4 & 4 & 4 & 2\\
Q1.2 & 1 & 2 & 1 & 2 & 2 & 2 & 3 & 3 & 3 & 5\\
Q1.3 & 5 & 4 & 4 & 3 & 4 & 2 & 4 & 4 & 4 & 3\\
Q1.4 & 5 & 4 & 4 & 4 & 5 & 4 & 4 & 4 & 4 & 2\\
Q1.5 & 5 & 4 & 5 & 5 & 5 & 5 & 5 & 4 & 4 & 1\\
\addlinespace
Q1.6 & 5 & 4 & 4 & 5 & 4 & 5 & 5 & 5 & 4 & 3\\
Q1.7 & 5 & 5 & 4 & 3 & 5 & 4 & 5 & 4 & 4 & 5\\
Q1.8 & 3 & 3 & 2 & 3 & 4 & 3 & 5 & 4 & 3 & 4\\
Q1.9 & 5 & 4 & 4 & 3 & 5 & 2 & 3 & 5 & 4 & 3\\
Q1.10 & 4 & 4 & 3 & 4 & 4 & 4 & 1 & 3 & 3 & 4\\
\addlinespace
Q1.11 & 2 & 3 & 2 & 4 & 5 & 2 & 5 & 4 & 5 & 1\\
Q1.12 & 1 & 2 & 2 & 2 & 1 & 2 & 3 & 1 & 1 & 3\\
Q1.13 & 5 & 4 & 5 & 5 & 5 & 3 & 4 & 4 & 4 & 1\\
Q1.14 & 5 & 4 & 5 & 5 & 4 & 4 & 4 & 4 & 4 & 4\\
Q1.15 & 5 & 5 & 5 & 5 & 5 & 4 & 5 & 3 & 5 & 3\\
\addlinespace
Q1.16 & 4 & 3 & 4 & 4 & 5 & 4 & 4 & 3 & 4 & 2\\
Q1.17 & 4 & 3 & 4 & 3 & 5 & 4 & 5 & 4 & 4 & 3\\
Q1.18 & 5 & 5 & 4 & 5 & 5 & 4 & 4 & 4 & 4 & 5\\
Q1.19 & 5 & 4 & 4 & 4 & 5 & 5 & 5 & 4 & 4 & 5\\
Q1.20 & 4 & 3 & 3 & 3 & 4 & 3 & 4 & 3 & 4 & 4\\
\addlinespace
Q1.21 & 5 & 4 & 3 & 5 & 4 & 5 & 5 & 4 & 4 & 5\\
Q1.22 & 5 & 4 & 5 & 4 & 4 & 5 & 5 & 4 & 4 & 4\\
Q1.23 & 5 & 3 & 4 & 5 & 5 & 4 & 5 & 3 & 4 & 5\\
Q1.24 & 5 & 4 & 4 & 3 & 4 & 5 & 5 & 5 & 4 & 4\\
Q1.25 & 5 & 4 & 4 & 5 & 4 & 4 & 5 & 4 & 4 & 5\\
\addlinespace
Q1.26 & 5 & 4 & 5 & 4 & 5 & 4 & 5 & 5 & 5 & 4\\
Q1.27 & 3 & 4 & 3 & 3 & 4 & 4 & 5 & 3 & 5 & 4\\
Q1.28 & 4 & 4 & 3 & 3 & 4 & 4 & 5 & 3 & 5 & 4\\
Q1.29 & 5 & 4 & 4 & 5 & 4 & 4 & 5 & 4 & 3 & 5\\
\bottomrule
\end{longtable}\end{center}
The data we use here have the following descriptive statistics:
\begin{center}
\begin{longtable}{lrrrrrrr}
\toprule
& min & 25th percentile & median & mean & 75th percentile & max & std\\
\midrule
Q1.1 & 1 & 4 & 4 & 4.03 & 5 & 5 & 0.82\\
Q1.2 & 1 & 2 & 3 & 2.89 & 4 & 5 & 1.01\\
Q1.3 & 1 & 2 & 3 & 3.12 & 4 & 5 & 1.02\\
Q1.4 & 1 & 3 & 4 & 3.89 & 4 & 5 & 0.82\\
Q1.5 & 1 & 3 & 4 & 3.55 & 4 & 5 & 0.93\\
\addlinespace
Q1.6 & 1 & 4 & 4 & 3.95 & 4 & 5 & 0.82\\
Q1.7 & 1 & 3 & 4 & 3.67 & 4 & 5 & 0.90\\
Q1.8 & 1 & 3 & 4 & 3.74 & 4 & 5 & 0.82\\
Q1.9 & 1 & 2 & 3 & 2.89 & 4 & 5 & 1.08\\
Q1.10 & 1 & 3 & 3 & 3.37 & 4 & 5 & 0.93\\
\addlinespace
Q1.11 & 1 & 3 & 4 & 3.46 & 4 & 5 & 1.15\\
Q1.12 & 1 & 2 & 3 & 2.86 & 4 & 5 & 1.01\\
Q1.13 & 1 & 2 & 3 & 3.02 & 4 & 5 & 0.98\\
Q1.14 & 1 & 3 & 3 & 3.25 & 4 & 5 & 0.97\\
Q1.15 & 1 & 3 & 4 & 3.63 & 4 & 5 & 0.89\\
\addlinespace
Q1.16 & 1 & 2 & 3 & 3.10 & 4 & 5 & 1.05\\
Q1.17 & 1 & 2 & 3 & 3.08 & 4 & 5 & 0.98\\
Q1.18 & 1 & 4 & 4 & 4.12 & 5 & 5 & 0.74\\
Q1.19 & 1 & 4 & 4 & 4.20 & 5 & 5 & 0.72\\
Q1.20 & 1 & 2 & 3 & 3.16 & 4 & 5 & 0.97\\
\addlinespace
Q1.21 & 1 & 4 & 4 & 4.25 & 5 & 5 & 0.73\\
Q1.22 & 1 & 4 & 4 & 4.01 & 4 & 5 & 0.74\\
Q1.23 & 1 & 3 & 4 & 3.56 & 4 & 5 & 1.02\\
Q1.24 & 1 & 4 & 4 & 4.11 & 5 & 5 & 0.76\\
Q1.25 & 1 & 3 & 4 & 3.79 & 4 & 5 & 0.91\\
\addlinespace
Q1.26 & 1 & 2 & 3 & 2.95 & 4 & 5 & 1.05\\
Q1.27 & 1 & 2 & 3 & 3.16 & 4 & 5 & 1.05\\
Q1.28 & 1 & 3 & 3 & 3.31 & 4 & 5 & 0.98\\
Q1.29 & 1 & 4 & 4 & 4.03 & 4 & 5 & 0.73\\
\bottomrule
\end{longtable}\end{center}
\hypertarget{step-3-check-correlations}{%
\subsection{Step 3: Check
Correlations}\label{step-3-check-correlations}}
This is the correlation matrix of the customer responses to the 29
attitude questions - which are the only questions that we will use for
the segmentation (see the case):
\setkeys{Gin}{height=\textheight}\adjustbox{width=\linewidth}{
\begin{tabular}{lrrrrrrrrrrrrrrrrrrrrrrrrrrrrr}
\toprule
& Q1.1 & Q1.2 & Q1.3 & Q1.4 & Q1.5 & Q1.6 & Q1.7 & Q1.8 & Q1.9 & Q1.10 & Q1.11 & Q1.12 & Q1.13 & Q1.14 & Q1.15 & Q1.16 & Q1.17 & Q1.18 & Q1.19 & Q1.20 & Q1.21 & Q1.22 & Q1.23 & Q1.24 & Q1.25 & Q1.26 & Q1.27 & Q1.28 & Q1.29\\
\midrule
Q1.1 & 1.00 & 0.01 & 0.11 & 0.20 & 0.18 & 0.27 & 0.18 & 0.09 & 0.08 & 0.11 & 0.14 & -0.05 & 0.12 & 0.18 & 0.26 & 0.16 & 0.15 & 0.25 & 0.27 & 0.19 & 0.24 & 0.23 & 0.19 & 0.21 & 0.23 & 0.10 & 0.13 & 0.18 & 0.20\\
Q1.2 & 0.01 & 1.00 & -0.03 & -0.21 & -0.21 & -0.04 & 0.02 & 0.20 & 0.09 & 0.16 & 0.04 & 0.37 & 0.01 & -0.03 & -0.08 & -0.02 & 0.04 & -0.04 & -0.04 & 0.05 & -0.10 & -0.08 & 0.00 & -0.08 & 0.01 & 0.07 & 0.05 & 0.02 & -0.03\\
Q1.3 & 0.11 & -0.03 & 1.00 & 0.26 & 0.40 & 0.34 & 0.44 & -0.05 & 0.58 & 0.14 & 0.10 & -0.09 & 0.48 & 0.46 & 0.38 & 0.39 & 0.38 & 0.24 & 0.14 & 0.39 & 0.18 & 0.28 & 0.34 & 0.23 & 0.36 & 0.47 & 0.40 & 0.43 & 0.17\\
Q1.4 & 0.20 & -0.21 & 0.26 & 1.00 & 0.37 & 0.20 & 0.18 & 0.00 & 0.17 & 0.10 & 0.06 & -0.16 & 0.27 & 0.29 & 0.30 & 0.18 & 0.17 & 0.18 & 0.19 & 0.18 & 0.18 & 0.23 & 0.16 & 0.23 & 0.22 & 0.19 & 0.17 & 0.21 & 0.19\\
Q1.5 & 0.18 & -0.21 & 0.40 & 0.37 & 1.00 & 0.29 & 0.29 & -0.03 & 0.33 & 0.14 & 0.07 & -0.17 & 0.45 & 0.46 & 0.42 & 0.36 & 0.32 & 0.23 & 0.18 & 0.32 & 0.19 & 0.27 & 0.29 & 0.25 & 0.29 & 0.34 & 0.29 & 0.33 & 0.18\\
\addlinespace
Q1.6 & 0.27 & -0.04 & 0.34 & 0.20 & 0.29 & 1.00 & 0.55 & 0.04 & 0.35 & 0.12 & 0.15 & -0.12 & 0.29 & 0.31 & 0.31 & 0.27 & 0.24 & 0.44 & 0.36 & 0.35 & 0.42 & 0.41 & 0.32 & 0.37 & 0.42 & 0.31 & 0.34 & 0.39 & 0.27\\
Q1.7 & 0.18 & 0.02 & 0.44 & 0.18 & 0.29 & 0.55 & 1.00 & -0.01 & 0.49 & 0.12 & 0.12 & -0.11 & 0.35 & 0.36 & 0.34 & 0.31 & 0.29 & 0.40 & 0.28 & 0.36 & 0.33 & 0.39 & 0.30 & 0.33 & 0.42 & 0.39 & 0.37 & 0.40 & 0.24\\
Q1.8 & 0.09 & 0.20 & -0.05 & 0.00 & -0.03 & 0.04 & -0.01 & 1.00 & -0.09 & 0.09 & 0.14 & 0.24 & -0.05 & -0.02 & 0.06 & 0.02 & 0.05 & 0.07 & 0.09 & 0.04 & 0.06 & 0.05 & 0.10 & 0.02 & 0.10 & -0.04 & 0.03 & 0.05 & 0.10\\
Q1.9 & 0.08 & 0.09 & 0.58 & 0.17 & 0.33 & 0.35 & 0.49 & -0.09 & 1.00 & 0.14 & 0.06 & -0.04 & 0.48 & 0.43 & 0.33 & 0.39 & 0.37 & 0.22 & 0.07 & 0.37 & 0.14 & 0.23 & 0.29 & 0.23 & 0.32 & 0.50 & 0.40 & 0.40 & 0.11\\
Q1.10 & 0.11 & 0.16 & 0.14 & 0.10 & 0.14 & 0.12 & 0.12 & 0.09 & 0.14 & 1.00 & -0.09 & 0.12 & 0.16 & 0.11 & 0.11 & -0.03 & -0.03 & 0.14 & 0.09 & 0.10 & 0.08 & 0.09 & 0.07 & 0.13 & 0.08 & 0.13 & 0.08 & 0.07 & 0.05\\
\addlinespace
Q1.11 & 0.14 & 0.04 & 0.10 & 0.06 & 0.07 & 0.15 & 0.12 & 0.14 & 0.06 & -0.09 & 1.00 & 0.09 & 0.08 & 0.13 & 0.20 & 0.32 & 0.31 & 0.11 & 0.12 & 0.25 & 0.13 & 0.17 & 0.19 & 0.08 & 0.25 & 0.09 & 0.16 & 0.18 & 0.17\\
Q1.12 & -0.05 & 0.37 & -0.09 & -0.16 & -0.17 & -0.12 & -0.11 & 0.24 & -0.04 & 0.12 & 0.09 & 1.00 & -0.11 & -0.17 & -0.17 & -0.02 & 0.02 & -0.12 & -0.09 & 0.01 & -0.17 & -0.11 & -0.03 & -0.17 & -0.05 & -0.06 & 0.00 & -0.01 & -0.04\\
Q1.13 & 0.12 & 0.01 & 0.48 & 0.27 & 0.45 & 0.29 & 0.35 & -0.05 & 0.48 & 0.16 & 0.08 & -0.11 & 1.00 & 0.64 & 0.46 & 0.43 & 0.43 & 0.20 & 0.11 & 0.39 & 0.14 & 0.23 & 0.32 & 0.20 & 0.32 & 0.48 & 0.40 & 0.40 & 0.19\\
Q1.14 & 0.18 & -0.03 & 0.46 & 0.29 & 0.46 & 0.31 & 0.36 & -0.02 & 0.43 & 0.11 & 0.13 & -0.17 & 0.64 & 1.00 & 0.50 & 0.43 & 0.40 & 0.25 & 0.18 & 0.41 & 0.21 & 0.29 & 0.36 & 0.21 & 0.35 & 0.46 & 0.39 & 0.40 & 0.21\\
Q1.15 & 0.26 & -0.08 & 0.38 & 0.30 & 0.42 & 0.31 & 0.34 & 0.06 & 0.33 & 0.11 & 0.20 & -0.17 & 0.46 & 0.50 & 1.00 & 0.41 & 0.39 & 0.32 & 0.26 & 0.41 & 0.21 & 0.33 & 0.35 & 0.27 & 0.43 & 0.37 & 0.35 & 0.38 & 0.24\\
\addlinespace
Q1.16 & 0.16 & -0.02 & 0.39 & 0.18 & 0.36 & 0.27 & 0.31 & 0.02 & 0.39 & -0.03 & 0.32 & -0.02 & 0.43 & 0.43 & 0.41 & 1.00 & 0.63 & 0.20 & 0.14 & 0.52 & 0.16 & 0.30 & 0.40 & 0.19 & 0.39 & 0.40 & 0.48 & 0.50 & 0.20\\
Q1.17 & 0.15 & 0.04 & 0.38 & 0.17 & 0.32 & 0.24 & 0.29 & 0.05 & 0.37 & -0.03 & 0.31 & 0.02 & 0.43 & 0.40 & 0.39 & 0.63 & 1.00 & 0.17 & 0.12 & 0.45 & 0.13 & 0.26 & 0.36 & 0.15 & 0.36 & 0.40 & 0.44 & 0.46 & 0.21\\
Q1.18 & 0.25 & -0.04 & 0.24 & 0.18 & 0.23 & 0.44 & 0.40 & 0.07 & 0.22 & 0.14 & 0.11 & -0.12 & 0.20 & 0.25 & 0.32 & 0.20 & 0.17 & 1.00 & 0.49 & 0.28 & 0.47 & 0.44 & 0.29 & 0.42 & 0.37 & 0.24 & 0.25 & 0.31 & 0.30\\
Q1.19 & 0.27 & -0.04 & 0.14 & 0.19 & 0.18 & 0.36 & 0.28 & 0.09 & 0.07 & 0.09 & 0.12 & -0.09 & 0.11 & 0.18 & 0.26 & 0.14 & 0.12 & 0.49 & 1.00 & 0.21 & 0.44 & 0.38 & 0.24 & 0.37 & 0.32 & 0.14 & 0.18 & 0.23 & 0.28\\
Q1.20 & 0.19 & 0.05 & 0.39 & 0.18 & 0.32 & 0.35 & 0.36 & 0.04 & 0.37 & 0.10 & 0.25 & 0.01 & 0.39 & 0.41 & 0.41 & 0.52 & 0.45 & 0.28 & 0.21 & 1.00 & 0.23 & 0.33 & 0.40 & 0.24 & 0.41 & 0.40 & 0.50 & 0.52 & 0.25\\
\addlinespace
Q1.21 & 0.24 & -0.10 & 0.18 & 0.18 & 0.19 & 0.42 & 0.33 & 0.06 & 0.14 & 0.08 & 0.13 & -0.17 & 0.14 & 0.21 & 0.21 & 0.16 & 0.13 & 0.47 & 0.44 & 0.23 & 1.00 & 0.42 & 0.24 & 0.42 & 0.30 & 0.15 & 0.24 & 0.26 & 0.29\\
Q1.22 & 0.23 & -0.08 & 0.28 & 0.23 & 0.27 & 0.41 & 0.39 & 0.05 & 0.23 & 0.09 & 0.17 & -0.11 & 0.23 & 0.29 & 0.33 & 0.30 & 0.26 & 0.44 & 0.38 & 0.33 & 0.42 & 1.00 & 0.34 & 0.38 & 0.37 & 0.23 & 0.35 & 0.38 & 0.34\\
Q1.23 & 0.19 & 0.00 & 0.34 & 0.16 & 0.29 & 0.32 & 0.30 & 0.10 & 0.29 & 0.07 & 0.19 & -0.03 & 0.32 & 0.36 & 0.35 & 0.40 & 0.36 & 0.29 & 0.24 & 0.40 & 0.24 & 0.34 & 1.00 & 0.23 & 0.32 & 0.33 & 0.39 & 0.44 & 0.23\\
Q1.24 & 0.21 & -0.08 & 0.23 & 0.23 & 0.25 & 0.37 & 0.33 & 0.02 & 0.23 & 0.13 & 0.08 & -0.17 & 0.20 & 0.21 & 0.27 & 0.19 & 0.15 & 0.42 & 0.37 & 0.24 & 0.42 & 0.38 & 0.23 & 1.00 & 0.31 & 0.21 & 0.24 & 0.25 & 0.27\\
Q1.25 & 0.23 & 0.01 & 0.36 & 0.22 & 0.29 & 0.42 & 0.42 & 0.10 & 0.32 & 0.08 & 0.25 & -0.05 & 0.32 & 0.35 & 0.43 & 0.39 & 0.36 & 0.37 & 0.32 & 0.41 & 0.30 & 0.37 & 0.32 & 0.31 & 1.00 & 0.34 & 0.35 & 0.40 & 0.23\\
\addlinespace
Q1.26 & 0.10 & 0.07 & 0.47 & 0.19 & 0.34 & 0.31 & 0.39 & -0.04 & 0.50 & 0.13 & 0.09 & -0.06 & 0.48 & 0.46 & 0.37 & 0.40 & 0.40 & 0.24 & 0.14 & 0.40 & 0.15 & 0.23 & 0.33 & 0.21 & 0.34 & 1.00 & 0.45 & 0.47 & 0.15\\
Q1.27 & 0.13 & 0.05 & 0.40 & 0.17 & 0.29 & 0.34 & 0.37 & 0.03 & 0.40 & 0.08 & 0.16 & 0.00 & 0.40 & 0.39 & 0.35 & 0.48 & 0.44 & 0.25 & 0.18 & 0.50 & 0.24 & 0.35 & 0.39 & 0.24 & 0.35 & 0.45 & 1.00 & 0.62 & 0.23\\
Q1.28 & 0.18 & 0.02 & 0.43 & 0.21 & 0.33 & 0.39 & 0.40 & 0.05 & 0.40 & 0.07 & 0.18 & -0.01 & 0.40 & 0.40 & 0.38 & 0.50 & 0.46 & 0.31 & 0.23 & 0.52 & 0.26 & 0.38 & 0.44 & 0.25 & 0.40 & 0.47 & 0.62 & 1.00 & 0.26\\
Q1.29 & 0.20 & -0.03 & 0.17 & 0.19 & 0.18 & 0.27 & 0.24 & 0.10 & 0.11 & 0.05 & 0.17 & -0.04 & 0.19 & 0.21 & 0.24 & 0.20 & 0.21 & 0.30 & 0.28 & 0.25 & 0.29 & 0.34 & 0.23 & 0.27 & 0.23 & 0.15 & 0.23 & 0.26 & 1.00\\
\bottomrule
\end{tabular}}\setkeys{Gin}{height=\maxheight}
\textbf{Questions}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
Do you see any high correlations between the responses? Do they make
sense?
\item
What do these correlations imply?
\end{enumerate}
\textbf{Answers:}
\begin{itemize}
\tightlist
\item
\item
\item
\item
\item
\item
\item
\item
\item
\item
\end{itemize}
\hypertarget{step-4-choose-number-of-factors}{%
\subsection{Step 4: Choose number of
factors}\label{step-4-choose-number-of-factors}}
Clearly the survey asked many redundant questions (can you think of some
reasons why?), so we may be able to ``group'' these 29 attitude
questions into only a few ``key factors''. This will not only simplify
the data, but also greatly facilitate our understanding of the
customers.
To do so, we use methods called
\href{https://en.wikipedia.org/wiki/Principal_component_analysis}{Principal
Component Analysis} and
\href{https://en.wikipedia.org/wiki/Factor_analysis}{factor analysis} as
also discussed in the
\href{http://inseaddataanalytics.github.io/INSEADAnalytics/CourseSessions/Sessions23/FactorAnalysisReading.html}{Dimensionality
Reduction readings}. We can use two different R commands for this (they
make slightly different information easily available as output): the
command \texttt{principal} (check \texttt{help(principal)} from R
package \href{http://personality-project.org/r/psych/}{psych}), and the
command \texttt{PCA} from R package
\href{http://factominer.free.fr}{FactoMineR} - there are more packages
and commands for this, as these methods are very widely used.
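For reference, here is a minimal, hedged sketch of how such an analysis
can be run in R, assuming the data have been loaded into the numeric
matrix \texttt{ProjectData} sketched earlier (object names are
illustrative):

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{\# A sketch using the psych package {-} see help(principal)}
\FunctionTok{library}\NormalTok{(psych)}
\NormalTok{pca\_res }\OtherTok{=} \FunctionTok{principal}\NormalTok{(ProjectData[, factor\_attributes\_used],}
\NormalTok{                    }\AttributeTok{nfactors =} \DecValTok{5}\NormalTok{, }\AttributeTok{rotate =}\NormalTok{ rotation\_used, }\AttributeTok{scores =} \ConstantTok{TRUE}\NormalTok{)}
\NormalTok{pca\_res}\SpecialCharTok{$}\NormalTok{values   }\CommentTok{\# eigenvalues of the components}
\NormalTok{pca\_res}\SpecialCharTok{$}\NormalTok{loadings }\CommentTok{\# factor loadings after the chosen rotation}
\end{Highlighting}
\end{Shaded}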
Let's look at the \textbf{variance explained} as well as the
\textbf{eigenvalues} (see session readings):
\begin{center}
\begin{longtable}{lrrr}
\toprule
& Eigenvalue & Pct of explained variance & Cumulative pct of explained variance\\
\midrule
Component 1 & 8.43 & 29.08 & 29.08\\
Component 2 & 2.33 & 8.05 & 37.12\\
Component 3 & 1.86 & 6.42 & 43.55\\
Component 4 & 1.46 & 5.03 & 48.57\\
Component 5 & 1.21 & 4.16 & 52.74\\
\addlinespace
Component 6 & 0.90 & 3.10 & 55.84\\
Component 7 & 0.82 & 2.82 & 58.65\\
Component 8 & 0.79 & 2.71 & 61.36\\
Component 9 & 0.78 & 2.69 & 64.05\\
Component 10 & 0.74 & 2.56 & 66.61\\
\addlinespace
Component 11 & 0.69 & 2.37 & 68.98\\
Component 12 & 0.65 & 2.25 & 71.23\\
Component 13 & 0.65 & 2.23 & 73.47\\
Component 14 & 0.62 & 2.13 & 75.60\\
Component 15 & 0.61 & 2.10 & 77.70\\
\addlinespace
Component 16 & 0.58 & 1.99 & 79.69\\
Component 17 & 0.56 & 1.94 & 81.62\\
Component 18 & 0.54 & 1.85 & 83.47\\
Component 19 & 0.52 & 1.81 & 85.28\\
Component 20 & 0.51 & 1.76 & 87.04\\
\addlinespace
Component 21 & 0.50 & 1.72 & 88.77\\
Component 22 & 0.49 & 1.69 & 90.45\\
Component 23 & 0.46 & 1.59 & 92.04\\
Component 24 & 0.46 & 1.57 & 93.61\\
Component 25 & 0.41 & 1.42 & 95.03\\
\addlinespace
Component 26 & 0.38 & 1.32 & 96.36\\
Component 27 & 0.37 & 1.28 & 97.63\\
Component 28 & 0.35 & 1.22 & 98.85\\
Component 29 & 0.33 & 1.15 & 100.00\\
\bottomrule
\end{longtable}\end{center}
\begin{center}\includegraphics{MarketSegmentationProcessInClass_files/figure-latex/unnamed-chunk-9-1} \end{center}
\textbf{Questions:}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
Can you explain what this table and the plot are? What do they
indicate? What can we learn from these?
\item
Why does the plot have this specific shape? Could the plotted line be
increasing?
\item
What characteristics of these results would we prefer to see? Why?
\end{enumerate}
\textbf{Answers}
\begin{itemize}
\tightlist
\item
\item
\item
\item
\item
\item
\item
\item
\item
\item
\end{itemize}
\hypertarget{step-5-interpret-the-factors}{%
\subsection{Step 5: Interpret the
factors}\label{step-5-interpret-the-factors}}
Let's now see what the ``top factors'' look like.
To better visualize them, we will use what is called a ``rotation''.
There are many rotation methods. In this case we selected the varimax
rotation. For our data, the 5 selected factors look as follows after
this rotation:
\setkeys{Gin}{height=\textheight}\adjustbox{width=\linewidth}{
\begin{tabular}{lrrrrr}
\toprule
& Comp.1 & Comp.2 & Comp.3 & Comp.4 & Comp.5\\
\midrule
Q1.9 & 0.78 & 0.12 & 0.00 & -0.12 & -0.01\\
Q1.26 & 0.72 & 0.11 & 0.10 & 0.01 & 0.02\\
Q1.3 & 0.71 & 0.15 & 0.17 & -0.04 & -0.06\\
Q1.13 & 0.68 & 0.02 & 0.40 & 0.01 & -0.03\\
Q1.27 & 0.63 & 0.24 & -0.02 & 0.28 & 0.05\\
\addlinespace
Q1.28 & 0.62 & 0.30 & 0.03 & 0.31 & 0.04\\
Q1.14 & 0.61 & 0.11 & 0.44 & 0.09 & -0.07\\
Q1.16 & 0.57 & 0.09 & 0.14 & 0.55 & -0.03\\
Q1.7 & 0.56 & 0.50 & -0.05 & -0.07 & -0.03\\
Q1.17 & 0.55 & 0.04 & 0.15 & 0.54 & 0.03\\
\addlinespace
Q1.20 & 0.55 & 0.25 & 0.11 & 0.36 & 0.10\\
Q1.5 & 0.42 & 0.14 & 0.58 & 0.03 & -0.18\\
Q1.15 & 0.42 & 0.24 & 0.50 & 0.22 & -0.03\\
Q1.23 & 0.41 & 0.29 & 0.15 & 0.31 & 0.07\\
Q1.25 & 0.39 & 0.43 & 0.15 & 0.25 & 0.06\\
\addlinespace
Q1.6 & 0.38 & 0.62 & 0.02 & 0.00 & -0.02\\
Q1.22 & 0.24 & 0.62 & 0.11 & 0.18 & -0.05\\
Q1.18 & 0.18 & 0.73 & 0.09 & 0.00 & 0.01\\
Q1.10 & 0.17 & 0.14 & 0.31 & -0.48 & 0.47\\
Q1.24 & 0.17 & 0.63 & 0.12 & -0.06 & -0.09\\
\addlinespace
Q1.2 & 0.15 & -0.07 & -0.27 & -0.07 & 0.71\\
Q1.4 & 0.13 & 0.17 & 0.65 & 0.01 & -0.15\\
Q1.29 & 0.08 & 0.45 & 0.19 & 0.24 & 0.06\\
Q1.21 & 0.07 & 0.73 & 0.04 & 0.05 & -0.10\\
Q1.11 & 0.04 & 0.13 & 0.06 & 0.66 & 0.14\\
\addlinespace
Q1.19 & 0.00 & 0.70 & 0.15 & 0.07 & 0.04\\
Q1.1 & -0.02 & 0.37 & 0.41 & 0.14 & 0.16\\
Q1.12 & -0.02 & -0.17 & -0.18 & 0.09 & 0.70\\
Q1.8 & -0.18 & 0.13 & 0.19 & 0.23 & 0.59\\
\bottomrule
\end{tabular}}\setkeys{Gin}{height=\maxheight}
To better visualize and interpret the factors we often ``suppress''
loadings with small values, e.g.~with absolute values smaller than 0.5.
In this case our factors look as follows after suppressing the small
numbers:
\setkeys{Gin}{height=\textheight}\adjustbox{width=\linewidth}{
\begin{tabular}{lrrrrr}
\toprule
& Comp.1 & Comp.2 & Comp.3 & Comp.4 & Comp.5\\
\midrule
Q1.9 & 0.78 & & & & \\
Q1.26 & 0.72 & & & & \\
Q1.3 & 0.71 & & & & \\
Q1.13 & 0.68 & & & & \\
Q1.27 & 0.63 & & & & \\
\addlinespace
Q1.28 & 0.62 & & & & \\
Q1.14 & 0.61 & & & & \\
Q1.16 & 0.57 & & & 0.55 & \\
Q1.7 & 0.56 & 0.50 & & & \\
Q1.17 & 0.55 & & & 0.54 & \\
\addlinespace
Q1.20 & 0.55 & & & & \\
Q1.5 & & & 0.58 & & \\
Q1.15 & & & 0.50 & & \\
Q1.23 & & & & & \\
Q1.25 & & & & & \\
\addlinespace
Q1.6 & & 0.62 & & & \\
Q1.22 & & 0.62 & & & \\
Q1.18 & & 0.73 & & & \\
Q1.10 & & & & & \\
Q1.24 & & 0.63 & & & \\
\addlinespace
Q1.2 & & & & & 0.71\\
Q1.4 & & & 0.65 & & \\
Q1.29 & & & & & \\
Q1.21 & & 0.73 & & & \\
Q1.11 & & & & 0.66 & \\
\addlinespace
Q1.19 & & 0.70 & & & \\
Q1.1 & & & & & \\
Q1.12 & & & & & 0.70\\
Q1.8 & & & & & 0.59\\
\bottomrule
\end{tabular}}\setkeys{Gin}{height=\maxheight}
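In the raw .Rmd this suppression is just a thresholding of the loadings
matrix. A hedged sketch, using the \texttt{MIN\_VALUE} parameter set at
the beginning and the \texttt{pca\_res} object sketched above:

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{\# Hide loadings with absolute value below MIN\_VALUE, for readability (a sketch)}
\NormalTok{L }\OtherTok{=} \FunctionTok{round}\NormalTok{(}\FunctionTok{unclass}\NormalTok{(pca\_res}\SpecialCharTok{$}\NormalTok{loadings), }\DecValTok{2}\NormalTok{)}
\NormalTok{L[}\FunctionTok{abs}\NormalTok{(L) }\SpecialCharTok{\textless{}}\NormalTok{ MIN\_VALUE] }\OtherTok{=} \ConstantTok{NA}
\FunctionTok{print}\NormalTok{(L)}
\end{Highlighting}
\end{Shaded}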
\textbf{Questions}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
What do the first couple of factors mean? Do they make business sense?
\item
How many factors should we choose for this data/customer base? Please
try a few and explain your final choice based on a) statistical
arguments, b) interpretation arguments, and c) business arguments
(\textbf{you need to consider all three types of arguments})
\item
How would you interpret the factors you selected?
\item
What lessons about data science do you learn when doing this analysis?
Please comment.
\end{enumerate}
\textbf{Answers}
\begin{itemize}
\tightlist
\item
\item
\item
\item
\item
\item
\item
\item
\item
\item
\end{itemize}
\hypertarget{step-6-save-factor-scores}{%
\subsection{Step 6: Save factor
scores}\label{step-6-save-factor-scores}}
We can now either replace all initial variables used in this part with
the factor scores, or just select one of the initial variables for each
of the selected factors in order to represent that factor. Here are the
factor scores for the first few respondents:
\setkeys{Gin}{height=\textheight}\adjustbox{width=\linewidth}{
\begin{tabular}{lrrrrrrrrrr}
\toprule
& Obs.01 & Obs.02 & Obs.03 & Obs.04 & Obs.05 & Obs.06 & Obs.07 & Obs.08 & Obs.09 & Obs.10\\
\midrule
DV (Factor) 1 & 1.63 & 1.39 & 1.81 & 0.39 & 1.67 & 0.26 & 1.06 & 1.19 & 1.80 & 0.85\\
DV (Factor) 2 & 1.21 & -0.09 & -1.19 & 0.08 & -0.20 & 0.97 & 0.97 & -0.05 & -0.76 & 1.37\\
DV (Factor) 3 & 1.76 & 0.04 & 0.95 & 2.06 & 1.49 & 0.43 & -0.17 & -0.12 & -0.15 & -3.42\\
DV (Factor) 4 & -1.09 & -1.19 & -0.68 & 0.19 & 0.62 & -0.25 & 2.45 & -0.67 & 0.77 & -0.94\\
DV (Factor) 5 & -1.67 & -1.01 & -2.26 & -0.58 & -0.74 & -1.08 & -0.24 & -0.82 & -1.11 & 1.23\\
\bottomrule
\end{tabular}}\setkeys{Gin}{height=\maxheight}
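In the raw .Rmd the scores are simply extracted from the output of the
factor analysis; a hedged sketch using the \texttt{pca\_res} object from
above:

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{\# Save the factor scores as the new derived variables (a sketch):}
\CommentTok{\# one column per selected factor, one row per respondent}
\NormalTok{factor\_scores }\OtherTok{=}\NormalTok{ pca\_res}\SpecialCharTok{$}\NormalTok{scores}
\end{Highlighting}
\end{Shaded}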
\textbf{Questions}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
Can you describe some of the people using the new derived variables
(factor scores)?
\item
Which of the 29 initial variables would you select to represent each
of the factors you selected?
\end{enumerate}
\textbf{Answers}
\begin{itemize}
\tightlist
\item
\item
\item
\item
\item
\item
\item
\item
\item
\item
\end{itemize}
\clearpage
\hypertarget{part-2-customer-segmentation}{%
\section{Part 2: Customer
Segmentation}\label{part-2-customer-segmentation}}
The code used here is along the lines of the code in the reading
\href{https://github.com/InseadDataAnalytics/INSEADAnalytics/blob/master/CourseSessions/Sessions45/ClusterAnalysisReading.Rmd}{ClusterAnalysisReading.Rmd}.
We follow the process described in the
\href{http://inseaddataanalytics.github.io/INSEADAnalytics/CourseSessions/Sessions45/ClusterAnalysisReading.html}{Cluster
Analysis} reading.
In this part we also become familiar with:
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
Some clustering Methods;
\item
How these tools can be used in practice.
\end{enumerate}
A key family of methods used for segmentation is what is called
\textbf{clustering methods}. This is a very important problem in
statistics and \textbf{machine learning}, used in all sorts of
applications such as
\href{http://www.cs.umd.edu/~samir/498/Amazon-Recommendations.pdf}{Amazon's
pioneering work on recommender systems}. There are many
\emph{mathematical methods} for clustering. We will use two very
standard methods, \textbf{hierarchical clustering} and \textbf{k-means}.
While the ``math'' behind all these methods can be complex, the R
functions used are relatively simple to use, as we will see.
(All user inputs for this part should be selected in the code chunk in
the raw .Rmd file)
\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{\# Please ENTER the original raw attributes to use for the segmentation (the "segmentation attributes")}
\CommentTok{\# Please use numbers, not column names, e.g. c(1:5, 7, 8) uses columns 1,2,3,4,5,7,8}
\NormalTok{segmentation\_attributes\_used }\OtherTok{=} \FunctionTok{c}\NormalTok{(}\DecValTok{28}\NormalTok{,}\DecValTok{25}\NormalTok{,}\DecValTok{27}\NormalTok{,}\DecValTok{14}\NormalTok{,}\DecValTok{20}\NormalTok{,}\DecValTok{8}\NormalTok{,}\DecValTok{3}\NormalTok{,}\DecValTok{12}\NormalTok{,}\DecValTok{13}\NormalTok{,}\DecValTok{5}\NormalTok{,}\DecValTok{9}\NormalTok{,}\DecValTok{11}\NormalTok{,}\DecValTok{2}\NormalTok{,}\DecValTok{30}\NormalTok{,}\DecValTok{24}\NormalTok{) }\CommentTok{\#c(10,19,5,12,3) }
\CommentTok{\# Please ENTER the original raw attributes to use for the profiling of the segments (the "profiling attributes")}
\CommentTok{\# Please use numbers, not column names, e.g. c(1:5, 7, 8) uses columns 1,2,3,4,5,7,8}
\NormalTok{profile\_attributes\_used }\OtherTok{=} \FunctionTok{c}\NormalTok{(}\DecValTok{2}\SpecialCharTok{:}\DecValTok{82}\NormalTok{) }
\CommentTok{\# Please ENTER the number of clusters to eventually use for this report}
\NormalTok{numb\_clusters\_used }\OtherTok{=} \DecValTok{7} \CommentTok{\# for boats possibly use 5, for Mall\_Visits use 3}
\CommentTok{\# Please enter the method to use for the segmentation:}
\NormalTok{profile\_with }\OtherTok{=} \StringTok{"hclust"} \CommentTok{\# "hclust" or "kmeans"}
\CommentTok{\# Please ENTER the distance metric eventually used for the clustering in case of hierarchical clustering }
\CommentTok{\# (e.g. "euclidean", "maximum", "manhattan", "canberra", "binary" or "minkowski" {-} see help(dist)). }
\CommentTok{\# DEFAULT is "euclidean"}
\NormalTok{distance\_used }\OtherTok{=} \StringTok{"euclidean"}
\CommentTok{\# Please ENTER the hierarchical clustering method to use (options are:}
\CommentTok{\# "ward.D", "ward.D2", "single", "complete", "average", "mcquitty", "median" or "centroid").}
\CommentTok{\# DEFAULT is "ward.D"}
\NormalTok{hclust\_method }\OtherTok{=} \StringTok{"ward.D"}
\CommentTok{\# Please ENTER the kmeans clustering method to use (options are:}
\CommentTok{\# "Hartigan{-}Wong", "Lloyd", "Forgy", "MacQueen").}
\CommentTok{\# DEFAULT is "Lloyd"}
\NormalTok{kmeans\_method }\OtherTok{=} \StringTok{"Lloyd"}
\end{Highlighting}
\end{Shaded}
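For reference, a minimal hedged sketch of how these parameters feed into
the standard R clustering functions (the raw .Rmd may also standardize
the data first; object names are illustrative):

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{\# Hierarchical clustering (a sketch)}
\NormalTok{distances }\OtherTok{=} \FunctionTok{dist}\NormalTok{(ProjectData[, segmentation\_attributes\_used], }\AttributeTok{method =}\NormalTok{ distance\_used)}
\NormalTok{hc }\OtherTok{=} \FunctionTok{hclust}\NormalTok{(distances, }\AttributeTok{method =}\NormalTok{ hclust\_method)}
\NormalTok{memberships\_hclust }\OtherTok{=} \FunctionTok{cutree}\NormalTok{(hc, }\AttributeTok{k =}\NormalTok{ numb\_clusters\_used)}
\CommentTok{\# k{-}means (a sketch)}
\NormalTok{km }\OtherTok{=} \FunctionTok{kmeans}\NormalTok{(ProjectData[, segmentation\_attributes\_used],}
\NormalTok{            }\AttributeTok{centers =}\NormalTok{ numb\_clusters\_used, }\AttributeTok{algorithm =}\NormalTok{ kmeans\_method)}
\NormalTok{memberships\_kmeans }\OtherTok{=}\NormalTok{ km}\SpecialCharTok{$}\NormalTok{cluster}
\end{Highlighting}
\end{Shaded}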
\hypertarget{steps-1-2-explore-the-data}{%
\subsection{Steps 1-2: Explore the
data}\label{steps-1-2-explore-the-data}}
(This was done above, so we skip it)
\hypertarget{step-3.-select-segmentation-variables}{%
\subsection{Step 3. Select Segmentation
Variables}\label{step-3.-select-segmentation-variables}}
For simplicity we will use one representative question for each of the
factors we found in Part 1 (we can also use the ``factor scores'' for
each respondent) to represent our survey respondents. These are the
\texttt{segmentation\_attributes\_used} selected below. We can choose
the question with the highest absolute factor loading for each factor.
For example, when we use 5 factors with the varimax rotation we can
select questions Q1.9 (I see my boat as a status symbol), Q1.18
(Boating gives me a feeling of adventure), Q1.4 (I only consider buying
a boat from a reputable brand), Q1.11 (I tend to perform minor boat
repairs and maintenance on my own) and Q1.2 (When buying a boat, getting
the lowest price is more important than the boat brand) - try it. These
are columns 10, 19, 5, 12, and 3, respectively, of the data matrix
\texttt{Projectdata}.
\hypertarget{step-4-define-similarity-measure}{%
\subsection{Step 4: Define similarity
measure}\label{step-4-define-similarity-measure}}
We need to define a distance metric that measures how different people
(observations in general) are from each other. This can be an important
choice. Here are the pairwise distances between the first 10
observations, using the distance metric we selected:
\begin{center}
\begin{longtable}{lrrrrrrrrrr}
\toprule
& Obs.01 & Obs.02 & Obs.03 & Obs.04 & Obs.05 & Obs.06 & Obs.07 & Obs.08 & Obs.09 & Obs.10\\
\midrule
Obs.01 & 0 & & & & & & & & & \\
Obs.02 & 4 & 0 & & & & & & & & \\
Obs.03 & 4 & 3 & 0 & & & & & & & \\
Obs.04 & 4 & 4 & 4 & 0 & & & & & & \\
Obs.05 & 4 & 4 & 5 & 4 & 0 & & & & & \\
\addlinespace
Obs.06 & 4 & 3 & 3 & 4 & 4 & 0 & & & & \\
Obs.07 & 6 & 5 & 6 & 6 & 4 & 5 & 0 & & & \\
Obs.08 & 4 & 3 & 4 & 4 & 4 & 4 & 5 & 0 & & \\
Obs.09 & 5 & 4 & 5 & 4 & 3 & 4 & 4 & 3 & 0 & \\
Obs.10 & 8 & 6 & 7 & 7 & 8 & 5 & 7 & 7 & 7 & 0\\
\bottomrule
\end{longtable}\end{center}
\hypertarget{step-5-visualize-pair-wise-distances}{%
\subsection{Step 5: Visualize Pair-wise
Distances}\label{step-5-visualize-pair-wise-distances}}
We can see the histogram of, say, the first 2 variables (can you change
the code chunk in the raw .Rmd file to see other variables?)
\begin{center}\includegraphics{MarketSegmentationProcessInClass_files/figure-latex/unnamed-chunk-16-1} \end{center}
or the histogram of all pairwise distances for the Euclidean distance:
\begin{center}\includegraphics{MarketSegmentationProcessInClass_files/figure-latex/unnamed-chunk-17-1} \end{center}
\hypertarget{step-6-method-and-number-of-segments}{%
\subsection{Step 6: Method and Number of
Segments}\label{step-6-method-and-number-of-segments}}
We need to select the clustering method to use, as well as the number of
clusters. It may be useful to see the dendrogram from hierarchical
clustering to get a quick idea of how the data may be segmented and how
many segments there may be. Here is the dendrogram for our data:
\begin{center}\includegraphics{MarketSegmentationProcessInClass_files/figure-latex/unnamed-chunk-18-1} \end{center}
We can also plot the ``distances'' traveled before we need to merge any
of the lower and smaller clusters into larger ones - the heights of the
tree branches that link the clusters as we traverse the tree from its
leaves to its root. If we have n observations, this plot has n-1
numbers; we show the first 20 here.
\begin{center}\includegraphics{MarketSegmentationProcessInClass_files/figure-latex/unnamed-chunk-19-1} \end{center}
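A hedged sketch of how such a plot can be produced from the
\texttt{hclust} output (using the \texttt{hc} object sketched above):

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{\# Heights of the last 20 merges, largest first (a sketch)}
\FunctionTok{plot}\NormalTok{(}\FunctionTok{rev}\NormalTok{(hc}\SpecialCharTok{$}\NormalTok{height)[}\DecValTok{1}\SpecialCharTok{:}\DecValTok{20}\NormalTok{], }\AttributeTok{type =} \StringTok{"b"}\NormalTok{,}
\NormalTok{     }\AttributeTok{xlab =} \StringTok{"Number of clusters"}\NormalTok{, }\AttributeTok{ylab =} \StringTok{"Merge height"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}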
Here is the segment membership of the first 10 respondents if we use
hierarchical clustering:
\begin{center}
\begin{longtable}{rr}
\toprule
Observation Number & Cluster\_Membership\\
\midrule
1 & 1\\
2 & 2\\
3 & 1\\
4 & 3\\
5 & 4\\
\addlinespace
6 & 1\\
7 & 4\\
8 & 2\\
9 & 4\\
10 & 3\\
\bottomrule
\end{longtable}\end{center}
while this is the segment membership if we use k-means:
\begin{center}
\begin{longtable}{rr}
\toprule
Observation Number & Cluster\_Membership\\
\midrule
1 & 5\\
2 & 5\\
3 & 5\\
4 & 5\\
5 & 5\\
\addlinespace
6 & 2\\
7 & 5\\
8 & 5\\
9 & 5\\
10 & 4\\
\bottomrule
\end{longtable}\end{center}
\hypertarget{step-7-profile-and-interpret-the-segments}{%
\subsection{Step 7: Profile and interpret the
segments}\label{step-7-profile-and-interpret-the-segments}}
In market segmentation one may use variables to \textbf{profile} the
segments that are not (necessarily) the same as those used to
\textbf{segment} the market: the latter may be, for example,
attitude/needs related (you define segments based on what the customers
``need''), while the former may be any information that allows a company
to identify the defined customer segments (e.g.~demographics, location,
etc.). Of course deciding which variables to use for segmentation and
which to use for profiling (and then for the \textbf{activation} of the
segmentation for business purposes) is largely subjective. In this case
we can use all survey questions for profiling for now - the
\texttt{profile\_attributes\_used} variables selected below.
There are many ways to do the profiling of the segments. For example,
here we show how the \emph{average} answers of the respondents \emph{in
each segment} compare to the \emph{average answer of all respondents},
using the ratio of the two. The idea is that if in a segment the average
response to a question is very different from the overall average
(e.g.~a ratio far from 1), then that question may indicate something
about the segment relative to the total population.
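A hedged sketch of this computation, using the profiling attributes
selected above and the cluster memberships sketched earlier:

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{\# Segment averages, population averages, and their ratio minus 1 (a sketch)}
\NormalTok{profiling\_data }\OtherTok{=}\NormalTok{ ProjectData[, profile\_attributes\_used]}
\NormalTok{population\_avg }\OtherTok{=} \FunctionTok{colMeans}\NormalTok{(profiling\_data)}
\NormalTok{segment\_avg }\OtherTok{=} \FunctionTok{aggregate}\NormalTok{(profiling\_data, }\AttributeTok{by =} \FunctionTok{list}\NormalTok{(memberships\_hclust), }\AttributeTok{FUN =}\NormalTok{ mean)}
\NormalTok{ratios }\OtherTok{=} \FunctionTok{sweep}\NormalTok{(}\FunctionTok{t}\NormalTok{(segment\_avg[, }\SpecialCharTok{{-}}\DecValTok{1}\NormalTok{]), }\DecValTok{1}\NormalTok{, population\_avg, }\StringTok{"/"}\NormalTok{) }\SpecialCharTok{{-}} \DecValTok{1}
\end{Highlighting}
\end{Shaded}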
Here are, for example, the profiles of the segments using the clusters
found above. First let's see just the average answer people gave to each
question, for the different segments as well as for the total population:
\begin{center}
\begin{longtable}{lrrrrrrrr}
\toprule
& Population & Seg.1 & Seg.2 & Seg.3 & Seg.4 & Seg.5 & Seg.6 & Seg.7\\
\midrule
Q1.1 & 4.03 & 4.01 & 4.20 & 3.84 & 4.41 & 4.41 & 3.73 & 3.83\\
Q1.2 & 2.89 & 2.29 & 2.74 & 3.77 & 2.63 & 4.33 & 2.90 & 3.04\\
Q1.3 & 3.12 & 3.56 & 3.03 & 3.52 & 3.92 & 4.23 & 2.71 & 2.37\\
Q1.4 & 3.89 & 4.26 & 3.98 & 3.65 & 4.23 & 4.36 & 3.56 & 3.57\\
Q1.5 & 3.55 & 3.96 & 3.56 & 3.61 & 4.16 & 4.28 & 3.24 & 2.93\\
\addlinespace
Q1.6 & 3.95 & 4.27 & 4.00 & 3.89 & 4.69 & 4.47 & 3.57 & 3.51\\
Q1.7 & 3.67 & 4.17 & 3.66 & 3.74 & 4.53 & 4.45 & 3.29 & 2.94\\
Q1.8 & 3.74 & 3.25 & 3.85 & 3.73 & 3.81 & 4.41 & 3.59 & 3.94\\
Q1.9 & 2.89 & 3.36 & 2.73 & 3.46 & 3.67 & 4.24 & 2.58 & 1.98\\
Q1.10 & 3.37 & 3.16 & 3.28 & 3.65 & 3.47 & 4.31 & 3.47 & 3.08\\
\addlinespace
Q1.11 & 3.46 & 3.04 & 3.98 & 3.52 & 4.24 & 4.26 & 2.08 & 4.06\\
Q1.12 & 2.86 & 2.16 & 2.74 & 3.68 & 2.11 & 4.42 & 2.85 & 3.42\\
Q1.13 & 3.02 & 3.56 & 2.87 & 3.56 & 3.86 & 4.38 & 2.70 & 2.06\\
Q1.14 & 3.25 & 3.67 & 3.19 & 3.64 & 4.13 & 4.31 & 2.79 & 2.51\\
Q1.15 & 3.63 & 3.94 & 3.67 & 3.75 & 4.43 & 4.37 & 3.16 & 3.11\\
\addlinespace
Q1.16 & 3.10 & 3.34 & 3.20 & 3.48 & 3.95 & 4.32 & 2.43 & 2.52\\
Q1.17 & 3.08 & 3.20 & 3.14 & 3.42 & 3.87 & 4.30 & 2.52 & 2.63\\
Q1.18 & 4.12 & 4.38 & 4.17 & 3.91 & 4.72 & 4.43 & 3.82 & 3.84\\
Q1.19 & 4.20 & 4.44 & 4.28 & 3.79 & 4.71 & 4.45 & 3.87 & 4.09\\
Q1.20 & 3.16 & 3.40 & 3.23 & 3.47 & 3.99 & 4.26 & 2.57 & 2.59\\
\addlinespace
Q1.21 & 4.25 & 4.50 & 4.32 & 3.88 & 4.79 & 4.44 & 3.96 & 4.06\\
Q1.22 & 4.01 & 4.27 & 4.08 & 3.84 & 4.60 & 4.37 & 3.64 & 3.74\\
Q1.23 & 3.56 & 3.75 & 3.69 & 3.70 & 4.67 & 4.19 & 3.00 & 2.94\\
Q1.24 & 4.11 & 4.47 & 4.16 & 3.77 & 4.64 & 4.45 & 3.83 & 3.78\\
Q1.25 & 3.79 & 4.08 & 3.87 & 3.85 & 4.54 & 4.49 & 3.24 & 3.42\\
\addlinespace
Q1.26 & 2.95 & 3.45 & 2.81 & 3.63 & 4.00 & 4.36 & 2.49 & 1.94\\
Q1.27 & 3.16 & 3.58 & 3.14 & 3.87 & 4.25 & 4.40 & 2.52 & 2.25\\
Q1.28 & 3.31 & 3.64 & 3.32 & 3.71 & 4.29 & 4.37 & 2.79 & 2.55\\
Q1.29 & 4.03 & 4.20 & 4.07 & 3.80 & 4.56 & 4.53 & 3.70 & 3.90\\
Q2 & 0.90 & 0.93 & 0.91 & 0.97 & 0.92 & 1.18 & 0.77 & 0.92\\
\addlinespace
Q2.Cluster & 0.74 & 0.75 & 0.77 & 0.73 & 0.78 & 0.75 & 0.65 & 0.81\\
Q3 & 4.15 & 4.25 & 4.14 & 4.25 & 4.46 & 4.60 & 4.02 & 3.88\\
Q4 & 3.92 & 4.39 & 3.67 & 4.60 & 4.40 & 4.45 & 3.90 & 3.16\\
Q5 & 3.25 & 3.80 & 3.05 & 3.84 & 4.37 & 4.77 & 2.78 & 2.35\\
Q6 & 22.83 & 24.48 & 22.19 & 22.84 & 26.09 & 24.35 & 22.75 & 20.07\\
\addlinespace
Q7.1 & 2.23 & 1.95 & 2.21 & 2.84 & 2.32 & 3.11 & 2.00 & 2.28\\
Q7.2 & 4.00 & 4.25 & 4.00 & 3.73 & 4.17 & 4.00 & 3.95 & 3.88\\
Q7.3 & 3.80 & 3.95 & 3.85 & 3.70 & 4.10 & 3.98 & 3.68 & 3.52\\
Q7.4 & 3.67 & 3.84 & 3.69 & 3.72 & 3.96 & 4.03 & 3.53 & 3.37\\
Q8 & 2.31 & 2.44 & 2.46 & 2.08 & 2.66 & 2.32 & 1.95 & 2.27\\
\addlinespace
Q9.1 & 3.57 & 3.28 & 3.70 & 3.60 & 3.87 & 4.03 & 3.23 & 3.75\\
Q9.2 & 3.41 & 3.63 & 3.38 & 3.60 & 3.72 & 3.92 & 3.20 & 3.11\\
Q9.3 & 3.72 & 4.05 & 3.66 & 3.72 & 3.90 & 4.07 & 3.68 & 3.35\\
Q9.4 & 3.19 & 3.38 & 3.16 & 3.52 & 3.51 & 3.92 & 3.05 & 2.68\\
Q9.5 & 3.51 & 3.84 & 3.45 & 3.56 & 3.87 & 3.94 & 3.47 & 2.93\\
\addlinespace
Q10 & 46.25 & 52.82 & 48.54 & 40.06 & 55.10 & 45.82 & 41.96 & 38.15\\
Q11 & 1.45 & 1.57 & 1.41 & 1.34 & 1.38 & 1.40 & 1.57 & 1.35\\
Q12 & 13.42 & 14.08 & 13.34 & 12.89 & 13.99 & 12.66 & 13.56 & 12.88\\
Q13 & 2.08 & 2.23 & 1.99 & 2.28 & 2.17 & 2.60 & 2.09 & 1.80\\
Q14 & 2.27 & 2.21 & 2.32 & 1.78 & 1.86 & 1.68 & 2.45 & 2.65\\
\addlinespace
Q15 & 2.54 & 2.38 & 2.66 & 1.79 & 2.32 & 1.74 & 2.67 & 3.06\\
Q16 & 24.77 & 25.42 & 24.30 & 21.25 & 23.82 & 23.27 & 26.38 & 25.75\\
Q16.1 & 3.66 & 3.72 & 3.71 & 3.81 & 3.88 & 4.19 & 3.44 & 3.41\\
Q16.2 & 3.56 & 3.70 & 3.53 & 3.75 & 3.88 & 4.28 & 3.30 & 3.34\\
Q16.3 & 3.72 & 3.87 & 3.71 & 3.76 & 4.13 & 4.38 & 3.43 & 3.51\\
\addlinespace
Q16.4 & 3.76 & 3.98 & 3.74 & 3.78 & 4.18 & 4.31 & 3.46 & 3.59\\
Q16.5 & 3.71 & 3.83 & 3.71 & 3.85 & 4.04 & 4.21 & 3.47 & 3.48\\
Q16.6 & 3.82 & 4.04 & 3.81 & 3.92 & 4.21 & 4.41 & 3.55 & 3.56\\
Q16.7 & 3.91 & 4.13 & 3.91 & 3.96 & 4.21 & 4.48 & 3.65 & 3.67\\
Q16.8 & 3.91 & 4.03 & 3.91 & 3.90 & 4.25 & 4.38 & 3.66 & 3.79\\
\addlinespace
Q16.9 & 3.91 & 4.05 & 3.92 & 3.88 & 4.20 & 4.32 & 3.71 & 3.77\\
Q16.10 & 3.83 & 4.06 & 3.84 & 3.74 & 4.17 & 3.92 & 3.64 & 3.69\\
Q16.11 & 3.65 & 3.74 & 3.62 & 3.84 & 3.99 & 4.30 & 3.45 & 3.41\\
Q16.12 & 3.56 & 3.85 & 3.54 & 3.55 & 3.97 & 3.69 & 3.45 & 3.17\\
Q16.13 & 3.66 & 3.93 & 3.62 & 3.79 & 4.02 & 4.33 & 3.45 & 3.31\\
\addlinespace
Q16.14 & 3.75 & 4.04 & 3.71 & 3.77 & 4.19 & 4.36 & 3.49 & 3.46\\
Q16.15 & 3.88 & 4.14 & 3.88 & 3.81 & 4.28 & 4.33 & 3.63 & 3.65\\
Q16.16 & 3.67 & 3.94 & 3.62 & 3.75 & 4.09 & 4.34 & 3.45 & 3.31\\
Q16.17 & 3.85 & 4.02 & 3.84 & 3.83 & 4.23 & 4.35 & 3.60 & 3.69\\
Q16.18 & 3.88 & 4.04 & 3.88 & 3.85 & 4.23 & 4.35 & 3.65 & 3.74\\
\addlinespace
Q16.19 & 3.89 & 4.06 & 3.90 & 3.83 & 4.29 & 4.29 & 3.63 & 3.74\\
Q16.20 & 3.97 & 4.14 & 3.99 & 3.85 & 4.33 & 4.38 & 3.74 & 3.82\\
Q16.21 & 3.91 & 4.08 & 3.90 & 3.82 & 4.27 & 4.37 & 3.69 & 3.82\\
Q16.22 & 3.93 & 3.98 & 3.96 & 3.81 & 4.29 & 4.36 & 3.67 & 3.91\\
Q16.23 & 3.99 & 4.14 & 4.01 & 3.90 & 4.33 & 4.33 & 3.75 & 3.88\\
\addlinespace
Q16.24 & 3.31 & 3.28 & 3.27 & 3.65 & 3.31 & 4.15 & 3.17 & 3.23\\
Q16.25 & 3.65 & 3.85 & 3.66 & 3.74 & 4.09 & 4.27 & 3.34 & 3.34\\
Q16.26 & 3.90 & 4.07 & 3.90 & 3.86 & 4.26 & 4.38 & 3.66 & 3.72\\
Q16.27 & 3.63 & 3.81 & 3.60 & 3.78 & 4.05 & 4.36 & 3.37 & 3.31\\
Q17 & 0.33 & 0.41 & 0.36 & 0.30 & 0.52 & 0.45 & 0.19 & 0.28\\
\addlinespace
Q18 & 0.50 & 0.45 & 0.53 & 0.30 & 0.41 & 0.31 & 0.55 & 0.62\\
\bottomrule
\end{longtable}\end{center}
We can also ``visualize'' the segments using \textbf{snake plots} for
each cluster. For example, we can plot the means of the profiling
variables for each of our clusters to better visualize differences
between segments. For better visualization we plot the standardized
profiling variables.
\begin{center}\includegraphics{MarketSegmentationProcessInClass_files/figure-latex/unnamed-chunk-23-1} \end{center}
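A hedged sketch of such a snake plot, reusing the illustrative objects
from the sketches above:

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{\# Standardized profiling{-}variable means, one line per segment (a sketch)}
\NormalTok{standardized }\OtherTok{=} \FunctionTok{scale}\NormalTok{(profiling\_data)}
\NormalTok{seg\_means }\OtherTok{=} \FunctionTok{aggregate}\NormalTok{(standardized, }\AttributeTok{by =} \FunctionTok{list}\NormalTok{(memberships\_hclust), }\AttributeTok{FUN =}\NormalTok{ mean)}
\FunctionTok{matplot}\NormalTok{(}\FunctionTok{t}\NormalTok{(seg\_means[, }\SpecialCharTok{{-}}\DecValTok{1}\NormalTok{]), }\AttributeTok{type =} \StringTok{"l"}\NormalTok{,}
\NormalTok{        }\AttributeTok{xlab =} \StringTok{"Profiling variables"}\NormalTok{, }\AttributeTok{ylab =} \StringTok{"Standardized mean"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}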
We can also compare the averages of the profiling variables of each
segment relative to the average of the variables across the whole
population. This can also help us better understand whether there are
indeed clusters in our data (e.g.~if all segments are much like the
overall population, there may be no segments). For example, we can
measure the ratio of the average for each cluster to the average of the
population, minus 1 (e.g.~\texttt{avg(cluster)} \texttt{/}
\texttt{avg(population)} \texttt{-1}), for each segment and variable:
\begin{center}
\begin{longtable}{lrrrrrrr}
\toprule
& Seg.1 & Seg.2 & Seg.3 & Seg.4 & Seg.5 & Seg.6 & Seg.7\\
\midrule
Q1.1 & -0.01 & 0.04 & -0.05 & 0.09 & 0.10 & -0.07 & -0.05\\
Q1.2 & -0.21 & -0.05 & 0.30 & -0.09 & 0.50 & 0.01 & 0.05\\
Q1.3 & 0.14 & -0.03 & 0.13 & 0.26 & 0.36 & -0.13 & -0.24\\
Q1.4 & 0.10 & 0.02 & -0.06 & 0.09 & 0.12 & -0.08 & -0.08\\
Q1.5 & 0.11 & 0.00 & 0.02 & 0.17 & 0.20 & -0.09 & -0.17\\
\addlinespace
Q1.6 & 0.08 & 0.01 & -0.02 & 0.19 & 0.13 & -0.10 & -0.11\\
Q1.7 & 0.13 & 0.00 & 0.02 & 0.23 & 0.21 & -0.10 & -0.20\\
Q1.8 & -0.13 & 0.03 & 0.00 & 0.02 & 0.18 & -0.04 & 0.05\\
Q1.9 & 0.16 & -0.05 & 0.20 & 0.27 & 0.47 & -0.11 & -0.31\\
Q1.10 & -0.06 & -0.03 & 0.08 & 0.03 & 0.28 & 0.03 & -0.09\\
\addlinespace
Q1.11 & -0.12 & 0.15 & 0.02 & 0.23 & 0.23 & -0.40 & 0.17\\
Q1.12 & -0.24 & -0.04 & 0.29 & -0.26 & 0.55 & 0.00 & 0.20\\
Q1.13 & 0.18 & -0.05 & 0.18 & 0.28 & 0.45 & -0.11 & -0.32\\
Q1.14 & 0.13 & -0.02 & 0.12 & 0.27 & 0.33 & -0.14 & -0.23\\
Q1.15 & 0.09 & 0.01 & 0.03 & 0.22 & 0.20 & -0.13 & -0.14\\
\addlinespace
Q1.16 & 0.08 & 0.03 & 0.12 & 0.27 & 0.39 & -0.22 & -0.19\\
Q1.17 & 0.04 & 0.02 & 0.11 & 0.25 & 0.39 & -0.18 & -0.15\\
Q1.18 & 0.06 & 0.01 & -0.05 & 0.15 & 0.07 & -0.07 & -0.07\\
Q1.19 & 0.06 & 0.02 & -0.10 & 0.12 & 0.06 & -0.08 & -0.03\\
Q1.20 & 0.08 & 0.02 & 0.10 & 0.26 & 0.35 & -0.19 & -0.18\\
\addlinespace
Q1.21 & 0.06 & 0.02 & -0.09 & 0.13 & 0.04 & -0.07 & -0.05\\
Q1.22 & 0.07 & 0.02 & -0.04 & 0.15 & 0.09 & -0.09 & -0.07\\
Q1.23 & 0.05 & 0.03 & 0.04 & 0.31 & 0.18 & -0.16 & -0.17\\
Q1.24 & 0.09 & 0.01 & -0.08 & 0.13 & 0.08 & -0.07 & -0.08\\
Q1.25 & 0.08 & 0.02 & 0.02 & 0.20 & 0.18 & -0.15 & -0.10\\
\addlinespace
Q1.26 & 0.17 & -0.05 & 0.23 & 0.36 & 0.48 & -0.16 & -0.34\\
Q1.27 & 0.14 & -0.01 & 0.22 & 0.35 & 0.40 & -0.20 & -0.29\\
Q1.28 & 0.10 & 0.00 & 0.12 & 0.30 & 0.32 & -0.16 & -0.23\\
Q1.29 & 0.04 & 0.01 & -0.06 & 0.13 & 0.12 & -0.08 & -0.03\\
Q2 & 0.03 & 0.01 & 0.08 & 0.03 & 0.31 & -0.14 & 0.02\\
\addlinespace
Q2.Cluster & 0.01 & 0.04 & -0.02 & 0.05 & 0.01 & -0.13 & 0.09\\
Q3 & 0.02 & 0.00 & 0.02 & 0.07 & 0.11 & -0.03 & -0.07\\
Q4 & 0.12 & -0.06 & 0.18 & 0.12 & 0.14 & 0.00 & -0.19\\
Q5 & 0.17 & -0.06 & 0.18 & 0.34 & 0.47 & -0.15 & -0.28\\
Q6 & 0.07 & -0.03 & 0.00 & 0.14 & 0.07 & 0.00 & -0.12\\
\addlinespace
Q7.1 & -0.13 & -0.01 & 0.27 & 0.04 & 0.39 & -0.10 & 0.02\\
Q7.2 & 0.06 & 0.00 & -0.07 & 0.04 & 0.00 & -0.01 & -0.03\\
Q7.3 & 0.04 & 0.01 & -0.03 & 0.08 & 0.05 & -0.03 & -0.07\\
Q7.4 & 0.04 & 0.00 & 0.01 & 0.08 & 0.10 & -0.04 & -0.08\\
Q8 & 0.06 & 0.06 & -0.10 & 0.15 & 0.00 & -0.15 & -0.02\\
\addlinespace
Q9.1 & -0.08 & 0.03 & 0.01 & 0.08 & 0.13 & -0.10 & 0.05\\
Q9.2 & 0.06 & -0.01 & 0.06 & 0.09 & 0.15 & -0.06 & -0.09\\
Q9.3 & 0.09 & -0.02 & 0.00 & 0.05 & 0.09 & -0.01 & -0.10\\
Q9.4 & 0.06 & -0.01 & 0.10 & 0.10 & 0.23 & -0.04 & -0.16\\
Q9.5 & 0.10 & -0.02 & 0.01 & 0.10 & 0.12 & -0.01 & -0.16\\
\addlinespace
Q10 & 0.14 & 0.05 & -0.13 & 0.19 & -0.01 & -0.09 & -0.18\\
Q11 & 0.08 & -0.03 & -0.07 & -0.05 & -0.03 & 0.09 & -0.07\\
Q12 & 0.05 & -0.01 & -0.04 & 0.04 & -0.06 & 0.01 & -0.04\\
Q13 & 0.07 & -0.04 & 0.09 & 0.04 & 0.25 & 0.00 & -0.14\\
Q14 & -0.03 & 0.02 & -0.21 & -0.18 & -0.26 & 0.08 & 0.17\\
\addlinespace
Q15 & -0.06 & 0.05 & -0.30 & -0.09 & -0.32 & 0.05 & 0.20\\
Q16 & 0.03 & -0.02 & -0.14 & -0.04 & -0.06 & 0.07 & 0.04\\
Q16.1 & 0.02 & 0.01 & 0.04 & 0.06 & 0.15 & -0.06 & -0.07\\
Q16.2 & 0.04 & -0.01 & 0.05 & 0.09 & 0.20 & -0.07 & -0.06\\
Q16.3 & 0.04 & 0.00 & 0.01 & 0.11 & 0.18 & -0.08 & -0.06\\
\addlinespace
Q16.4 & 0.06 & -0.01 & 0.01 & 0.11 & 0.15 & -0.08 & -0.04\\
Q16.5 & 0.03 & 0.00 & 0.04 & 0.09 & 0.14 & -0.06 & -0.06\\
Q16.6 & 0.06 & 0.00 & 0.03 & 0.10 & 0.15 & -0.07 & -0.07\\
Q16.7 & 0.06 & 0.00 & 0.01 & 0.08 & 0.15 & -0.07 & -0.06\\
Q16.8 & 0.03 & 0.00 & 0.00 & 0.09 & 0.12 & -0.06 & -0.03\\
\addlinespace
Q16.9 & 0.03 & 0.00 & -0.01 & 0.07 & 0.10 & -0.05 & -0.03\\
Q16.10 & 0.06 & 0.00 & -0.02 & 0.09 & 0.02 & -0.05 & -0.04\\
Q16.11 & 0.02 & -0.01 & 0.05 & 0.09 & 0.18 & -0.05 & -0.07\\
Q16.12 & 0.08 & 0.00 & 0.00 & 0.11 & 0.04 & -0.03 & -0.11\\
Q16.13 & 0.07 & -0.01 & 0.03 & 0.10 & 0.18 & -0.06 & -0.10\\
\addlinespace
Q16.14 & 0.08 & -0.01 & 0.01 & 0.12 & 0.16 & -0.07 & -0.08\\
Q16.15 & 0.07 & 0.00 & -0.02 & 0.10 & 0.12 & -0.06 & -0.06\\
Q16.16 & 0.07 & -0.01 & 0.02 & 0.12 & 0.18 & -0.06 & -0.10\\
Q16.17 & 0.05 & 0.00 & -0.01 & 0.10 & 0.13 & -0.06 & -0.04\\
Q16.18 & 0.04 & 0.00 & -0.01 & 0.09 & 0.12 & -0.06 & -0.04\\
\addlinespace
Q16.19 & 0.04 & 0.00 & -0.02 & 0.10 & 0.10 & -0.07 & -0.04\\
Q16.20 & 0.04 & 0.00 & -0.03 & 0.09 & 0.10 & -0.06 & -0.04\\
Q16.21 & 0.04 & 0.00 & -0.02 & 0.09 & 0.12 & -0.06 & -0.02\\
Q16.22 & 0.01 & 0.01 & -0.03 & 0.09 & 0.11 & -0.07 & -0.01\\
Q16.23 & 0.04 & 0.01 & -0.02 & 0.09 & 0.08 & -0.06 & -0.03\\
\addlinespace
Q16.24 & -0.01 & -0.01 & 0.10 & 0.00 & 0.25 & -0.04 & -0.02\\
Q16.25 & 0.06 & 0.00 & 0.03 & 0.12 & 0.17 & -0.08 & -0.08\\
Q16.26 & 0.05 & 0.00 & -0.01 & 0.09 & 0.12 & -0.06 & -0.04\\
Q16.27 & 0.05 & -0.01 & 0.04 & 0.12 & 0.20 & -0.07 & -0.09\\
Q17 & 0.24 & 0.08 & -0.09 & 0.56 & 0.36 & -0.44 & -0.17\\
\addlinespace
Q18 & -0.10 & 0.06 & -0.39 & -0.18 & -0.38 & 0.11 & 0.24\\
\bottomrule
\end{longtable}\end{center}
\textbf{Questions}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
What do the numbers in the last table indicate? Which numbers are the
most informative?
\item
Based on the tables and snake plot above, what are some key features
of each of the segments of this solution?
\end{enumerate}
\textbf{Answers}
\begin{itemize}
\tightlist
\item
\item
\item
\item
\item
\item
\item
\item
\item
\item
\end{itemize}
\hypertarget{step-8-robustness-analysis}{%
\subsection{Step 8: Robustness
Analysis}\label{step-8-robustness-analysis}}
We should also consider the robustness of our analysis as we change the
clustering method and parameters. Once we are comfortable with the
solution we can finally answer our first business questions:
\textbf{Questions}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
How many segments are there in our market? How many do you select and
why? Try a few and explain your final choice based on a) statistical
arguments, b) interpretation arguments, and c) business arguments
(\textbf{you need to consider all three types of arguments})
\item
Can you describe the segments you found based on the profiles?
\item
What if you change the number of factors and in general you
\emph{iterate the whole analysis}? \textbf{Iterations} are key in data
science.
\item
Can you now answer the
\href{http://inseaddataanalytics.github.io/INSEADAnalytics/Boats-A-prerelease.pdf}{Boats
case questions}? What business decisions do you recommend to this
company based on your analysis?
\end{enumerate}
\textbf{Answers}
\begin{itemize}
\tightlist
\item
\item
\item
\item
\item
\item
\item
\item
\item
\item
\end{itemize}
\clearpage
\hypertarget{part-3-purchase-drivers}{%
\section{Part 3: Purchase Drivers}\label{part-3-purchase-drivers}}
We will now use the
\href{http://inseaddataanalytics.github.io/INSEADAnalytics/CourseSessions/Sessions67/ClassificationAnalysisReading.html}{classification
analysis} methods to understand the key purchase drivers for boats (a
similar analysis can be done for recommendation drivers). For simplicity
we do not follow the ``generic'' steps of classification discussed in
that reading, and only consider the classification and purchase drivers
analysis for the segments we found above.
We are interested in understanding the purchase drivers, hence our
\textbf{dependent} variable is column 82 of the Boats data (Q18) - why
is that? We will use only the subquestions of \textbf{Question 16} of
the case for now, and also select some of the parameters for this part
of the analysis:
\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{\# Please ENTER the class (dependent) variable:}
\CommentTok{\# Please use numbers, not column names! e.g. 82 uses the 82nd column as the dependent variable.}
\CommentTok{\# YOU NEED TO MAKE SURE THAT THE DEPENDENT VARIABLE TAKES ONLY 2 VALUES: 0 and 1!!!}
\NormalTok{dependent\_variable}\OtherTok{=} \DecValTok{82}
\CommentTok{\# Please ENTER the attributes to use as independent variables }
\CommentTok{\# Please use numbers, not column names! e.g. c(1:5, 7, 8) uses columns 1,2,3,4,5,7,8}
\NormalTok{independent\_variables}\OtherTok{=} \FunctionTok{c}\NormalTok{(}\DecValTok{54}\SpecialCharTok{:}\DecValTok{80}\NormalTok{) }\CommentTok{\# use 54{-}80 for boats}
\CommentTok{\# Please ENTER the profit/cost values for the correctly and wrong classified data:}
\NormalTok{actual\_1\_predict\_1 }\OtherTok{=} \DecValTok{100}
\NormalTok{actual\_1\_predict\_0 }\OtherTok{=} \SpecialCharTok{{-}}\DecValTok{75}
\NormalTok{actual\_0\_predict\_1 }\OtherTok{=} \SpecialCharTok{{-}}\DecValTok{50}
\NormalTok{actual\_0\_predict\_0 }\OtherTok{=} \DecValTok{0}
\CommentTok{\# Please ENTER the probability threshold above which an observation }
\CommentTok{\# is predicted as class 1:}
\NormalTok{Probability\_Threshold}\OtherTok{=}\DecValTok{50} \CommentTok{\# between 1 and 99\%}
\CommentTok{\# Please ENTER the percentage of data used for estimation}
\NormalTok{estimation\_data\_percent }\OtherTok{=} \DecValTok{80}
\NormalTok{validation\_data\_percent }\OtherTok{=} \DecValTok{10}
\CommentTok{\# Please enter 0 if you want to "randomly" split the data in estimation and validation/test}
\NormalTok{random\_sampling }\OtherTok{=} \DecValTok{0}
\CommentTok{\# Tree parameter}
\CommentTok{\# PLEASE ENTER THE Tree (CART) complexity control cp (e.g. 0.001 to 0.02, depending on the data)}
\NormalTok{CART\_cp }\OtherTok{=} \FloatTok{0.01}
\CommentTok{\# Please enter the minimum size of a segment for the analysis to be done only for that segment}
\NormalTok{min\_segment }\OtherTok{=} \DecValTok{100}
\end{Highlighting}
\end{Shaded}
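To illustrate how the last parameter is used, here is a minimal R
sketch (the vector \texttt{probs} of predicted class-1 probabilities on
the test data is hypothetical):

\begin{verbatim}
# Sketch: predict class 1 whenever the estimated probability of
# class 1 exceeds the chosen threshold.
threshold <- Probability_Threshold / 100  # e.g. 50 -> 0.5
predicted_class <- ifelse(probs > threshold, 1, 0)
\end{verbatim}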
\textbf{Questions}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
How do you select the profit/cost values for the analysis? Do the
values 100, -75, -50, 0 above relate to the final business
decisions? How?
\item
What does the variable ``Probability\_Threshold'' affect? Does it
relate to the final business decisions? How?
\end{enumerate}
\textbf{Answers}
\begin{itemize}
\tightlist
\item
\item
\item
\item
\item
\item
\item
\item
\item
\item
\end{itemize}
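To make the role of the four profit/cost parameters concrete, here is
a minimal R sketch of how they combine with a classifier's confusion
matrix into a total profit (the counts below are hypothetical, not
taken from the Boats data):

\begin{verbatim}
# Hypothetical test-data counts, named n_<actual><predicted>:
n11 <- 40; n10 <- 15; n01 <- 25; n00 <- 120
profit <- n11 * actual_1_predict_1 + n10 * actual_1_predict_0 +
          n01 * actual_0_predict_1 + n00 * actual_0_predict_0
profit  # 40*100 + 15*(-75) + 25*(-50) + 0 = 1625
\end{verbatim}

Changing any of the four values (or the probability threshold) changes
which classifier, and which targeting decision, is the most profitable.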
We will use two classification trees and logistic regression. You can
select the ``complexity'' control for one of the classification trees in this
code chunk of the raw .Rmd file:
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{CART\_control }\OtherTok{=} \FloatTok{0.001}
\end{Highlighting}
\end{Shaded}
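As a sketch of how this parameter enters the estimation, assuming the
trees are fitted with the \texttt{rpart} package (the data frame
\texttt{estimation\_data} and its 0/1 outcome column \texttt{dependent}
are hypothetical):

\begin{verbatim}
library(rpart)
# Smaller cp values allow more splits, hence larger trees.
large_tree <- rpart(dependent ~ ., data = estimation_data,
                    method = "class",
                    control = rpart.control(cp = CART_control))
small_tree <- rpart(dependent ~ ., data = estimation_data,
                    method = "class",
                    control = rpart.control(cp = CART_cp))
\end{verbatim}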
\textbf{Question}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
How can this parameter affect the final results? What business
implications can this parameter choice have?
\end{enumerate}
\textbf{Answer}
\begin{itemize}
\tightlist
\item
\item
\item
\item
\item
\item
\item
\item
\item
\item
\end{itemize}
This is, for example, a ``small tree'' classification:
\begin{center}\includegraphics{MarketSegmentationProcessInClass_files/figure-latex/unnamed-chunk-28-1} \end{center}
After also running the large tree and the logistic regression
classifiers, we can then check how much ``weight'' these three methods
put on the different purchase drivers (Q16 of the survey):
\begin{center}
\begin{longtable}{lrrr}
\toprule
& CART 1 & CART 2 & Logistic Regr.\\
\midrule
Q16.1 & 0.0000000 & 0.0110197 & 0.0909091\\
Q16.2 & -1.0000000 & -0.8994543 & -0.7727273\\
Q16.3 & 0.1781076 & 0.3994461 & 0.0454545\\
Q16.4 & -0.2729842 & -0.7653571 & -0.2045455\\
Q16.5 & 0.1948052 & 0.6577938 & 0.2500000\\
\addlinespace
Q16.6 & -0.1320648 & -0.1039517 & -0.2954545\\
Q16.7 & 0.0000000 & -0.0545403 & -0.0227273\\
Q16.8 & 0.0723126 & 0.6581984 & 0.4090909\\
Q16.9 & 0.0000000 & 0.0056923 & 0.4772727\\
Q16.10 & 0.0000000 & 0.4917861 & 0.7500000\\
\addlinespace
Q16.11 & -0.3209647 & -0.2301927 & -0.4545455\\
Q16.12 & 0.1404807 & 0.7367238 & 0.7272727\\
Q16.13 & -0.1508388 & -0.3358969 & -0.5454545\\
Q16.14 & -0.2136343 & -0.2026567 & -0.3181818\\
Q16.15 & 0.1340070 & 0.0961085 & 0.2954545\\
\addlinespace
Q16.16 & -0.5755178 & -0.6404725 & -1.0000000\\
Q16.17 & -0.5041929 & -0.8468072 & -0.3863636\\
Q16.18 & -0.0994017 & -0.4040648 & -0.2272727\\
Q16.19 & 0.2862924 & 0.2973946 & 0.4090909\\
Q16.20 & 0.2652031 & 0.2999890 & 0.0681818\\
\addlinespace
Q16.21 & 0.9530697 & 1.0000000 & 0.5000000\\
Q16.22 & 0.6291439 & 0.7394180 & 0.2045455\\
Q16.23 & -0.4365717 & -0.4120430 & -0.1136364\\
Q16.24 & 0.5189457 & 0.3721827 & 0.3409091\\
Q16.25 & -0.1706865 & -0.9380984 & -0.2500000\\
\addlinespace
Q16.26 & 0.3420844 & 0.2453395 & 0.3636364\\
Q16.27 & 0.0000000 & 0.7199355 & 0.4090909\\
\bottomrule
\end{longtable}\end{center}
Finally, if we were to use the estimated classification models on the
test data, we would get the following profit curves (see the raw .Rmd
file to select the business profit parameters).
The profit curve using the small classification tree:
\begin{center}\includegraphics{MarketSegmentationProcessInClass_files/figure-latex/unnamed-chunk-34-1} \end{center}
The profit curve using the large classification tree:
\begin{center}\includegraphics{MarketSegmentationProcessInClass_files/figure-latex/unnamed-chunk-35-1} \end{center}
The profit curve using the logistic regression classifier:
\begin{center}\includegraphics{MarketSegmentationProcessInClass_files/figure-latex/unnamed-chunk-36-1} \end{center}
These are the maximum total profits achieved in the test data by the
three classifiers (without any segment-specific analysis so far).
\begin{center}
\begin{longtable}{lrr}
\toprule
& Percentile & Profit\\
\midrule
Small Tree & 100.00 & 4650\\
Large Tree & 95.04 & 4675\\
Logistic Regression & 98.58 & 4850\\
\bottomrule
\end{longtable}\end{center}
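A profit curve like the ones above can be sketched in R as follows:
observations are ranked by predicted probability, the top fraction is
targeted (predicted as 1), and the resulting profit is accumulated (the
test-data vectors \texttt{probs} and \texttt{actual} are hypothetical):

\begin{verbatim}
ord  <- order(probs, decreasing = TRUE)
gain <- ifelse(actual[ord] == 1, actual_1_predict_1, actual_0_predict_1)
miss <- ifelse(actual[ord] == 1, actual_1_predict_0, actual_0_predict_0)
# Profit when targeting the top k observations, for every k:
profit_curve <- cumsum(gain) + (sum(miss) - cumsum(miss))
best <- which.max(profit_curve)
c(percentile = 100 * best / length(probs), profit = profit_curve[best])
\end{verbatim}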
\clearpage
\hypertarget{part-4-business-decisions}{%
\section{Part 4: Business Decisions}\label{part-4-business-decisions}}
We will now gather the results of the overall process (Parts 1--3) and,
based on them, make business decisions (e.g.~answer the questions of the Boats
case study). Specifically, we will study the purchase drivers for each
segment we found and consider the profit curves of the developed models
on our test data.
\textbf{Final Solution: Segment Specific Analysis}
Let's first see how many observations we have in each of the
segments we selected above:
\setkeys{Gin}{height=\textheight}\adjustbox{width=\linewidth}{
\begin{tabular}{lrrrrrrr}
\toprule
& Segment 1 & Segment 2 & Segment 3 & Segment 4 & Segment 5 & Segment 6 & Segment 7\\
\midrule
Number of Obs. & 365 & 921 & 201 & 252 & 119 & 605 & 350\\
\bottomrule
\end{tabular}}\setkeys{Gin}{height=\maxheight}
This is our final segment-specific analysis and solution. We can now
study the purchase drivers (average answers to Q16 of the survey) for each
segment. They are as follows:
\begin{center}
\begin{longtable}{lrrrrrrr}
\toprule
& Segment 1 & Segment 2 & Segment 3 & Segment 4 & Segment 5 & Segment 6 & Segment 7\\
\midrule
Q16.2 & -0.32 & -0.79 & -0.27 & -0.29 & -0.15 & -0.17 & -0.80\\
Q16.3 & 0.04 & -0.10 & 0.06 & 0.21 & 0.44 & -0.04 & -0.20\\
Q16.4 & -0.54 & -0.34 & 0.21 & -0.08 & 0.76 & -0.04 & 0.37\\
Q16.5 & 0.68 & 0.41 & -0.45 & 0.58 & -0.65 & -0.25 & 0.57\\
Q16.6 & -0.36 & -0.17 & -0.24 & -0.17 & 0.24 & 0.25 & -0.17\\
\addlinespace
Q16.7 & -0.04 & 0.52 & 0.03 & -0.17 & -0.09 & -0.25 & -0.40\\
Q16.8 & 0.32 & 0.14 & 0.03 & -0.29 & 0.24 & 0.29 & 0.87\\
Q16.9 & 0.43 & 0.10 & 0.06 & 0.25 & 0.71 & 0.58 & 0.13\\
Q16.10 & 0.14 & 0.90 & -0.30 & 0.17 & 0.44 & -0.04 & 0.70\\
Q16.11 & -0.04 & -0.45 & 0.67 & -0.67 & -0.15 & -0.58 & 0.17\\
\addlinespace
Q16.12 & 0.36 & 0.59 & 1.00 & 0.71 & 1.00 & 0.17 & -0.27\\
Q16.13 & -0.43 & -0.38 & -0.33 & -0.12 & -0.79 & 0.00 & -0.03\\
Q16.14 & -0.25 & -0.62 & 0.06 & -0.33 & 0.68 & -0.08 & -0.40\\
Q16.15 & 0.79 & -0.14 & 0.45 & -0.04 & -0.26 & 0.25 & 0.40\\
Q16.16 & 0.39 & -0.62 & -0.33 & -1.00 & -0.09 & -0.46 & -0.67\\
\addlinespace
Q16.17 & 0.14 & -0.21 & -0.76 & -0.21 & -0.15 & -0.42 & 0.03\\
Q16.18 & -0.68 & 0.21 & -0.45 & -0.17 & -0.47 & -0.29 & -0.30\\
Q16.19 & -0.54 & 0.41 & -0.24 & 0.67 & 0.29 & 1.00 & 0.07\\
Q16.20 & 0.21 & -0.17 & 0.33 & -0.50 & -0.18 & 0.04 & -0.10\\
Q16.21 & 0.39 & 1.00 & 0.30 & -0.62 & -0.35 & 0.42 & 0.60\\
\addlinespace
Q16.22 & -0.18 & -0.07 & 0.06 & 0.25 & 0.68 & 0.96 & -0.33\\
Q16.23 & -0.32 & 0.17 & -0.18 & 0.46 & 0.59 & -0.79 & 0.10\\
Q16.24 & 0.25 & 0.83 & -0.52 & 0.08 & -0.15 & 0.12 & 0.63\\
Q16.25 & -0.36 & -0.83 & 0.06 & 0.75 & -0.41 & -0.83 & 1.00\\
Q16.26 & 0.54 & 0.34 & -0.06 & 0.58 & -0.18 & 0.67 & 0.00\\
\addlinespace
Q16.27 & 1.00 & 0.34 & 0.03 & 0.96 & -0.38 & 0.33 & 0.67\\
\bottomrule
\end{longtable}\end{center}
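The segment-specific drivers can also be re-estimated with one model per
segment. A minimal R sketch (the data frame \texttt{boats} with the Q16
attributes and the 0/1 outcome \texttt{Q18}, and the membership vector
\texttt{segments}, are hypothetical):

\begin{verbatim}
for (s in sort(unique(segments))) {
  idx <- segments == s
  if (sum(idx) < min_segment) next  # skip segments that are too small
  fit <- glm(Q18 ~ ., data = boats[idx, ], family = binomial)
  cat("Segment", s, "\n"); print(round(coef(fit), 2))
}
\end{verbatim}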
The profit curves for the test data in this case are as follows. The
profit curve using the small classification tree is:
\begin{center}\includegraphics{MarketSegmentationProcessInClass_files/figure-latex/unnamed-chunk-40-1} \end{center}
The profit curve using the large classification tree is:
\begin{center}\includegraphics{MarketSegmentationProcessInClass_files/figure-latex/unnamed-chunk-41-1} \end{center}
The profit curve using the logistic regression classifier:
\begin{center}\includegraphics{MarketSegmentationProcessInClass_files/figure-latex/unnamed-chunk-42-1} \end{center}
These are the maximum total profits achieved in the test data by the
three classifiers with the selected market segmentation solution.
\begin{center}
\begin{longtable}{lrr}
\toprule
& Percentile & Profit\\
\midrule
Small Tree & 100.00 & 4650\\
Large Tree & 100.00 & 4650\\
Logistic Regression & 87.94 & 5225\\
\bottomrule
\end{longtable}\end{center}
\textbf{Questions:}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
What are the main purchase drivers for the segments and solution you
found?
\item
How different are the purchase drivers you find when you use
segmentation versus when you study all customers as ``one segment''?
Why?
\item
Based on the overall analysis, what segmentation would you choose?
\item
What is the business profit the company can achieve (as measured with
the test data) based on your solution?
\item
What business decisions can the company make based on this analysis?
\end{enumerate}
\textbf{Answers:}
\begin{itemize}
\tightlist
\item
\item
\item
\item
\item
\item
\item
\item
\item
\item
\end{itemize}
\textbf{You have now completed your first market segmentation project.}
Do you have data from another survey that you can now use with this report?
\textbf{Extra question}: explore and report a new segmentation
analysis\ldots{}
\end{document}
% file: chapters/iteration-recursion.tex
\chapter{Basic Iterative and Recursive Algorithms} \label{chapter:iteration-recursion}
\input{algs/iteration-recursion/horner-rule}
\input{algs/iteration-recursion/integer-mult}
"alphanum_fraction": 0.8227272727,
"avg_line_length": 36.6666666667,
"ext": "tex",
"hexsha": "4993b099837a6410728d8b929d976762a4fa814b",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "5c8265b6368f851337ca9c0dd1476c07b6e29f83",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "hengxin/algorithms-pseudocode",
"max_forks_repo_path": "chapters/iteration-recursion.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "5c8265b6368f851337ca9c0dd1476c07b6e29f83",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "hengxin/algorithms-pseudocode",
"max_issues_repo_path": "chapters/iteration-recursion.tex",
"max_line_length": 86,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "5c8265b6368f851337ca9c0dd1476c07b6e29f83",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "hengxin/algorithms-pseudocode",
"max_stars_repo_path": "chapters/iteration-recursion.tex",
"max_stars_repo_stars_event_max_datetime": "2019-10-27T13:01:13.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-11-06T08:52:25.000Z",
"num_tokens": 57,
"size": 220
} |
%----------------------------------------------------------------------------------------
%----------------------------------------------------------------------------------------
% =====================================================================================================
%
% Introduction - Introduction to Problem & Data
%
% =====================================================================================================
\section{Abstract}
\label{sec:Introduction}
We have been hired to provide sound criminal reform and policy research for \href{https://en.wikipedia.org/wiki/Terry_Sanford}{Terry Sanford (D-NC)}, junior Senator representing North Carolina for the \href{https://en.wikipedia.org/wiki/100th_United_States_Congress}{100th U.S. Congress}. We have obtained a single cross-section of crime statistics for a selection of counties in North Carolina from calendar year 1987 from which to construct our analysis. We endeavor to help the Sanford re-election campaign understand the determinants of crime and generate policy suggestions that are applicable to local [North Carolina] government agencies.
\section{Code Book}
\label{sec:Code Book}
Our crime statistics data was provided in a mysteriously sourced \textit{crime\_v2.csv} file for which we were provided only the following variable descriptions:
\label{fig:EDA - Code Book}
\begin{table}[!ht]
\small
\begin{minipage}[t]{0.5\textwidth}
\centering
\begin{tabular}[t]{{p{0.5cm}p{1.5cm}p{5cm}}}
\toprule
\textbf{Pos} & \textbf{Variable} & \textbf{Description} \\
\midrule
1 & county & county identifier \\
2 & year & 1987 \\
3 & crmrte & crimes committed per person \\
4 & prbarr & 'probability' of arrest \\
5 & prbconv & 'probability' of conviction \\
6 & prbpris & 'probability' of prison sentence \\
7 & avgsen & avg. sentence, days \\
8 & polpc & police per capita \\
9 & density & people per sq. mile \\
10 & taxpc & tax revenue per capita \\
11 & west & =1 if in western N.C. \\
12 & central & =1 if in central N.C. \\
\vdots & \vdots & \vdots\\
\bottomrule
\end{tabular}
\end{minipage} \hfill
\begin{minipage}[t]{0.5\textwidth}
\centering
\begin{tabular}[t]{{p{0.5cm}p{1.5cm}p{5cm}}}
\toprule
\textbf{Pos} & \textbf{Variable} & \textbf{Description} \\
\midrule
\vdots & \vdots & \vdots \\
13 & urban & =1 if in SMSA \\
14 & pctmin80 & perc. minority, 1980 \\
15 & wcon & weekly wage, construction \\
16 & wtuc & wkly wge, trns, util, commun \\
17 & wtrd & wkly wge, whlesle, retail trade \\
18 & wfir & wkly wge, fin, ins, real est \\
19 & wser & wkly wge, service industry \\
20 & wmfg & wkly wge, manufacturing \\
21 & wfed & wkly wge, fed employees \\
22 & wsta & wkly wge, state employees \\
23 & wloc & wkly wge, local gov emps \\
24 & mix & offense mix: face-to-face/other \\
25 & pctymle & percent young male\\
\bottomrule
\end{tabular}
\end{minipage}
\caption{Crime\_V2 Code Book}
\label{fig:Code Book}
\end{table}
In the literature on crime, researchers often distinguish between the certainty of punishment (do criminals expect to get caught and face punishment) and the severity of punishment (for example, how long prison sentences are). The former concept is the motivation for the 'probability' variables. The probability of arrest is proxied by the ratio of arrests to offenses, measures drawn from the FBI's Uniform Crime Reports. The probability of conviction is proxied by the ratio of convictions to arrests, and the probability of prison sentence is proxied by the convictions resulting in a prison sentence to total convictions. The data on convictions is taken from the prison and probation files of the North Carolina Department of Correction.\\
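In symbols, the three proxies are simply the ratios:
\[
\textit{prbarr} = \frac{\mbox{arrests}}{\mbox{offenses}}, \qquad
\textit{prbconv} = \frac{\mbox{convictions}}{\mbox{arrests}}, \qquad
\textit{prbpris} = \frac{\mbox{prison sentences}}{\mbox{convictions}}.
\]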
The percent young male variable records the proportion of the population that is male and between the ages of 15 and 24. This variable, as well as percent minority, was drawn from census data. The number of police per capita was computed from the FBI's police agency employee counts. The variables for wages in different sectors were provided by the North Carolina Employment Security Commission.
\chapter{Overview}
\label{cha:overview}
This chapter gives an informal introduction to the RFSM language and shows how to use it to describe
FSM-based systems.
\medskip
Listing~\ref{lst:rfsm-gensig} is an example of a simple RFSM program\footnote{This program is
provided in the distribution, under directory \texttt{examples/single/gensig/v2}.}. This program is
used to describe and simulate the model of a calibrated pulse generator. Given an input clock
\verb|H|, with period $T_H$, it generates a pulse of duration $n \times T_H$ whenever input
\texttt{E} is set when event $H$ occurs.
\begin{lstlisting}[language=Rfsm,frame=single,numbers=left,caption=A simple RFSM
program,label={lst:rfsm-gensig},float]
@\label{gensig-1a}@fsm model gensig <n: int> (
in h: event,
in e: bool,
out s: bool)
{
@\label{gensig-1}@ states: E0 where s=0, E1 where s=1;
@\label{gensig-3}@ vars: k: int<0:n>;
trans:
| E0 -> E1 on h when e=1 with k:=1
@\label{gensig-4}@ | E1 -> E1 on h when k<n with k:=k+1
@\label{gensig-5}@ | E1 -> E0 on h when k=n;
itrans:
| -> E0;
@\label{gensig-1b}@}
@\label{gensig-2a}@input H : event = periodic (10,0,80)
input E : bool = value_changes (0:0, 25:1, 35:0)
@\label{gensig-2b}@output S : bool
@\label{gensig-6}@fsm g = gensig<4>(H,E,S)
\end{lstlisting}
The program can be divided into three parts.
\medskip The first part (lines \ref{gensig-1a}--\ref{gensig-1b}) gives a \textbf{generic model} of
the generator behavior. The model, named \verb|gensig|, has one parameter, \verb|n|, two inputs,
\verb|h| and \verb|e|, of type \verb|event| and \verb|bool| respectively, and one output \verb|s| of
type \verb|bool|. Its behavior is specified as a reactive FSM with two states, \verb|E0| and
\verb|E1|, and one internal variable \verb|k|. The transitions of this FSM are given after the
\verb|trans:| keyword in the form :
\begin{center}
\framebox{\lstinline[language=Rfsm]{| source_state -> destination_state on ev when guard with actions}}
\end{center}
where
\begin{itemize}
\item \emph{ev} is the event triggering the transition,
\item \emph{guard} is a set of (boolean) conditions,
\item \emph{actions} is a set of actions performed when the transition is enabled.
\end{itemize}
The semantics is that the transition is enabled
whenever the FSM is in the source state, the event \emph{ev} occurs and all the conditions in the
guard are true. The associated actions
are then performed and the FSM moves to the destination state. For example, the first transition is
enabled whenever an event occurs on input \verb|h| and, at this instant, the value of input \verb|e|
is 1. The FSM then goes from state \verb|E0| to state \verb|E1| and sets its internal variable
\verb|k| to 1.
The \emph{initial transition} of the FSM is given
after the \verb|itrans:| keyword in the form :
\begin{center}
\framebox{\lstinline[language=Rfsm]{| -> initial_state with actions}}
\end{center}
Here the FSM is initially in state \verb|E0|.
The value of the \texttt{s} output is attached to states, using
the \lstinline[language=Rfsm]{where} keyword~: this value is 0 when the
system is in state \texttt{E0} and 1 when the system is in state \texttt{E1}.
\textbf{Note}. In the transitions, the \lstinline[language=Rfsm]{when guard} and
\lstinline[language=Rfsm]{with actions} are optional and may be omitted.
A graphical representation of the \verb|gensig| model is given in
Fig.~\ref{fig:rfsm-gensig-model} (this representation was actually automatically generated from the
program in Listing~\ref{lst:rfsm-gensig}, as explained in Chap.~\ref{cha:rfsmc}).
\begin{figure}[!h]
\includegraphics[height=8cm]{figs/gensig-model}
\centering
\caption{A graphical representation of FSM model defined in Listing~\ref{lst:rfsm-gensig}}
\label{fig:rfsm-gensig-model}
\end{figure}
Note that, at this level, the value of the parameter \verb|n|, used in the type of the internal
variable \verb|k| (line~\ref{gensig-3}) and in the transition conditions (lines \ref{gensig-4} and
\ref{gensig-5}) is left unspecified, making the \verb|gensig| model a \emph{generic} one.
\medskip The second part of the program (lines \ref{gensig-2a}--\ref{gensig-2b}) lists \textbf{global inputs and
outputs}\footnote{In case of multi-FSM programs, this part will also contain the declaration of
\emph{shared} events and variables. See Sec.~\ref{sec:globals}.}. For global outputs the
declaration simply gives a name and a type. For global inputs, the declaration also specifies the
\textbf{stimuli} which are attached to the corresponding input for simulating the system. The
program of Listing~\ref{lst:rfsm-gensig} uses two kinds of stimuli\footnote{See
Sec.~\ref{sec:globals} for a complete description of stimuli.}. The stimuli attached to input
\verb|H| are declared as \emph{periodic}, with a period of 10 time units, a start time of 0 and a
end time of 80. This means that an event will be produced on this input at time 0, 10, 20, 30, 40,
50, 60, 70 and 80. The stimuli attached to input \verb|E| say that this input will respectively take
value 0, 1 and 0 at time 0, 25 and 35 (thus producing a ``pulse'' of duration 10 time units starting
at time 25).
\medskip
The third and last part of the program (line~\ref{gensig-6}) consists in building the global model of the system by
\emph{instantiating} the FSM model(s).
Instantiating a model creates a ``copy'' of this model for which
\begin{itemize}
\item the generic parameters (\verb|n| here) are now bound to actual values (4 here),
\item the inputs and outputs are connected to the global inputs or outputs.
\end{itemize}
\medskip
A graphical representation of the system described in Listing~\ref{lst:rfsm-gensig} is given in
Fig.~\ref{fig:rfsm-gensig-top}\footnote{Again, this representation was actually automatically generated from the
program in Listing~\ref{lst:rfsm-gensig}, as explained in Chap.~\ref{cha:rfsmc}}.
\begin{figure}[!h]
\includegraphics[height=8cm]{figs/gensig-top}
\centering
\caption{A graphical representation of system described in Listing~\ref{lst:rfsm-gensig}}
\label{fig:rfsm-gensig-top}
\end{figure}
\section*{Simulating}
\label{sec:simulating-1}
Simulating the program means computing the reaction of the system to the input stimuli. Simulation
can be performed using the RFSM command-line compiler or the IDE (see Chap.~\ref{cha:rfsmc} and \ref{cha:gui} resp.).
It produces a set of
\emph{traces} in VCD (Value Change Dump) format which can be visualized using \emph{waveform viewers}
such as \texttt{gtkwave}. The simulation results for the program in Listing~\ref{lst:rfsm-gensig}
are illustrated in Fig.~\ref{fig:rfsm-gensig-chrono}.
\begin{figure}[!h]
\includegraphics[width=\textwidth]{figs/gensig-chrono}
\centering
\caption{Simulation results for the program in Listing~\ref{lst:rfsm-gensig}, viewed using
\texttt{gtkwave}}
\label{fig:rfsm-gensig-chrono}
\end{figure}
\section*{Code generation}
\label{sec:code-generation-1}
RFSM can also generate code implementing the described systems, for simulation and/or
integration into existing applications.
\medskip
Currently, three backends are provided :
\begin{itemize}
\item a backend generating a C-based implementation of each FSM instance,
\item a backend generating a \emph{testbench} implementation in SystemC (FSM instances + stimuli
generators),
\item a backend generating a \emph{testbench} implementation in VHDL (FSM instances + stimuli
generators).
\end{itemize}
\medskip
The target language for the C backend is a C-like language augmented with
\begin{itemize}
\item a \verb|task| keyword for naming generated behaviors,
\item \verb|in|, \verb|out| and \verb|inout| keywords for identifying inputs and outputs,
\item a builtin \verb|event| type,
\item primitives for handling events : \verb|wait_ev()|, \verb|wait_evs()| and
\verb|notify_ev()|.
\end{itemize}
The idea is that the generated code can be turned into an application for a multi-tasking operating
system by providing actual implementations of the corresponding constructs and primitives.
\medskip
For the SystemC and VHDL backends, the generated code can actually be compiled and executed for
simulation purposes. The FSM implementations generated by the VHDL backend can also be
synthesized to be implemented in hardware using hardware-specific tools\footnote{We use the
\textsc{quartus} toolchain from Intel/Altera.}.
\medskip
Appendices C1, C2 and C3 respectively give the C and SystemC code generated from the example in
Listing~\ref{lst:rfsm-gensig}.
\section*{Variant formulation}
\label{sec:variant-formulation}
In the automaton described in Fig.~\ref{fig:rfsm-gensig-model} and Listing~\ref{lst:rfsm-gensig}, the
\texttt{s} output is defined by attaching its value to states.
This is typical of a so-called \emph{Moore}-style description.
It is also possible to specify these values by indicating how they are \emph{modified} when some
transitions are taken. An equivalent description of the one given in Listing~\ref{lst:rfsm-gensig} is
obtained, for example, by specifying that
\texttt{s} is set to 0 on the initial transition and on the transition from \texttt{E1} to
\texttt{E0}, and set to 1 on the transition from \texttt{E0} to \texttt{E1}.
This style of description, often called \emph{Mealy}-style, is illustrated in
Fig.~\ref{fig:rfsm-gensig-mealy} and
Listing~\ref{lst:rfsm-gensig-mealy}. Note the absence of the \texttt{where} clause in the
declarations of states at line~\ref{gensigm-1} and, conversely, the presence of the actions
\lstinline[language=Rfsm]{s:=0} and \lstinline[language=Rfsm]{s:=1} at lines~\ref{gensigm-3} and
\ref{gensigm-2b}, and \ref{gensigm-2a} respectively.
\begin{figure}[!h]
\includegraphics[height=6cm]{figs/gensig-model-mealy}
\centering
\caption{A Mealy-style reformulation of the model defined in Fig.~\ref{fig:rfsm-gensig-model}}
\label{fig:rfsm-gensig-mealy}
\end{figure}
\begin{lstlisting}[language=Rfsm,frame=single,numbers=left,caption=Transcription in RFSM of the
model given in Fig.~\ref{fig:rfsm-gensig-mealy},label={lst:rfsm-gensig-mealy},float]
fsm model gensig <n: int> (
in h: event,
in e: bool,
out s: bool)
{
@\label{gensigm-1}@ states: E0, E1;
vars: k: int<0:n>;
trans:
@\label{gensigm-2a}@ | E0 -> E1 on h when e=1 with k:=1, s:=1
| E1 -> E1 on h when k<n with k:=k+1
@\label{gensigm-2b}@  | E1 -> E0 on h when k=n with s:=0
itrans:
@\label{gensigm-3}@ | -> E0 with s:=0
}
\end{lstlisting}
\medskip \textbf{Note}. An option of the \texttt{rfsmc} compiler (\verb|-normalize|) makes it possible to
automatically transform a Moore-style description into a Mealy-style one.
% This option is
% automatically inserted when simulating a system\footnote{\emph{I.e.} simulation is always performed
% on Mealy-style FSMs.}.
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "rfsm"
%%% End:
\subsection{cmd -- Support for line-oriented command interpreters}
To be done ....
%
% ------------------------------------------------------------------
\chapter{Implementation details}\label{s:impl}
% ------------------------------------------------------------------
This chapter contains calculations and details.
% ------------------------------------------------------------------
\section{Convolution}\label{s:impl-convolution}
% ------------------------------------------------------------------
It is often convenient to express the convolution operation in matrix form. To this end, let $\phi(\bx)$ be the \verb!im2row! operator, extracting all $H' \times W'$ patches from the map $\bx$ and storing them as rows of a $(H''W'') \times (H'W'D)$ matrix. Formally, this operator is given by:
\[
[\phi(\bx)]_{pq} \underset{(i,j,d)=t(p,q)}{=} x_{ijd}
\]
where the index mapping $(i,j,d) = t(p,q)$ is
\[
i = i''+i'-1, \quad
j = j''+j'-1, \quad
p = i'' + H'' (j''-1), \quad
q = i' + H'(j'-1) + H'W' (d-1).
\]
It is also useful to define the ``transposed'' operator \verb!row2im!:
\[
[\phi^*(M)]_{ijd}
=
\sum_{(p,q) \in t^{-1}(i,j,d)}
M_{pq}.
\]
Note that $\phi$ and $\phi^*$ are linear operators. Both can be expressed by a matrix $H\in\real^{(H''W''H'W'D) \times(HWD)}$ such that
\[
\vv(\phi(\bx)) = H \vv(\bx), \qquad
\vv(\phi^*(M)) = H^\top \vv(M).
\]
Hence we obtain the following expression for the vectorized output (see~\cite{kinghorn96integrals}):
\[
\vv\by =
\vv\left(\phi(\bx) F\right)
=
\begin{cases}
(I \otimes \phi(\bx)) \vv F, & \text{or, equivalently,} \\
(F^\top \otimes I) \vv \phi(\bx),
\end{cases}
\]
where $F\in\mathbb{R}^{(H'W'D)\times K}$ is the matrix obtained by reshaping the array $\bff$ and $I$ is an identity matrix of suitable dimensions. This allows obtaining the following formulas for the derivatives:
\[
\frac{dz}{d(\vv F)^\top}
=
\frac{dz}{d(\vv\by)^\top}
(I \otimes \phi(\bx))
= \vv\left[
\phi(\bx)^\top
\frac{dz}{dY}
\right]^\top
\]
where $Y\in\real^{(H''W'')\times K}$ is the matrix obtained by reshaping the array $\by$. Likewise:
\[
\frac{dz}{d(\vv \bx)^\top}
=
\frac{dz}{d(\vv\by)^\top}
(F^\top \otimes I)
\frac{d\vv \phi(\bx)}{d(\vv \bx)^\top}
=
\vv\left[
\frac{dz}{dY}
F^\top
\right]^\top
H
\]
In summary, after reshaping these terms we obtain the formulas:
\[
\boxed{
\vv\by =
\vv\left(\phi(\bx) F\right),
\qquad
\frac{dz}{dF}
=
\phi(\bx)^\top\frac{d z}{d Y},
\qquad
\frac{d z}{d X}
=
\phi^*\left(
\frac{d z}{d Y}F^\top
\right)
}
\]
where $X\in\real^{(H'W')\times D}$ is the matrix obtained by reshaping $\bx$. Notably, these expressions are used to implement the convolutional operator; while this may seem inefficient, it is instead a fast approach when the number of filters is large and it allows leveraging fast BLAS and GPU BLAS implementations.
% ------------------------------------------------------------------
\section{Convolution transpose}\label{s:impl-convolution-transpose}
% ------------------------------------------------------------------
In order to understand the definition of convolution transpose, let $\by$ be obtained from $\bx$ by the convolution operator as defined in Section~\ref{s:convolution} (including padding and downsampling). Since this is a linear operation, it can be rewritten as $\vv \by = M \vv\bx$ for a suitable matrix $M$; convolution transpose computes instead $\vv \bx = M^\top \vv \by$. While this is simple to describe in terms of matrices, what happens in terms of indexes is tricky. In order to derive a formula for the convolution transpose, start from standard convolution (for a 1D signal):
\[
y_{i''} = \sum_{i'=1}^{H'} f_{i'} x_{S (i''-1) + i' - P_h^-},
\quad
1 \leq i'' \leq 1 + \left\lfloor \frac{H - H' + P_h^- + P_h^+}{S} \right\rfloor,
\]
where $S$ is the downsampling factor, $P_h^-$ and $P_h^+$ the padding, $H$ the length of the input signal, $\bx$ and $H'$ the length of the filter $\bff$. Due to padding, the index of the input data $\bx$ may exceed the range $[1,H]$; we implicitly assume that the signal is zero padded outside this range.
In order to derive an expression of the convolution transpose, we make use of the identity $\vv \by^\top (M \vv \bx) = (\vv \by^\top M) \vv\bx = \vv\bx^\top (M^\top \vv\by)$. Expanding this in formulas:
\begin{align*}
\sum_{i''=1}^b y_{i''}
\sum_{i'=1}^{H'} f_{i'} x_{S (i''-1) + i' -P_h^-}
&=
\sum_{i''=-\infty}^{+\infty}
\sum_{i'=-\infty}^{+\infty}
y_{i''}\ f_{i'}\ x_{S (i''-1) + i' -P_h^-}
\\
&=
\sum_{i''=-\infty}^{+\infty}
\sum_{k=-\infty}^{+\infty}
y_{i''}\ f_{k-S(i'' -1) + P_h^-}\ x_{k}
\\
&=
\sum_{i''=-\infty}^{+\infty}
\sum_{k=-\infty}^{+\infty}
y_{i''}%
\ %
f_{%
(k-1+ P_h^-) \bmod S +
S \left(1 -i'' + \left\lfloor \frac{k-1+ P_h^-}{S} \right\rfloor\right)+1
}\ x_{k}
\\
&=
\sum_{k=-\infty}^{+\infty}
x_{k}
\sum_{q=-\infty}^{+\infty}
y_{\left\lfloor \frac{k-1+ P_h^-}{S} \right\rfloor + 2 - q}
\ %
f_{(k-1+ P_h^-)\bmod S +S(q - 1)+1}.
\end{align*}
Summation ranges have been extended to infinity by assuming that all signals are zero padded as needed. In order to recover such ranges, note that $k \in [1,H]$ (since this is the range of elements of $\bx$ involved in the original convolution). Furthermore, $q\geq 1$ is the minimum value of $q$ for which the filter $\bff$ is nonzero; likewise, $q\leq \lfloor (H'-1)/S\rfloor +1$ is a fairly tight upper bound on the maximum value (although, depending on $k$, there could be an element less). Hence
\begin{equation}\label{e:convt-step}
x_k =
\sum_{q=1}^{1 + \lfloor \frac{H'-1}{S} \rfloor}
y_{\left\lfloor \frac{k-1+ P_h^-}{S} \right\rfloor + 2 - q}\ %
f_{(k-1+ P_h^-)\bmod S +S(q - 1)+1},
\qquad k=1,\dots, H.
\end{equation}
Here $H$, $H'$ and $H''$ are related by:
\[
H'' = 1+ \left\lfloor \frac{H - H' + P_h^- + P_h^+}{S} \right\rfloor.
\]
If $H''$ is now given as input, it is not possible to recover $H$ uniquely; instead, all the following values are possible
\[
S (H''-1) +H' - P_h^- - P_h^+ \leq H < S H'' +H' - P_h^- - P_h^+.
\]
We use the tighter definition and set $H = S (H''-1) +H' - P_h^- - P_h^+$.
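For instance, with $S=2$, $H'=3$, and no padding, an output of length $H''=4$ gives $H = 2\,(4-1)+3 = 9$; the forward convolution indeed maps $H=9$ back to $H'' = 1 + \lfloor (9-3)/2 \rfloor = 4$.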
Note that the summation extrema in \eqref{e:convt-step} can be refined slightly to account for the finite size of $\by$ and $\bw$:
\begin{multline*}
\max\left\{
1,
\left\lfloor \frac{k-1 + P_h^-}{S} \right\rfloor + 2 - H''
\right\}
\leq q \\
\leq
1 +\min\left\{
\left\lfloor \frac{H'-1-(k-1+ P_h^-)\bmod S}{S} \right\rfloor,
\left\lfloor \frac{k-1 + P_h^-}{S} \right\rfloor
\right\}.
\end{multline*}
% ------------------------------------------------------------------
\section{Spatial pooling}\label{s:impl-pooling}
% ------------------------------------------------------------------
Since max pooling simply selects for each output element an input element, the relation can be expressed in matrix form as
$
\vv\by = S(\bx) \vv \bx
$
for a suitable selector matrix $S(\bx)\in\{0,1\}^{(H''W''D) \times (HWD)}$. The derivatives can then be written as:
$
\frac{d z}{d (\vv \bx)^\top}
=
\frac{d z}{d (\vv \by)^\top}
S(\bx),
$
for all but a null set of points, where the operator is not differentiable (this usually does not pose problems in optimization by stochastic gradient). For average pooling, similar relations exist with two differences: $S$ does not depend on the input $\bx$ and it is not binary, in order to account for the normalization factors. In summary, we have the expressions:
\begin{equation}\label{e:max-mat}
\boxed{
\vv\by = S(\bx) \vv \bx,
\qquad
\frac{d z}{d \vv \bx}
=
S(\bx)^\top
\frac{d z}{d \vv \by}.
}
\end{equation}
% ------------------------------------------------------------------
\section{Activation functions}\label{s:impl-activation}
% ------------------------------------------------------------------
% ------------------------------------------------------------------
\subsection{ReLU}\label{s:impl-relu}
% ------------------------------------------------------------------
The ReLU operator can be expressed in matrix notation as
\[
\vv\by = \diag\bfs \vv \bx,
\qquad
\frac{d z}{d \vv \bx}
=
\diag\bfs
\frac{d z}{d \vv \by}
\]
where $\bfs = [\vv \bx > 0] \in\{0,1\}^{HWD}$ is an indicator vector.
% ------------------------------------------------------------------
\subsection{Sigmoid}\label{s:impl-sigmoid}
% ------------------------------------------------------------------
The derivative of the sigmoid function is given by
\begin{align*}
\frac{dz}{dx_{ijk}}
&=
\frac{dz}{d y_{ijd}}
\frac{d y_{ijd}}{d x_{ijd}}
=
\frac{dz}{d y_{ijd}}
\frac{-1}{(1+e^{-x_{ijd}})^2} ( - e^{-x_{ijd}})
\\
&=
\frac{dz}{d y_{ijd}}
y_{ijd} (1 - y_{ijd}).
\end{align*}
In matrix notation:
\[
\frac{dz}{d\bx} = \frac{dz}{d\by} \odot
\by \odot
(\mathbf{1}\mathbf{1}^\top - \by).
\]
% ------------------------------------------------------------------
\section{Normalization}\label{s:normalization}
% ------------------------------------------------------------------
% ------------------------------------------------------------------
\subsection{Cross-channel normalization}\label{s:impl-ccnormalization}
% ------------------------------------------------------------------
The derivative is easily computed as:
\[
\frac{dz}{d x_{ijd}}
=
\frac{dz}{d y_{ijd}}
L(i,j,d|\bx)^{-\beta}
-2\alpha\beta x_{ijd}
\sum_{k:d\in G(k)}
\frac{dz}{d y_{ijk}}
L(i,j,k|\bx)^{-\beta-1} x_{ijk}
\]
where
\[
L(i,j,k|\bx) = \kappa + \alpha \sum_{t\in G(k)} x_{ijt}^2.
\]
% ------------------------------------------------------------------
\subsection{Batch normalization}\label{s:impl-bnorm}
% ------------------------------------------------------------------
The derivative of the input with respect to the network output is computed as follows:
\[
\frac{dz}{dx_{ijkt}} = \sum_{i''j''k''t''}
\frac{dz}{d y_{i''j''k''t''}}
\frac{d y_{i''j''k''t''}}{d x_{ijkt}}.
\]
Since feature channels are processed independently, all terms with $k''\not=k$ are null. Hence
\[
\frac{dz}{dx_{ijkt}} = \sum_{i''j''t''}
\frac{dz}{d y_{i''j''kt''}}
\frac{d y_{i''j''kt''}}{d x_{ijkt}},
\]
where
\[
\frac{d y_{i''j''kt''}}{d x_{ijkt}}
=
w_k
\left(\delta_{i=i'',j=j'',t=t''} - \frac{d \mu_k}{d x_{ijkt}}\right)
\frac{1}{\sqrt{\sigma^2_k + \epsilon}}
-
\frac{w_k}{2}
\left(x_{i''j''kt''} - \mu_k\right)
\left(\sigma_k^2 + \epsilon \right)^{-\frac{3}{2}}
\frac{d \sigma_k^2}{d x_{ijkt}},
\]
the derivatives with respect to the mean and variance are computed as follows:
\begin{align*}
\frac{d \mu_k}{d x_{ijkt}} &= \frac{1}{HWT},
\\
\frac{d \sigma_k^2}{d x_{i'j'kt'}}
&=
\frac{2}{HWT}
\sum_{ijt}
\left(x_{ijkt} - \mu_k \right)
\left(\delta_{i=i',j=j',t=t'} - \frac{1}{HWT} \right)
=
\frac{2}{HWT} \left(x_{i'j'kt'} - \mu_k \right),
\end{align*}
and $\delta_E$ is the indicator function of the event $E$. Hence
\begin{align*}
\frac{dz}{dx_{ijkt}}
&=
\frac{w_k}{\sqrt{\sigma^2_k + \epsilon}}
\left(
\frac{dz}{d y_{ijkt}}
-
\frac{1}{HWT}\sum_{i''j''kt''}
\frac{dz}{d y_{i''j''kt''}}
\right)
\\
&-
\frac{w_k}{2(\sigma^2_k + \epsilon)^{\frac{3}{2}}}
\sum_{i''j''kt''}
\frac{dz}{d y_{i''j''kt''}}
\left(x_{i''j''kt''} - \mu_k\right)
\frac{2}{HWT} \left(x_{ijkt} - \mu_k \right)
\end{align*}
i.e.
\begin{align*}
\frac{dz}{dx_{ijkt}}
&=
\frac{w_k}{\sqrt{\sigma^2_k + \epsilon}}
\left(
\frac{dz}{d y_{ijkt}}
-
\frac{1}{HWT}\sum_{i''j''kt''}
\frac{dz}{d y_{i''j''kt''}}
\right)
\\
&-
\frac{w_k}{\sigma^2_k + \epsilon}
\,
\frac{x_{ijkt} - \mu_k}{\sqrt{\sigma^2_k + \epsilon}}
\,
\frac{1}{HWT}
\sum_{i''j''kt''}
\frac{dz}{d y_{i''j''kt''}}
\left(x_{i''j''kt''} - \mu_k\right).
\end{align*}
% ------------------------------------------------------------------
\subsection{Spatial normalization}\label{s:impl-spnorm}
% ------------------------------------------------------------------
The neighborhood norm $n^2_{i''j''d}$ can be computed by applying average pooling to $x_{ijd}^2$ using \verb!vl_nnpool! with a $W'\times H'$ pooling region, top padding $\lfloor \frac{H'-1}{2}\rfloor$, bottom padding $H'-\lfloor \frac{H'-1}{2}\rfloor-1$, and similarly for the horizontal padding.
The derivative of spatial normalization can be obtained as follows:
\begin{align*}
\frac{dz}{dx_{ijd}}
&= \sum_{i''j''d}
\frac{dz}{d y_{i''j''d}}
\frac{d y_{i''j''d}}{d x_{ijd}}
\\
&=
\sum_{i''j''d}
\frac{dz}{d y_{i''j''d}}
(1 + \alpha n_{i''j''d}^2)^{-\beta}
\frac{dx_{i''j''d}}{d x_{ijd}}
-\alpha\beta
\frac{dz}{d y_{i''j''d}}
(1 + \alpha n_{i''j''d}^2)^{-\beta-1}
x_{i''j''d}
\frac{dn_{i''j''d}^2}{d (x^2_{ijd})}
\frac{dx^2_{ijd}}{d x_{ijd}}
\\
&=
\frac{dz}{d y_{ijd}}
(1 + \alpha n_{ijd}^2)^{-\beta}
-2\alpha\beta x_{ijd}
\left[
\sum_{i''j''d}
\frac{dz}{d y_{i''j''d}}
(1 + \alpha n_{i''j''d}^2)^{-\beta-1}
x_{i''j''d}
\frac{dn_{i''j''d}^2}{d (x_{ijd}^2)}
\right]
\\
&=
\frac{dz}{d y_{ijd}}
(1 + \alpha n_{ijd}^2)^{-\beta}
-2\alpha\beta x_{ijd}
\left[
\sum_{i''j''d}
\eta_{i''j''d}
\frac{dn_{i''j''d}^2}{d (x_{ijd}^2)}
\right],
\quad
\eta_{i''j''d}=
\frac{dz}{d y_{i''j''d}}
(1 + \alpha n_{i''j''d}^2)^{-\beta-1}
x_{i''j''d}
\end{align*}
Note that the summation can be computed as the derivative of the
\verb!vl_nnpool! block.
% ------------------------------------------------------------------
\subsection{Softmax}\label{s:impl-softmax}
% ------------------------------------------------------------------
Care must be taken in evaluating the exponential in order to avoid underflow or overflow. The simplest way to do so is to divide both numerator and denominator by $e^{\max_d x_{ijd}}$:
\[
y_{ijk} = \frac{e^{x_{ijk} - \max_d x_{ijd}}}{\sum_{t=1}^D e^{x_{ijt}- \max_d x_{ijd}}}.
\]
The derivative is given by:
\[
\frac{dz}{d x_{ijd}}
=
\sum_{k}
\frac{dz}{d y_{ijk}}
\left(
e^{x_{ijd}} L(\bx)^{-1} \delta_{\{k=d\}}
-
e^{x_{ijd}}
e^{x_{ijk}} L(\bx)^{-2}
\right),
\quad
L(\bx) = \sum_{t=1}^D e^{x_{ijt}}.
\]
Simplifying:
\[
\frac{dz}{d x_{ijd}}
=
y_{ijd}
\left(
\frac{dz}{d y_{ijd}}
-
\sum_{k=1}^K
\frac{dz}{d y_{ijk}} y_{ijk}
\right).
\]
In matrix form:
\[
\frac{dz}{dX} = Y \odot \left(\frac{dz}{dY}
- \left(\frac{dz}{dY} \odot Y\right) \bone\bone^\top\right)
\]
where $X,Y\in\real^{HW\times D}$ are the matrices obtained by reshaping the arrays
$\bx$ and $\by$. Note that the numerical implementation of this expression is straightforward once the output $Y$ has been computed with the caveats above.
% ------------------------------------------------------------------
\section{Losses and comparisons}\label{s:impl-losses}
% ------------------------------------------------------------------
% ------------------------------------------------------------------
\subsection{Log-loss}\label{s:impl-loss}
% ------------------------------------------------------------------
The derivative is
\[
\frac{dz}{dx_{ijd}} = - \frac{dz}{dy} \frac{1}{x_{ijc}} \delta_{\{d = c\}}.
\]
% ------------------------------------------------------------------
\subsection{Softmax log-loss}\label{s:impl-sfloss}
% ------------------------------------------------------------------
The derivative is given by
\[
\frac{dz}{dx_{ijd}}
= - \frac{dz}{dy} \left(\delta_{d=c} - y_{ijc}\right)
\]
where $y_{ijc}$ is the output of the softmax layer. In matrix form:
\[
\frac{dz}{dX}
= - \frac{dz}{dy} \left(\bone^\top \bfe_c - Y\right)
\]
where $X,Y\in\real^{HW\times D}$ are the matrices obtained by reshaping the arrays
$\bx$ and $\by$ and $\bfe_c$ is the indicator vector of class $c$.
% ------------------------------------------------------------------
\subsection{$p$-distance}\label{s:impl-pdistance}
% ------------------------------------------------------------------
The derivative of the operator without root is given by:
\begin{align*}
\frac{dz}{dx_{ijd}}
&=
\frac{dz}{dy_{ij}}
p |x_{ijd} - \bar x_{ijd}|^{p-1} \operatorname{sign} (x_{ijd} - \bar x_{ijd}).
\end{align*}
The derivative of the operator with root is given by:
\begin{align*}
\frac{dz}{dx_{ijd}}
&=
\frac{dz}{dy_{ij}}
\frac{1}{p}
\left(\sum_{d'} |x_{ijd'} - \bar x_{ijd'}|^p \right)^{\frac{1}{p}-1}
p |x_{ijd} - \bar x_{ijd}|^{p-1} \sign(x_{ijd} - \bar x_{ijd})
\\
&=
\frac{dz}{dy_{ij}}
\frac{|x_{ijd} - \bar x_{ijd}|^{p-1} \sign(x_{ijd} - \bar x_{ijd})}{y_{ij}^{p-1}}.
\end{align*}
The formulas simplify a little for $p=1,2$ which are therefore implemented as special cases.
\appendix
\addappheadtotoc
\counterwithin{figure}{section}
\section{Appendix}
\label{sec:sec010}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}[htbp]
\onecolumn
\centering
\includegraphics[width=0.95\textwidth]{fig022}
\caption{When the clinician presses the ``Accept'' button, the system will trigger one of two minor scenario options: (a) {\bf Active}; or (b) {\bf Passive}. During our low-fi prototype co-design sessions with clinicians, all of them preferred the (b) {\bf Passive} option.}
\label{fig:fig022}
\twocolumn
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}[htbp]
\onecolumn
\centering
\includegraphics[width=0.95\textwidth]{fig002}
\caption{Our {\it BreastScreening} assistant provides several features covering the basics of medical imaging diagnosis. From there, we will be able to validate our DenseNet BIRADS classifier together with clinicians.}
\label{fig:fig002}
\twocolumn
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\clearpage
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}[t!]
\onecolumn
\centering
\includegraphics[width=\textwidth]{fig024}
\caption{Here, we present the suggested system resulting from a preliminary evaluation. From this proposed system, we will be able to understand when ({\it e.g.}, {\bf Proactive} {\it vs} {\bf Reactive}) and how ({\it e.g.}, {\bf Assertive} {\it vs} {\bf Non-Assertive}) the assistant agent should adapt and change its behaviour for each group ({\it i.e.}, Interns, Juniors, Middles and Seniors) of medical experience. In this case, we have an {\bf Assertive} communication with the second screen of a {\bf Reactive} agent.}
\label{fig:fig024}
\twocolumn
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"alphanum_fraction": 0.6317808219,
"avg_line_length": 45.625,
"ext": "tex",
"hexsha": "de505a8791ee10d8c378af99c843ba9f42537759",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "99c13e25c1f047adf5feab3bc0a086213d2b7137",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "mida-project/ates-reports",
"max_forks_repo_path": "samples/final/sections/sec010.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "99c13e25c1f047adf5feab3bc0a086213d2b7137",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "mida-project/ates-reports",
"max_issues_repo_path": "samples/final/sections/sec010.tex",
"max_line_length": 523,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "99c13e25c1f047adf5feab3bc0a086213d2b7137",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "mida-project/ates-reports",
"max_stars_repo_path": "samples/final/sections/sec010.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 448,
"size": 1825
} |
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Friggeri Resume/CV
% XeLaTeX Template
% Version 1.0 (5/5/13)
%
% Original author:
% Adrien Friggeri ([email protected])
% https://github.com/afriggeri/CV
%
% License:
% CC BY-NC-SA 3.0 (http://creativecommons.org/licenses/by-nc-sa/3.0/)
%
% Important notes:
% This template needs to be compiled with XeLaTeX and the bibliography, if used,
% needs to be compiled with biber rather than bibtex.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\documentclass[]{friggeri-cv} % Add 'print' as an option into the square bracket to remove colors from this template for printing
\addbibresource{bibliography.bib} % Specify the bibliography file to include publications
\begin{document}
\header{Ed}{George}{Senior Android Developer} % Your name and current job title/field
%----------------------------------------------------------------------------------------
% SIDEBAR SECTION
%----------------------------------------------------------------------------------------
\begin{aside} % In the aside, each new line forces a line break
\section{contact}
48 Chiltern Street
Aylesbury, Bucks
HP218BW
~
+44 7450 917 293
+44 1844 261 259
~
\href{mailto:[email protected]}{[email protected]}
\href{http://ed-george.github.io}{ed-george.github.io}
\href{http://github.com/ed-george}{github.com/ed-george}
\section{programming}
{\color{red} $\varheartsuit$} Android (7+ years), {\color{red} $\varheartsuit$} Kotlin,
{\color{red} $\varheartsuit$} Java, Swift, Objective-C, C, C++, Haskell, ARM Assembly, Bash, HTML, CSS, PHP
\section{libraries}
Firebase, Android Arch Components, RxJava, Dagger, Retrofit, OkHTTP, Picasso, Glide, Koin, ButterKnife, Fabric, Localytics
\section{other}
Git/SVN, MVP, MVVM, Agile/Scrum/Kanban, JIRA, SqlLite, MySQL, \LaTeX
\end{aside}
%----------------------------------------------------------------------------------------
% WORK EXPERIENCE SECTION
%----------------------------------------------------------------------------------------
\section{experience}
\begin{entrylist}
%------------------------------------------------
\entry
{April 2015 -- }
{HI MUM! SAID DAD}
{London, United Kingdom}
{\emph{Lead Android Developer} \\
Lead Android developer for Hi Mum! Said Dad, a multi-award-winning mobile design agency with a high number of ``household name'' clients.\\
Detailed achievements \& responsibilities:
\begin{itemize}
\item Working on large scale and highly popular applications - e.g. National Trust, My Oxfam, Tastecard, Breathesport, Carling iPint and more.
\begin{itemize}
\item Providing support for the entire product life cycle from conception to release and beyond
\item Using issue tracking software (JIRA) to report defects, log work and current sprints
\end{itemize}
\item Using industry standard Android tools such as RxJava, Kotlin and Firebase to create modern applications
\item Closely working with a UI/UX design team to develop intuitive and visually stunning applications using the latest design guidelines (Material Design)
\item Providing documentation and support for multiple systems
\item Overseeing and communicating closely with both in-house and remote working developers
\item Working with a large team of professionals using an agile methodology
\end{itemize}
}
\end{entrylist}
\begin{entrylist}
\entry
{2013 -- 2015}
{BOPPL LTD}
{London, United Kingdom}
{\emph{Android Developer} \\
Sole Android developer for Boppl, a mobile ordering platform and application used within the hospitality industry, which allows users to purchase food and drink at selected venues. \\
Detailed achievements \& responsibilities:
\begin{itemize}
\item Creating and maintaining bespoke Android applications from scratch
\begin{itemize}
\item Released and managed multiple applications to the Google Play Store
\item Using standard Android tools such as DDMS and adb to debug code
\end{itemize}
\item Learned how to work with common external libraries such as GSON \& ORMLite
\item Used version control (git) effectively
\item Worked with JSON and a RESTful API
\item Working with a small team of professionals
\end{itemize}}
%------------------------------------------------
\entry
{2012 -- 2013}
{ED GEORGE DESIGNS}
{Oxford, United Kingdom}
{\emph{Web Designer \& Founder} \\
Founded a small web design business creating and maintaining tailored websites for multiple clients.
\begin{itemize}
\item Conducted professional client interviews and requirement specifications
\item Provided professional-standard documentation
\item Worked with a variety of Content Management Systems, programming and markup languages
\end{itemize}}
%------------------------------------------------
\end{entrylist}
%----------------------------------------------------------------------------------------
% EDUCATION SECTION
%----------------------------------------------------------------------------------------
\section{education}
\begin{entrylist}
%------------------------------------------------
\entry
{2010 -- 2013}
{Bachelor {\normalfont of Computer Science}}
{The University of Nottingham, Nottingham}
{\emph{An Application Approach To Mobile Signal Analysis} \\ My dissertation focused primarily on the Android platform and used Java to create a mobile application to access real-time information about mobile signal speed and strength. This project earned a first-class mark and enabled me to use common workplace practices such as test-driven development and agile software methodologies.\\Overall, I graduated with a Second Class Degree (Upper Tier).}
%------------------------------------------------
\entry
{2008 -- 2010}
{Sixth Form}
{Lord Williams's School, Thame}
{A Levels: ICT (A), Physics (B), Maths (C) and AS Geography (C)}
%------------------------------------------------
\end{entrylist}
%----------------------------------------------------------------------------------------
% AWARDS SECTION
%----------------------------------------------------------------------------------------
\section{awards}
\begin{entrylist}
%------------------------------------------------
\entry
{2017}
{Grand Prix}
{Moma Awards 2017}
{My Oxfam \\}
\entry
{2017}
{Not-For-Profit/Charity Mobile Strategy/Campaign}
{Moma Awards 2017}
{My Oxfam \\}
\entry
{2017}
{Best Mobile/Tablet Customer Facing App}
{Moma Awards 2017}
{My Oxfam \\}
\entry
{2017}
{Most Innovative use of Mobile}
{Moma Awards 2017}
{My Oxfam \\}
\entry
{2017}
{Interactive Entertainment Mobile Strategy/Campaign}
{Moma Awards 2017}
{Carling Tap \\}
\entry
{2017}
{Best use of Technology}
{Moma Awards 2017}
{Carling Tap \\}
\entry
{2017}
{Lovely M-Commerce Award}
{Lovely Mobile Awards}
{Carling Tap \\}
\entry
{2015}
{Most Effective Loyalty Campaign}
{MobileMarketing Awards}
{Carling Tap \\}
\entry
{2010}
{Nick Nichols ICT Award}
{Lord Williams's School}
{}
\entry
{2006 -- 2009}
{Member of Gifted \& Talented UK}
{Lord Williams's School}
{}
\entry
{2012}
{Nominated for `Outstanding Contribution to a Society'}
{Nottingham University}
{Nominated for contributions to CompSoc}
%------------------------------------------------
\end{entrylist}
%----------------------------------------------------------------------------------------
% COMMUNICATION SKILLS SECTION
%----------------------------------------------------------------------------------------
\section{leadership activities}
\begin{entrylist}
%------------------------------------------------
\entry
{2011 -- 2012}
{Social Secretary - Computer Society}
{University of Nottingham}
{Responsible for the organization of trips/socials for the society,
including a trip to CeBIT Hanover, Germany, for 35+ students. Also responsible for the organization of sponsor talks and other society duties.}
%------------------------------------------------
\entry
{2012 -- 2013}
{3\textsuperscript{rd} Year Course Rep}
{University of Nottingham}
{Responsible for attending Learning Forums and for voicing concerns from my peers to Academic Staff.}
%------------------------------------------------
\entry
{2012 -- 2013}
{UCAS Tour Guide}
{University of Nottingham}
{Responsible for guiding potential candidates around campus and the department, answering questions and meeting parent/student requests. Also acted as a liaison to the Department, relaying feedback from parents and potential students.}
%------------------------------------------------
\end{entrylist}
%----------------------------------------------------------------------------------------
% INTERESTS SECTION
%----------------------------------------------------------------------------------------
\section{interests}
\textbf{professional:} mobile development, emerging technologies, programming paradigms, web design, web app creation, software design, marketing \textbf{personal:} music, guitar (bass \& electric), current affairs, football, American sports, retro gaming
%----------------------------------------------------------------------------------------
% PUBLICATIONS SECTION
%----------------------------------------------------------------------------------------
%\section{publications}
\printbibsection{article}{article in peer-reviewed journal} % Print all articles from the bibliography
\printbibsection{book}{books} % Print all books from the bibliography
\begin{refsection} % This is a custom heading for those references marked as "inproceedings" but not containing "keyword=france"
\nocite{*}
\printbibliography[sorting=chronological, type=inproceedings, title={international peer-reviewed conferences/proceedings}, notkeyword={france}, heading=subbibliography]
\end{refsection}
\begin{refsection} % This is a custom heading for those references marked as "inproceedings" and containing "keyword=france"
\nocite{*}
\printbibliography[sorting=chronological, type=inproceedings, title={local peer-reviewed conferences/proceedings}, keyword={france}, heading=subbibliography]
\end{refsection}
\printbibsection{misc}{other publications} % Print all miscellaneous entries from the bibliography
\printbibsection{report}{research reports} % Print all research reports from the bibliography
%----------------------------------------------------------------------------------------
\end{document}
\clearpage
\subsection{Using Reference Parameters} % (fold)
\label{sub:using_reference_parameters}
% subsection using_reference_parameters (end)
\section{Norm and distance}
\subsection{Norm}
\textit{Euclidean norm} (or just norm) is\\
$||x||= \sqrt{x_1^2+x_2^2+...+x_n^2}= \sqrt{x^Tx}$\\
\textbf{Properties}\\
\textbullet homogeneity: $||\beta x|| = |\beta|\,||x||$\\
\textbullet triangle inequality: $||x + y|| \leq ||x|| + ||y||$\\
\textbullet non-negativity: $||x|| \geq 0$\\
\textbullet definiteness: $||x|| = 0$ only if $x = 0$\\
\textit{positive definiteness} = non-negativity + definiteness\\
\textbf{rms}(x) = $\sqrt{\frac{x_1^2+...+x_n^2}{n}} = \frac{||x||}{\sqrt{n}}$ \\
\textbullet \textit{Norm of a sum}:\\
$\Vert x + y \Vert^2 = (x + y)^T(x + y) = \Vert x \Vert^2 + 2x^Ty + \Vert y \Vert^2$\\
\textbf{Norm of block vectors}
$||(a,b,c)|| = \sqrt{||a||^2 + ||b||^2 + ||c||^2} = ||(||a||, ||b||, ||c||)||$\\
\textbf{Chebyshev inequality}
If $k$ of its entries satisfy $|x_i| \geq a$, \\then $\frac{k}{n} \leq (\frac{\textbf{rms}(x)}{a})^2$\\
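e.g.\ (our own illustration): if $\textbf{rms}(x) = 1$, then at most $(\frac{1}{5})^2 = 4\%$ of the entries can have $|x_i| \geq 5$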
\subsection{Distance}
$\textbf{dist}(a,b) = ||a - b||$\\
\textit{Triangle Inequality}: $||a - c|| = ||(a - b) + (b - c)|| \leq ||a-b|| + ||b-c||$\\
$z_j$ is the nearest neighbor of $x$ if $||x-z_j|| \leq ||x-z_i||$, $i=1,\ldots,m$\\
\subsection{Standard Deviation}
de-meaned vector: $\tilde{x} = x - \textbf{avg}(x)\textbf{1}$\\
standard deviation: \\\textbf{std}(x) = \textbf{rms}($\tilde{x}$) = $\frac{||x - (\textbf{1}^Tx/n)\textbf{1}||}{\sqrt{n}}$\\
${\textbf{rms}(x)}^2 = {\textbf{avg}(x)}^2 + {\textbf{std}(x)}^2$\\
By the Chebyshev inequality, if $k$ entries satisfy $|x_i - \textbf{avg}(x)| \geq a$, then $k/n \leq (\textbf{std}(x)/a)^2$. (This inequality is only interesting for $a > \textbf{std}(x)$.) \\
\textit{Cauchy–Schwarz inequality}: $|a^Tb| \leq \Vert a\Vert \Vert b \Vert$
\subsection{Angle}
angle between two nonzero vectors a, b defined as\\
$\angle(a,b) = \arccos(\frac{a^Tb}{\Vert a \Vert \Vert b \Vert})$\\
$a^Tb = \Vert a \Vert \Vert b \Vert \cos (\angle(a,b))$\\
\textbf{Classification of angles}\\
$\theta = \pi/2$: $a \perp b$\\
$\theta = 0$: $a^Tb = \Vert a \Vert \Vert b \Vert$\\
$\theta = \pi = 180^{\circ}$ : $a^Tb = -\Vert a \Vert \Vert b\Vert$\\
$\theta \leq \pi/2 = 90^{\circ} \iff a^Tb \geq 0$\\
$\theta \geq \pi/2 = 90^{\circ} \iff a^Tb \leq 0$\\
\textbf{Correlation Coefficient ($\rho$)}
$\rho = \frac{\tilde{a}^T\tilde{b}}{\Vert\tilde{a}\Vert\Vert\tilde{b}\Vert}$\\
With $u = \tilde{a}/\textbf{std}(a)$ \& $v = \tilde{b}/\textbf{std}(b)$, \\
$\rho = u^Tv/n$ where $\Vert u \Vert = \Vert v \Vert = \sqrt{n}$\\
$std(a+b) = \\
\sqrt{std(a)^2+ 2\rho std(a)std(b) +std(b)^2}$\\
\textbf{Properties of standard deviation}\\
\textbullet $\textbf{std}(x+a\textbf{1}) = \textbf{std}(x)$\\
\textbullet $\textbf{std}(ax) = |a| \, \textbf{std}(x)$\\
\textbf{Standardization}
$z = \frac{1}{\textbf{std}(x)}(x - \textbf{avg}(x)\textbf{1})$
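By the two properties above, the standardized vector satisfies $\textbf{avg}(z) = 0$ and $\textbf{std}(z) = 1$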
\subsection{Complexity}
\textbullet \textit{norm}: 2n\\
\textbullet \textit{\textbf{rms}}: 2n\\
\textbullet \textit{\textbf{dist}(a,b)}: 3n\\
\textbullet \textit{$\angle(a,b)$}: 6n\\
\documentclass[11pt,a4paper, final, twoside]{article}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usepackage{amsmath}
\usepackage{fancyhdr}
\usepackage{amsthm}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{amscd}
\usepackage{latexsym}
\usepackage{graphicx}
\usepackage{graphics}
\usepackage{natbib}
\usepackage[colorlinks=true, urlcolor=blue, linkcolor=black, citecolor=black]{hyperref}
\usepackage{color}
\usepackage{sectsty}
\setcounter{MaxMatrixCols}{10}
\sectionfont{\fontsize{12}{15}\selectfont}
\renewcommand{\thefootnote}{}
\setlength{\oddsidemargin}{1pt} \setlength{\evensidemargin}{1pt}
\setlength{\hoffset}{-1in} \addtolength{\hoffset}{25mm}
\setlength{\textwidth}{140mm}
\setlength{\marginparsep}{0pt} \setlength{\marginparwidth}{0pt}
\setlength{\topmargin}{0pt}
\setlength{\voffset}{-2in} \addtolength{\voffset}{20mm}
\setlength{\textheight}{300mm}
\setlength{\headsep}{20mm}
\setlength{\footskip}{15mm}
\pagestyle{fancy}
\fancyhead{} \fancyfoot{}
% Theorem environments
\newtheorem{thm}{Theorem}[section]
\newtheorem{algorithm}[thm]{Algorithm}
\newtheorem{axiom}[thm]{Axiom}
\newtheorem{lem}[thm]{Lemma}
\newtheorem{example}[thm]{Example}
\newtheorem{exercise}[thm]{Exercise}
\newtheorem{notation}[thm]{Notation}
\newtheorem{problem}[thm]{Problem}
\theoremstyle{proposition}
\newtheorem{prop}{Proposition}[section]
\newtheorem{case}[thm]{Case}
\newtheorem{claim}[thm]{Claim}
\newtheorem{conclusion}[thm]{Conclusion}
\newtheorem{condition}[thm]{Condition}
\newtheorem{conjecture}[thm]{Conjecture}
\newtheorem{cor}[thm]{Corollary}
\newtheorem{criterion}[thm]{Criterion}
\theoremstyle{definition}
\newtheorem{defn}{Definition}[section]
\theoremstyle{remark}
\newtheorem{rem}{Remark}[section]
\newtheorem{solution}[thm]{Solution}
\newtheorem{summary}[thm]{Summary}
\numberwithin{equation}{section}
\renewcommand{\rmdefault}{phv} % Arial
\renewcommand{\sfdefault}{phv} % Arial
\pagenumbering{arabic} % 1, 2, 3, 4, ...
\begin{document}
\hyphenpenalty=100000
\begin{center}
{\Large \textbf{\\Parallel approach to single-source shortest path problem\\on large complete graphs }}\\[5mm]
{\large \textbf{Ruijian An}\\[1mm]}
{\normalsize \emph{Under the supervision of Professor Bogdan Simion}\\[1mm]}
\end{center}
\section{Introduction}\label{I1}
The single-source shortest path (SSSP) problem is a fundamental part of graph theory, with applications in transportation theory, road networks, DNA micro-arrays and many others. Given a weighted graph \emph{G = (V, E)} and a source vertex \emph{v}, the SSSP problem computes the shortest paths from \emph{v} to all other vertices.
There are many efficient sequential solutions to this problem, such as Dijkstra's algorithm and the Bellman-Ford algorithm\cite{corman}. To maximize work efficiency, Dijkstra's algorithm, which runs in $O(|E| + |V|\log|V|)$, utilizes a greedy strategy, processing vertices one by one. On the other hand, the Bellman-Ford algorithm, which runs in $O(|E||V|)$, processes all vertices in every stage. Nevertheless, for large complete graphs, these sequential algorithms take a prohibitive amount of time for time-sensitive applications.
Although Dijkstra's algorithm is sequential in nature, the Bellman-Ford algorithm contains a lot of parallelism, which can be exploited to speed up the processing of large graphs. Our goal is to parallelize the latter algorithm by utilizing the massively parallel architecture of Graphics Processing Units (GPUs) on large complete graphs. By efficiently implementing the algorithm on a GPU, our implementation gains a speedup of 167x over the naive parallel GPU version and 10.6x over the optimized parallel CPU version for large graphs with up to 20000 vertices.
\section{Background}\label{I2}
\subsection{The Bellman-Ford algorithm}
In the Bellman-Ford algorithm, each vertex in the graph has an attribute \emph{d}, which records the tentative distance from the source vertex to itself. The algorithm also employs a technique called \emph{relaxation} to process edges. If an edge (\emph{u,v}) with weight \emph{w(u,v)} is relaxed, \emph{v.d} is replaced by \emph{min(v.d, u.d + w(u,v))}. The Bellman-Ford algorithm relaxes each edge $\left|V\right| - 1$ times, resulting in a time complexity of O($\left|V\right|$$\left|E\right|$).
\subsection{GPU programming model}
NVIDIA's CUDA is a user-friendly compute framework for carrying out general-purpose computations on GPUs\cite{nvi}. A CUDA program contains a kernel function, which is executed on the GPU, using a user-specified number of threads. All threads in a kernel have a unique ID, such that the kernel can assign different work based on the thread IDs. A typical CUDA program consists of three parts: copying the input data to GPU memory, launching a kernel which performs computations on the data and transferring data back to CPU memory. The data transfer time between CPU and GPU memory is typically a scalability bottleneck.
\newpage
\section{Approach}\label{I3}
Our goal is to exploit the inherent data parallelism from the Bellman-Ford algorithm and accelerate it using parallelization techniques. First, we implement a straightforward naive parallelization, both on the CPU and on the GPU. Next, we optimize both implementations by improving work efficiency. Finally, we leverage GPU-specific features and mitigate the overheads of transferring data between main memory and device memory.
The straightforward naive parallelization takes advantage of the inherent data parallelism. First, the shortest path estimates can be initialized in parallel. Secondly, edge relaxation, the most expensive step in the algorithm, can be parallelized. In existing parallel Bellman-Ford algorithms, each thread gets assigned a set of vertices and is responsible for relaxing either all the incoming or all the outgoing edges of its vertices\cite{Busato2016AnEI}\cite{Harish2007}. This approach uses adjacency lists and is well-suited for sparse graphs, where $|E|$ is much smaller than $|V|^2$. We use this as our naive multithreaded CPU implementation and naive GPU implementation.
We observe that for complete graphs, partitioning work based on edges instead of vertices can exploit a higher degree of parallelism. While this approach is not straightforward to implement on the CPU, GPUs provide the ability to structure computations in a two-dimensional grid, where partitioning work by edges comes naturally. We use this observation as the baseline for our optimized GPU implementation and employ an adjacency matrix representation to better enable parallel edge relaxation on the GPU in an efficient manner.
Next, we implement the optimized versions of the parallel CPU and GPU algorithms, which provide better work efficiency. We take advantage of two techniques to discard unnecessary work. In the basic algorithm, all edges are required to be relaxed in each iteration, but not all relaxations are useful. For edge \emph{(u, v)}, the key step of \emph{relaxation} is to compare \emph{v.d} with \emph{u.d + w(u,v)}, so the relaxation is useful only if \emph{u.d} was updated in the previous iteration. Otherwise, the relaxation of \emph{(u, v)} involves redundant work. In the improved implementation, a bit \emph{mask} is used to record whether vertices are active. If a vertex's attribute \emph{d} is modified, then the corresponding bit indicates it is an active vertex. In the subsequent iterations, only edges whose starting vertex is active will be relaxed.
Building on our previous observation, we can also reduce the number of iterations that the basic algorithm executes ($|V|-1$). The program can terminate once all shortest path distances converge, so instead of always running $|V|-1$ iterations, we can terminate early in an iteration when there are no more active vertices. We maintain a \emph{flag} to indicate if any vertices are active during the current iteration, and once the flag becomes unset, we skip the remaining iterations and terminate the execution early.
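To make these ideas concrete, the following is a minimal sketch of an edge-per-thread relaxation kernel combining the adjacency-matrix layout with the \emph{mask} and \emph{flag} techniques described above. It is an illustration under simplifying assumptions (integer edge weights, so that CUDA's built-in \texttt{atomicMin} on integers applies, and distances initialized to a large sentinel well below \texttt{INT\_MAX} so that additions cannot overflow); it is not the exact code used in our experiments.
\begin{verbatim}
// One thread per edge (u,v) of the complete graph, using an
// adjacency-matrix layout w[u*n + v]. Stale reads of d[u] are
// benign: distances only ever decrease, so a missed update is
// simply picked up in a later iteration.
__global__ void relax_edges(const int *w, int *d, const int *mask,
                            int *changed, int n)
{
    int u = blockIdx.y * blockDim.y + threadIdx.y; // source vertex
    int v = blockIdx.x * blockDim.x + threadIdx.x; // target vertex
    if (u >= n || v >= n || u == v) return;
    if (!mask[u]) return;               // skip inactive sources

    int cand = d[u] + w[u * n + v];     // tentative distance via u
    int old  = atomicMin(&d[v], cand);  // relax edge (u,v)
    if (cand < old)
        *changed = 1;                   // a distance improved
}
\end{verbatim}
The host launches this kernel on a two-dimensional grid covering all $|V|^2$ vertex pairs, rebuilds the mask from the updated distances between iterations, and stops as soon as \texttt{changed} remains unset.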
Finally, a serialization bottleneck in our naive GPU implementation is the data transfer to device memory. In the optimized GPU algorithm, we restructure the code to leverage streams, which enable pipelining data transfers with computations.
We create two streams \emph{(stream1, stream2)} and partition the adjacency matrix equally between them. Each part of the matrix is transferred to the device and processed in a different stream. Consequently, computations on the first part of the matrix can be carried out concurrently with transferring the second part of the matrix to the device, roughly halving the exposed transfer time. We can reduce this further by using more streams in the same fashion.
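A minimal host-side sketch of this pipelining follows. Here \texttt{relax\_rows} is a hypothetical variant of the kernel above that only processes sources in a given row range, the variable names are ours, and $n$ is assumed even; only standard CUDA calls (\texttt{cudaStreamCreate}, \texttt{cudaMemcpyAsync}, \texttt{cudaDeviceSynchronize}) are used. Concurrent relaxations across the two streams are safe because distances are only ever lowered atomically.
\begin{verbatim}
// Assumes h_w was allocated with cudaMallocHost (pinned memory),
// so the asynchronous copies can overlap with kernel execution,
// and that d_w, d_d, d_mask, d_changed are already allocated.
cudaStream_t s[2];
for (int i = 0; i < 2; i++)
    cudaStreamCreate(&s[i]);

int    halfRows  = n / 2;
size_t halfBytes = (size_t)halfRows * n * sizeof(int);
dim3 block(16, 16);
dim3 grid((n + 15) / 16, (halfRows + 15) / 16);

for (int i = 0; i < 2; i++) {
    size_t off = (size_t)i * halfRows * n;
    // Copy one half of the adjacency matrix asynchronously...
    cudaMemcpyAsync(d_w + off, h_w + off, halfBytes,
                    cudaMemcpyHostToDevice, s[i]);
    // ...and enqueue the kernel for those rows in the same stream,
    // so it starts as soon as its own copy has completed.
    relax_rows<<<grid, block, 0, s[i]>>>(d_w, d_d, d_mask,
                                         d_changed, n, i * halfRows);
}
cudaDeviceSynchronize();  // wait for both pipelines to finish
\end{verbatim}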
\newpage
\section{Analysis}\label{I4}
We conduct two sets of experiments (using randomly-generated complete graphs): first, we compare a
sequential CPU implementation, a naive multithreaded CPU version (by doing edge relaxation in parallel over 4 physical cores on a Core i7 6700HQ @ 2.6 GHz), and a naive GPU-based algorithm (on an NVIDIA GeForce GTX750 Ti, 640 cores, @ 1100 MHz), all of which are unoptimized. The second graph compares the optimized multithreaded CPU implementation with the optimized GPU algorithm as described in Section 3. We include the basic (unoptimized) \textit{parallel} CPU and GPU implementations as a reference point in the second graph. Both optimized versions enforce the aforementioned work efficiency optimizations, and the latter also leverages GPU streams for overlapping data transfers with computations.\newline
\begin{figure}[h]
\centering
\includegraphics[width=0.8\textwidth]{fig1}
\caption{Straightforward (work inefficient) parallelization on the CPU and GPU (log scale on y axis)}
\end{figure}
\begin{figure}[h]
\centering
\includegraphics[width=0.8\textwidth]{fig2}
\caption{Work-inefficient vs. optimized algorithm implementations (log scale on y axis)}
\label{ab}
\end{figure}
\newpage
In the first comparison, the naive GPU implementation outperforms both sequential CPU and naive multithreaded CPU versions. All implementations perform unnecessary computations, and the GPU implementation also suffers from high data transfer overheads to device memory. Consequently, we restructure parallel algorithms to reduce work inefficiencies, and use GPU streams to overlap memory transfers.
Our results from Figure 2 show that the optimized GPU algorithm outperforms its optimized CPU counterpart, achieving a 10.6x speedup for a large graph with 20000 vertices. When the graph is small, the difference between the two implementations is not significant because only a subset of GPU cores are utilized and CPU cores operate at a much higher clock rate. However, the gap increases with input size, as more GPU cores actively participate in computations, while the same number of CPU cores process increasingly larger tasks. Although our testing limitations did not permit testing with larger graphs (e.g., millions of vertices), we expect this gap will increase even further.
The optimizations and use of streams in the GPU implementation reduce the execution time by 167x compared to the work inefficient GPU version, while the optimized CPU implementation achieves up to 334x speedup over the work inefficient multithreaded CPU implementation, on large graphs.
\section{Conclusion}
Our work proposes an improved parallel Bellman-Ford algorithm implementation. By reworking the algorithm to leverage GPU-specific optimizations, reduce work inefficiencies, and deal with the data transfer overheads using streams, the proposed implementation is 167x faster than a naive GPU implementation, and 10.6x faster than an optimized parallel CPU implementation, on large graphs.
\bibliography{s.bib}
\bibliographystyle{unsrt}
\end{document}
\chapter{Simple Machines}
As mentioned earlier, physicists define work to be the force applied
times the distance it is applied over. So, if you pushed your car 100
meters with 17 newtons of force, you have done 1700 joules of work.
Humans have always had to move really heavy things, so many centuries
ago we developed simple machines to decrease the amount of force
necessary to execute those tasks. These include things like:
\begin{itemize}
\item Levers
\item Pulleys
\item Ramps
\item Gears
\item Hydraulics
\item Screws
\end{itemize}
\includegraphics[width=0.8\textwidth]{Simple_Machines.png}
While these machines can decrease the force needed, they don't change
the amount of work that must be done. So if the force is decreased to
a third, the distance that you must apply the force is increased by a
factor of three.
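In symbols, using the definition of work: applying one third of the force over three times the distance leaves the work unchanged,
$$W = F d = \left( \frac{F}{3} \right) (3d)$$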
``Mechanical gain'' is what we call the increase in force.
\section{Levers}
A lever rotates on a fulcrum. To decrease the necessary force, the load
is placed nearer to the fulcrum than where the force is applied.
In particular, physicists talk about the \newterm{torque} created by a
force. When you push on a lever, the torque is the product of the
force you exert and the distance from the point of rotation.
Torque is typically measured in newton-meters.
To balance two torques, the products must be the same. So, assuming
that the forces are applied in the proper direction,
$$R_L F_L = R_A F_A$$
where $R_L$ and $R_A$ are the distance from the fulcrum to the where
the load's force and the applied force (respectively) are applied, and
$F_L$ and $F_A$ are the amounts of the forces.
\begin{Exercise}[title={Lever}, label=lever]
Paul, who weighs 70 kilograms, sits on a see-saw 4 meters from the
fulcrum. Jan, who weighs 50 kilograms, wants to balance. How far
should Jan sit from the fulcrum?
\end{Exercise}
\begin{Answer}[ref=lever]
Paul is exerting $(70)(9.8)$ newtons of force at 4 meters from the
fulcrum, so he is creating a torque of 2,744 newton-meters
on the see-saw. Jan is exerting $(50)(9.8) = 490$ newtons of
force.
If $r$ is the distance from the fulcrum to Jan's seat, to balance
$490 r = 2744$, so $r = 5.6$ meters.
\end{Answer}
% KA: https://www.khanacademy.org/science/physics/discoveries/simple-machines-explorations/a/lever
\includegraphics[width=0.8\textwidth]{WD=WD.png}
\section{Ramps}
Ramps, or inclined planes, let you roll or slide objects up to a higher
level. Steeper ramps give you less mechanical gain. For example, it is much easier
to roll a ball up a wheelchair ramp than up a skateboard ramp.
% diagram neeeded
Assuming the ramp has a constant steepness, the mechanical gain is
equal to the ratio of the length of the ramp to the amount that it
rises.
If you assume there is no friction, the force with which you must push a weight up the ramp will be:
$$F_A = \frac{V}{L} F_G$$
Where $F_A$ is the force you need to push. $L$ is the length of the
ramp, $V$ is the amount of vertical gain and $F_G$ is the force of
gravity on the mass.
(We haven't talked about the sine function yet, but in case you already know about it: Note that
$$\frac{V}{L} = \sin{\theta}$$
where $\theta$ is the angle between the ramp and level.)
\begin{Exercise}[title={Ramp}, label=ramp]
A barrel of oil weighs 136 kilograms. You can push with a force of
up to 300 newtons. You have to get the barrel onto a platform that is 2
meters. What is the shortest board that you can use as a ramp?
\end{Exercise}
\begin{Answer}[ref=ramp]
To lift the barrel would require $136 \times 9.8 = 1,332.8$ newtons of force.
Letting $L$ be the length of the ramp:
$$300= \frac{2}{L} 1332.8$$
So $L = 8.885$ meters.
\end{Answer}
\section{Gears}
Gears (which might have a chain connecting them like on a bicycle)
have teeth and come in pairs. You apply torque to one gear, and it
applies torque to another. The torque is increased or decreased based
on the ratio between the teeth on the gears.
% ADD: Driver, Driven, Idler
\includegraphics[width=0.8\textwidth]{Gears.png}
If $N_A$ is the number of teeth on the gear you are turning with a
torque of $T_A$, and $N_L$ is the number of teeth on the gear it is
turning, the resulting torque is:
$$T_L = \frac{N_A}{N_L} T_A$$
\begin{Exercise}[title={Gears}, label=gear]
The bicycle is an interesting case because we are not trying to get
mechanical gain. We want to spin the pedals slower with more force.
You like to pedal your bike at 70 revolutions per minute. The
chainring that is connected to your pedals has 53 teeth. The
circumference of your tire is 2.2 meters. You wish to ride at 583 meters
per minute.
How many teeth should the rear sprocket have?
\end{Exercise}
\begin{Answer}[ref=gear]
$$583 = (70)(2.2)\frac{53}{n}$$
Thus $n = 14$ teeth.
\end{Answer}
% KA: https://www.khanacademy.org/science/physics/discoveries/simple-machines-explorations/a/simple-machines-and-how-to-use-this-tutorial
\section{Hydraulics}
In a hydraulic system, like the braking system of a car, you exert
force on a piston filled with fluid. The fluid carries that pressure
into another cylinder. The pressure of the fluid pushes the piston in
that cylinder out.
\includegraphics[width=0.8\textwidth]{Hydraulics.png}
The pressure in the hose can be measured in pounds per square inch
(PSI) or newtons per square meter (pascals, Pa). We will use pascals.
% ADD: Create a page in the back of the book with units
To figure out how much pressure you create, you divide the force by
the area of the piston head you are pushing.
To figure out how much force that creates on the other end, you
multiply the pressure times the area of the piston head that is
pushing the load.
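In symbols (the subscripts here are our own notation: $F_A$ and $A_A$ are the force on and area of the piston you push, while $F_L$ and $A_L$ belong to the piston pushing the load):
$$P = \frac{F_A}{A_A} \qquad F_L = P A_L = \frac{A_L}{A_A} F_A$$
So the mechanical gain is the ratio of the two piston areas.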
\begin{Exercise}[title={Hydraulics}, label=hydraulics]
Your car has disc brakes. When you put 2,500,000 pascals of pressure on the
brake fluid, the car stops quickly. As the car designer, you would like
that to require 12 newtons of force from the driver's foot.
What should the radius of the master cylinder (the one the driver is pushing on) be?
\end{Exercise}
\begin{Answer}[ref=hydraulics]
We are looking for $r$, the radius of the piston head in meters. The area of the piston head is $\pi r^2$.
The pressure in pascals of the brake fluid is given by $12 / (\pi r^2)$.
$$2,500,000 = \frac{12}{\pi r^2}$$
So $r = \sqrt{\frac{12}{\pi \times 2.5 \times 10^6}} \approx 0.00124$ meters, or about $1.24$ millimeters.
\end{Answer}
% KA: https://youtu.be/Pn5YEMwQb4Y
\documentclass[executivepaper]{extarticle}
\usepackage[margin=3.0cm]{geometry}
\usepackage{fancyhdr}
\usepackage{extsizes}
\usepackage{enumerate}
\usepackage{enumitem}
\usepackage{mathtools}
\begin{document}
\vspace*{-40mm}
\begin{center}
\textbf{Modern Class Notes}
\end{center}
\section*{1/12/16}
\vspace{-5.5mm}
\noindent \rule{2cm}{0.5pt}
A group is an object that tries to describe the symmetry of something \\
Going to study:
\begin{enumerate}
\item Permutation groups
\item Matrix groups
\item Cosets and Lagrange's Theorem
\item And other constructs
\end{enumerate}
Review of some discrete math stuff: \\
\begin{enumerate}
\item A function $f: X \rightarrow Y$ (where $X$ and $Y$ are sets) is an assignment of an element of $Y$ to each element of $X$
\item Fact: let $f: X \rightarrow Y$ be a function. Then:
\begin{enumerate}
\item $f$ is invertible $\iff f$ is one-to-one and onto
\end{enumerate}
\end{enumerate}
Ex) Let $X = \{1, 2, 3, \ldots, n\}$\\
$\pi(1)=2$, $\pi(2)=3$, \ldots, $\pi(n-1)=n$, $\pi(n)=1$\\
A permutation is a one-to-one and onto function from a finite set to itself \\
At least 3 ways of recording a permutation: \\
\begin{enumerate}
\item Two line notation
\item One line notation (works for permutations of order up to 9)
\item Cycle notation: start with a number and write down where it goes, then repeat this process for the rest of the numbers (see the example below)
\end{enumerate}
\end{document}
% !TEX TS-program = lualatex
\documentclass[notoc,notitlepage]{tufte-book}
% \nonstopmode % uncomment to enable nonstopmode
\usepackage{classnotetitle}
\title{PMATH450 --- Lebesgue Integration and Fourier Analysis}
\author{Johnson Ng}
\subtitle{Classnotes for Spring 2019}
\credentials{BMath (Hons), Pure Mathematics major, Actuarial Science Minor}
\institution{University of Waterloo}
\usepackage{fontspec}
\input{latex-classnotes-preamble.tex}
\setmainfont[Ligatures=TeX]{P052}
\theoremprework{\textcolor{cyan}{\hrule height 2pt width \textwidth}}
\theoremheaderfont{\color{cyan}\normalfont\bfseries}
\theorempostwork{\textcolor{cyan}{\hrule height 2pt width \textwidth}}
\theoremindent10pt
\newtheorem*{culture}{\faWineGlass Culture}
\theoremheaderfont{\color{foreground}\normalfont\bfseries}
\newtheorem{assgprob}{Question}[section]
\renewcommand{\theassgprob}{\arabic{assgprob}}
\newcommand{\Bor}{\mathfrak{Bor}}
\DeclareMathOperator{\SIMP}{SIMP}
\DeclareMathOperator{\STEP}{STEP}
\DeclareMathOperator{\Span}{span}
\DeclareMathOperator{\dist}{dist}
\DeclareMathOperator{\Trig}{Trig}
\DeclareMathOperator{\dm}{dm}
\usepackage{scalerel}
\DeclareMathOperator*{\bigcdot}{\scalerel*{\cdot}{\bigodot}}
% disjoint union
\makeatletter
\providerobustcmd*{\bigcupdot}{%
\mathop{%
\mathpalette\bigop@dot\bigcup
}%
}
\newrobustcmd*{\bigop@dot}[2]{%
\setbox0=\hbox{$\m@th#1#2$}%
\vbox{%
\lineskiplimit=\maxdimen
\lineskip=-0.7\dimexpr\ht0+\dp0\relax
\ialign{%
\hfil##\hfil\cr
$\m@th\cdot$\cr
\box0\cr
}%
}%
}
\makeatother
\begin{document}
\input{latex-classnotes-header.tex}
\chapter*{Preface}%
\label{chp:preface}
\addcontentsline{toc}{chapter}{Preface}
% chapter preface
The pre-requisite to this course is Real Analysis. We will use a lot of the
concepts introduced in Real Analysis, at times without explicitly stating it.
Refer to \href{https://tex.japorized.ink/PMATH351F18/classnotes.pdf}{notes on
PMATH351}.
This course is spiritually broken into 2 pieces:
\begin{itemize}
\item Lebesgue Integration; and
\item Fourier Analysis,
\end{itemize}
which is as the name of the course.
In this set of notes, we use a special topic environment called
\hlnotea{culture} to discuss interesting contents related to the course, but
will not be throughly studied and not tested on exams.
For some unknown reason, mysterious glyphs are replacing common math
characters in an inconsistent way, and I have not the faintest idea
how this is happening, or why this is happening.
The dark version of the notes does not seem to have this problem.
Please use that version of the notes for a cleaner reference.
If you have any idea what is causing the weird glitch,
or a solution, please shoot me an issue at
\url{https://gitlab.com/japorized/TeX_notes/issues}.
% chapter preface (end)
\chapter{Lecture 1 May 07th 2019}%
\label{chp:lecture_1_may_07th_2019}
% chapter lecture_1_may_07th_2019
Since many of our results work for both $\mathbb{C}$ and $\mathbb{R}$, we shall
use $\mathbb{K}$ throughout this course to represent either $\mathbb{C}$ or
$\mathbb{R}$.
\section{Riemannian Integration}%
\label{sec:riemannian_integration}
% section riemannian_integration
\begin{defn}[Norm and Semi-Norm]\index{Norm}\index{Semi-Norm}\label{defn:semi_norm}
Let $V$ be a vector space over $\mathbb{K}$. We define a
\hlnoteb{semi-norm} on $V$ as a function
\begin{equation*}
\nu : V \to \mathbb{R}
\end{equation*}
that satisfies
\begin{enumerate}
\item (\hlnotea{Positive Semi-Definite}) $\nu(x) \geq 0$ for all $x \in V$;
\label{item:cond1_semi_norm}
\item $\nu(\kappa x) = \abs{\kappa} \nu(x)$ for any $\kappa \in \mathbb{K}$
and $x \in V$; and \label{item:cond2_semi_norm}
\item (\hlnotea{Triangle Inequality}) $\nu(x + y) \leq \nu(x) + \nu(y)$ for
all $x, y \in V$. \label{item:cond3_semi_norm}
\end{enumerate}
If $\nu(x) = 0 \implies x = 0$, then we say that $\nu$ is a \hlnoteb{norm}. In
this case, we usually write $\norm{\cdot}$ to denote the norm, instead of
$\nu$.
\end{defn}
\begin{note}
\begin{itemize}
\item We sometimes call a semi-norm a \hldefn{pseudo-length}.
\end{itemize}
\end{note}
\begin{remark}
Notice that we wrote $\nu(x) = 0 \implies x = 0$ instead of $\nu(x) = 0 \iff x
= 0$. This is because the reverse implication is automatic: if $z = 0 \in V$, then
\begin{equation*}
\nu(z) = \nu(0 z) = |0| \, \nu(z) = 0.
\end{equation*}
\end{remark}
\begin{ex}
Show that if $\nu$ is a semi-norm on a vector space $V$, then $\forall x, y
\in V$,
\begin{equation*}
\abs{\nu(x) - \nu(y)} \leq \nu(x - y).
\end{equation*}
\end{ex}
\begin{proof}
By condition (\ref{item:cond3_semi_norm}),
\begin{equation*}
\nu(x) = \nu((x - y) + y) \leq \nu(x - y) + \nu(y),
\end{equation*}
so that $\nu(x) - \nu(y) \leq \nu(x - y)$. Exchanging the roles of $x$ and
$y$, and using condition (\ref{item:cond2_semi_norm}) with $\kappa = -1$,
\begin{equation*}
\nu(y) - \nu(x) \leq \nu(y - x) = \nu(-(x - y)) = \nu(x - y).
\end{equation*}
It follows that indeed
\begin{equation*}
\abs{\nu(x) - \nu(y)} \leq \nu(x - y).
\end{equation*}
\end{proof}
\begin{eg}
The absolute value $\abs{\cdot}$ is a \hlnotea{norm} on $\mathbb{K}$.
\end{eg}
\begin{eg}[$p$-norms]\label{eg:p_norms}
Consider $N \geq 1$ an integer. We define a family of norms on
\begin{equation*}
\mathbb{K}^N = \underbrace{\mathbb{K} \times \mathbb{K} \times \hdots \times \mathbb{K}}_{N \text{
times }}.
\end{equation*}
\hlbnoteb{$1$-norm}
\begin{equation*}
\norm{(x_n)_{n=1}^{N}}_{1} \coloneqq \sum_{n=1}^{N} \abs{x_n}.
\end{equation*}
\hlbnoteb{Infinity-norm, $\infty$-norm}
\begin{equation*}
\norm{(x_n)_{n=1}^{N}}_{\infty} \coloneqq \max_{1 \leq n \leq N} \abs{x_n}.
\end{equation*}
\hlbnoteb{Euclidean-norm, $2$-norm}
\begin{equation*}
\norm{(x_n)_{n=1}^{N}}_2 \coloneqq \left( \sum_{n=1}^{N} \abs{x_n}^2
\right)^{\frac{1}{2}}
\end{equation*}
It is relatively easy to check that the above norms are indeed norms, except
for the $2$-norm. In particular, the \hlnotea{triangle inequality} is not as
easy to show \sidenote{See
\href{https://tex.japorized.ink/PMATH351F18/classnotes.pdf\#thm.29}{Minkowski's
Inequality}.}.
Less obviously so, but true nonetheless, we can define the following $p$-norms
on $\mathbb{K}^N$ :
\begin{equation*}
\norm{(x_n)_{n=1}^N}_p \coloneqq \left( \sum_{n=1}^{N} \abs{x_n}^p
\right)^{\frac{1}{p}},
\end{equation*}
for $1 \leq p < \infty$.
\end{eg}
\begin{culture}
Consider $V = \mathbb{M}_n(\mathbb{C})$, \sidenote{Note that
$\mathbb{M}_n(\mathbb{C})$ is the set of $n \times n$ matrices over
$\mathbb{C}$.} where $n \in \mathbb{N}$ is fixed.
For $T \in \mathbb{M}_n(\mathbb{C})$, we define the \hlnotea{singular numbers}
of $T$ to be
\begin{equation*}
s_1(T) \geq s_2(T) \geq \hdots \geq s_n(T) \geq 0,
\end{equation*}
where $\sigma(T^* T) = \{ s_1(T)^2, s_2(T)^2, \ldots, s_n(T)^2 \}$, including
multiplicity. Then we can define
\begin{equation*}
\norm{T}_p \coloneqq \left( \sum_{i=1}^{n} s_i(T)^p \right)^{\frac{1}{p}}
\end{equation*}
for $1 \leq p < \infty$, which is called the $p$-norm of $T$ on
$\mathbb{M}_n(\mathbb{C})$.
\end{culture}
\begin{eg}
Let
\begin{equation*}
V = \mathcal{C}([0, 1], \mathbb{K}) = \{ f : [0, 1] \to \mathbb{K} \mid f
\text{ is continuous } \}.
\end{equation*}
Then
\begin{equation*}
\norm{f}_{\sup} \coloneqq \sup \{ \abs{f(x)} \mid x \in [0, 1] \}
\end{equation*}
\sidenote{Some authors use $\norm{f}_\infty$, but we will have the notation
$\norm{[f]}_\infty$ later on, and so we shall use $\norm{f}_{\sup}$ for
clarity.} defines a norm on $\mathcal{C}([0, 1], \mathbb{K})$.
A sequence $(f_n)_{n=1}^{\infty}$ in $V$ converges in this norm to some $f
\in V$, i.e.
\begin{equation*}
\lim_{n \to \infty} \norm{f_n - f}_{\sup} = 0,
\end{equation*}
which means that $(f_n)_{n=1}^{\infty}$ converges uniformly to $f$ on $[0,
1]$.
\end{eg}
\begin{defn}[Normed Linear Space]\index{Normed Linear Space}\label{defn:normed_linear_space}
A \hlnoteb{normed linear space (NLS)} is a pair $(V, \norm{\cdot})$ where $V$
is a vector space over $\mathbb{K}$ and $\norm\cdot$ is a norm on $V$.
\end{defn}
\begin{defn}[Metric]\index{Metric}\label{defn:metric}
Given an NLS $(V, \norm{\cdot})$, we can define a \hldefn{metric} $d$ on $V$ (called the
\hlnotea{metric induced by the norm}) as follows:
\begin{equation*}
d : V \times V \to \mathbb{R} \quad d(x, y) = \norm{x - y},
\end{equation*}
such that
\begin{itemize}
\item $d(x, y) \geq 0$ for all $x, y \in V$ and $d(x, y) = 0 \iff x = y$;
\item $d(x, y) = d(y, x)$; and
\item $d(x, y) \leq d(x, z) + d(z, y)$.
\end{itemize}
\end{defn}
\begin{note}
Every norm induces a metric in this way, and so any space that has a norm is
in particular a metric space.
\end{note}
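For instance, the triangle inequality for the induced metric is inherited directly from the triangle inequality for the norm:
\begin{equation*}
d(x, y) = \norm{x - y} = \norm{(x - z) + (z - y)} \leq \norm{x - z} + \norm{z - y} = d(x, z) + d(z, y).
\end{equation*}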
\begin{defn}[Banach Space]\index{Banach Space}\label{defn:banach_space}
We say that an NLS $(V, \norm{\cdot})$ is \hldefn{complete} or is a
\hlnoteb{Banach Space} if the corresponding $(V, d)$, where $d$ is the metric
induced by the norm, is complete \sidenote{Completeness of a metric space is
such that any of its Cauchy sequences converges in the space.}.
\end{defn}
\begin{eg}
$(\mathcal{C}([0, 1], \mathbb{K}), \norm{\cdot}_{\sup})$ is a Banach space.
\end{eg}
\begin{eg}
We can define a $1$-norm $\norm{\cdot}_1$ on $\mathcal{C}([0, 1], \mathbb{K})$
via
\begin{equation*}
\norm{f}_1 \coloneqq \int_{0}^{1} \abs{f}.
\end{equation*}
Then $(\mathcal{C}([0, 1], \mathbb{K}), \norm{\cdot}_1)$ is an NLS.
\end{eg}
\begin{ex}
Show that $(\mathcal{C}([0, 1], \mathbb{K}), \norm{\cdot}_1)$ is not
complete, which will then give us an example of a \hlimpo{normed linear space
that is not Banach}.
\end{ex}
\begin{proof}
Consider the sequence $(f_n)_{n=1}^{\infty}$ of continuous functions given by
\begin{marginfigure}
\centering
\begin{tikzpicture}
\draw[->] (-0.5, 0) -- (4, 0) node[right] {$x$};
\draw[->] (0, -0.5) -- (0, 2) node[above] {$y$};
\draw[line width=1.5pt,color=blue] (0, 0) -- (0.5, 0) -- (3.5, 1) -- (4, 1);
\draw[line width=1.5pt,color=red] (0, 0) -- (0.5, 0) -- (1.5, 1) -- (4, 1);
\node[circle,fill,inner sep=1pt,label={270:{$\frac{1}{2}$}}] at (0.5, 0) {};
\node[circle,fill,inner sep=1pt,label={270:{$\frac{1}{2} + \frac{1}{m}$}}]
at (1.5, 0) {};
\node[circle,fill,inner sep=1pt,label={270:{$\frac{1}{2} + \frac{1}{n}$}}]
at (3.5, 0) {};
\end{tikzpicture}
\caption{Sequence of functions $(f_n)_{n=1}^{\infty}$. We show for two indices $n < m$.}\label{fig:sequence_of_functions_f_n___n_1_infty_we_show_for_two_indices_n_m_}
\end{marginfigure}
\begin{equation*}
f_n(x) = \begin{cases}
0 & 0 \leq x < \frac{1}{2} \\
n \left( x - \frac{1}{2} \right) & \frac{1}{2} \leq x \leq \frac{1}{2} +
\frac{1}{n} \\
1 & \text{ otherwise }
\end{cases}
\end{equation*}
Note that the sequence $(f_n)_{n=1}^{\infty}$ is indeed \hlnotea{Cauchy} in
$\norm{\cdot}_1$: for $m \geq n$, we have $f_m \geq f_n$, and the two
functions differ only on $\left[ \frac{1}{2}, \frac{1}{2} + \frac{1}{n}
\right]$, so
\begin{align*}
\norm{f_m - f_n}_1
&= \int_{0}^{1} (f_m - f_n)
= \left( \frac{1}{2} - \frac{1}{2m} \right) - \left( \frac{1}{2} -
\frac{1}{2n} \right) \\
&= \frac{1}{2} \left( \frac{1}{n} - \frac{1}{m} \right) \leq \frac{1}{2n},
\end{align*}
which is less than any given $\epsilon > 0$ once $n, m \geq N >
\frac{1}{2\epsilon}$.
However, $\norm{f_n - f}_1 = \frac{1}{2n} \to 0$, where $f$ is the piecewise
(in particular, non-continuous) function
\begin{equation*}
f(x) = \begin{cases}
0 & 0 \leq x < \frac{1}{2} \\
1 & x \geq \frac{1}{2}
\end{cases}.
\end{equation*}
If some continuous $g$ satisfied $\norm{f_n - g}_1 \to 0$, then $\norm{f -
g}_1 = 0$, forcing $g = 0$ on $\left[ 0, \frac{1}{2} \right)$ and $g = 1$ on
$\left( \frac{1}{2}, 1 \right]$, contradicting the continuity of $g$ at
$\frac{1}{2}$. Hence this Cauchy sequence has no limit in the space, and
$(\mathcal{C}([0, 1], \mathbb{K}), \norm{\cdot}_1)$ is not complete.
\end{proof}
\begin{eg}
If $(\mathfrak{X}, \norm{\cdot}_{\mathfrak{X}})$ and $(\mathfrak{Y},
\norm{\cdot}_{\mathfrak{Y}})$ are NLS's, and if $T : \mathfrak{X} \to
\mathfrak{Y}$ is a linear map, we define the \hldefn{operator norm} of $T$ to
be
\begin{equation*}
\norm{T} \coloneqq \sup \{ \norm{T(x)}_{\mathfrak{Y}} \mid
\norm{x}_{\mathfrak{X}} \leq 1 \}.
\end{equation*}
We set
\begin{equation*}
B(\mathfrak{X}, \mathfrak{Y}) \coloneqq
\{ T : \mathfrak{X} \to \mathfrak{Y} \mid T \text{ is linear }, \, \norm{T}
< \infty \}.
\end{equation*}
Note that for any such linear map $T$, $\norm{T} < \infty \iff T$ is
continuous. Thus $B(\mathfrak{X}, \mathfrak{Y})$ is the set of all continuous
linear maps from $\mathfrak{X}$ into $\mathfrak{Y}$.
Then $(B(\mathfrak{X}, \mathfrak{Y}), \norm{\cdot})$ is an NLS.
\end{eg}
\marginnote{It is likely that we have seen this in Real Analysis.}
\begin{ex}
Show that $(B(\mathfrak{X}, \mathfrak{Y}), \norm{\cdot})$ is complete iff
$(\mathfrak{Y}, \norm{\cdot}_{\mathfrak{Y}})$ is complete.
\end{ex}
\begin{note}
One example of the last example is when $(\mathfrak{Y},
\norm{\cdot}_{\mathfrak{Y}}) = (\mathbb{K}, \abs{\cdot})$. In this case,
$B(\mathfrak{X}, \mathbb{K})$ is known as the \hlnotea{dual space} of
$\mathfrak{X}$, or simply the \hlnotea{dual} of $\mathfrak{X}$.
\end{note}
We are interested in integrating functions that take values in Banach spaces.
\begin{defn}[Partition of a Set]\index{Partition}\label{defn:partition}
Let $(\mathfrak{X}, \norm{\cdot}_{\mathfrak{X}})$ be a
\hyperref[defn:banach_space]{Banach space} and $f:[a, b] \to \mathfrak{X}$ a
function, where $a < b \in \mathbb{R}$. A \hlnoteb{partition} $P$ of $[a, b]$
is a finite set
\begin{equation*}
P = \{ a = p_0 < p_1 < \hdots < p_N = b \}
\end{equation*}
for some $N \geq 1$. The set of all partitions of $[a, b]$ is denoted by
$\mathcal{P}[a, b]$.
\end{defn}
\begin{defn}[Test Values]\index{Test Values}\label{defn:test_values}
Let $(\mathfrak{X}, \norm{\cdot}_{\mathfrak{X}})$ be a
\hyperref[defn:banach_space]{Banach space} and $f:[a, b] \to \mathfrak{X}$ a
function, where $a < b \in \mathbb{R}$. Let $P \in \mathcal{P}[a, b]$. A set
\begin{equation*}
P^* \coloneqq \{ p_k^* \}_{k = 1}^{N}
\end{equation*}
satisfying
\begin{equation*}
p_{k-1} \leq p_k^* \leq p_k, \text{ for } 1 \leq k \leq N
\end{equation*}
is called a set of \hlnoteb{test values} for $P$.
\end{defn}
\begin{defn}[Riemann Sum]\index{Riemann Sum}\label{defn:riemann_sum}
Let $(\mathfrak{X}, \norm{\cdot}_{\mathfrak{X}})$ be a
\hyperref[defn:banach_space]{Banach space} and $f:[a, b] \to \mathfrak{X}$ a
function, where $a < b \in \mathbb{R}$. Let $P \in \mathcal{P}[a, b]$ and
$P^*$ its corresponding set of test values. We define the \hlnoteb{Riemann
sum} as
\begin{equation*}
S(f, P, P^*) = \sum_{k=1}^{N} f(p_k^*)(p_k - p_{k-1}).
\end{equation*}
\end{defn}
\begin{remark}
\begin{enumerate}
\item Note that by \cref{defn:partition}, $p_k - p_{k-1} > 0$.
\item When $(\mathfrak{X}, \norm{\cdot}) = (\mathbb{R}, \abs{\cdot})$, then
this is the usual Riemann sum from first-year calculus.
\item In general, note that
\begin{equation*}
\frac{1}{b - a} S(f, P, P^*) = \sum_{k=1}^{N} \lambda_k f(p_k^*),
\end{equation*}
where $0 < \lambda_k = \frac{p_k - p_{k-1}}{b - a} < 1$ and \sidenote{via
the fact that the $\lambda_k$'s form a telescoping sum}
\begin{equation*}
\sum_{k=1}^{N} \lambda_k = 1.
\end{equation*}
So $\frac{1}{b - a} S(f, P, P^*)$ is an \hlnotea{averaging} of $f$ over
$[a, b]$. We call $\frac{1}{b - a} S(f, P, P^*)$ the \hldefn{convex
combination} of the $f(p_{k}^*)$'s.
\end{enumerate}
\end{remark}
\begin{eg}[Silly example]
Let $(\mathfrak{X} = \mathcal{C}([-\pi, \pi], \mathbb{K}),
\norm{\cdot}_{\sup})$. Let
\begin{equation*}
f : [-\pi, \pi] \to \mathfrak{X} \text{ such that } x \mapsto e^{2\pi x} \sin
7 \theta + \cos x \cos (12 \theta),
\end{equation*}
where $\theta \in [-\pi, \pi]$. Now if we consider the partition
\begin{equation*}
P = \left\{ - \pi, \frac{1}{10}, \frac{1}{2}, \pi \right\}
\end{equation*}
and its corresponding test value
\begin{equation*}
P^* = \left\{ 0, \frac{1}{3}, 2 \right\},
\end{equation*}
then
\begin{align*}
S(f, P, P^*)
&= f(0) \left( \frac{1}{10} + \pi \right)
+ f \left( \frac{1}{3} \right) \left( \frac{1}{2} - \frac{1}{10} \right)
+ f(2) \left( \pi - \frac{1}{2} \right) \\
&= (\sin 7 \theta + \cos 12 \theta) \left( \pi + \frac{1}{10} \right) \\
&\quad + \left( e^{\frac{2\pi}{3}} \sin 7\theta +
\cos\frac{1}{3} \cos 12\theta \right) \left( \frac{2}{5} \right) \\
&\quad + ( e^{4 \pi} \sin 7\theta + \cos 2 \cos 12\theta ) \left( \pi -
\frac{1}{2} \right)
\end{align*}
\end{eg}
\begin{defn}[Refinement of a Partition]\index{Refinement}\label{defn:refinement_of_a_partition}
Let $a < b \in \mathbb{R}$, and $P \in \mathcal{P}[a, b]$. We say $Q$ is a
\hlnoteb{refinement} of $P$ if $Q \in \mathcal{P}[a, b]$ and $P \subseteq Q$.
\end{defn}
\begin{note}
In simpler words, $Q$ is a ``finer'' partition that is based on $P$.
\end{note}
\begin{defn}[Riemann Integrable]\index{Riemann Integrable}\label{defn:riemann_integrable}
Let $a < b \in \mathbb{R}$, $(\mathfrak{X}, \norm{\cdot}_{\mathfrak{X}})$ be a
Banach space and $f : [a, b] \to \mathfrak{X}$ be a function. We say that $f$
is \hlnoteb{Riemann integrable} over $[a, b]$ if $\exists x_0 \in
\mathfrak{X}$ such that
\begin{equation*}
\forall \epsilon > 0 \quad \exists P \in \mathcal{P}[a, b],
\end{equation*}
such that if $Q$ is any refinement of $P$, and $Q^*$ is any set of test values
of $Q$, then
\begin{equation*}
\norm{x_0 - S(f, Q, Q^*)}_{\mathfrak{X}} < \epsilon.
\end{equation*}
In this case, we write
\begin{equation*}
\int_{a}^{b} f = x_0.
\end{equation*}
\end{defn}
\begin{propo}[Uniqueness of the Riemann Integral]\label{propo:uniqueness_of_the_riemann_integral}
If $f$ is Riemann integrable over $[a, b]$, then the value of $\int_{a}^{b} f$
is unique.
\end{propo}
\begin{proof}
Suppose not, i.e.
\begin{equation*}
\int_{a}^{b} f = x_0 \text{ and } \int_{a}^{b} f = y_0
\end{equation*}
for some $x_0 \neq y_0$. Then, let
\begin{equation*}
\epsilon = \frac{\norm{x_0 - y_0}}{2},
\end{equation*}
which is $> 0$ since $\norm{x_0 - y_0} > 0$. Let $P_{x_0}, P_{y_0} \in
\mathcal{P}[a, b]$ be partitions corresponding to $x_0$ and $y_0$ as in the
definition of Riemann integrability.
Then, let $R = P_{x_0} \cup P_{y_0}$, so that $R$ is a \hldefn{common
refinement} of $P_{x_0}$ and $P_{y_0}$. If $Q$ is any refinement of $R$, then
$Q$ is also a common refinement of $P_{x_0}$ and $P_{y_0}$. Then for any test
values $Q^*$ of $Q$, we have
\begin{align*}
2 \epsilon &= \norm{x_0 - y_0} \\
&\leq \norm{x_0 - S(f, Q, Q^*)} + \norm{S(f, Q, Q^*) - y_0} <
\epsilon + \epsilon = 2 \epsilon,
\end{align*}
which is a contradiction.
Thus $x_0 = y_0$ as required.
\end{proof}
\begin{thm}[Cauchy Criterion of Riemann Integrability]\index{Cauchy Criterion of Riemann Integrability}\label{thm:cauchy_criterion_of_riemann_integrability}
Let $(\mathfrak{X}, \norm{\cdot}_{\mathfrak{X}})$ be a Banach space, $a < b
\in \mathbb{R}$ and $f : [a, b] \to \mathfrak{X}$ be a function. TFAE:
\begin{enumerate}
\item $f$ is Riemann integrable over $[a, b]$;
\item $\forall \epsilon > 0, \, R \in \mathcal{P}[a, b]$, if $P, Q$ is any
refinement of $R$, and $P^*$ (respectively $Q^*$) is any test values of
$P$ (respectively $Q$), then
\begin{equation*}
\norm{S(f, P, P^*) - S(f, Q, Q^*)}_{\mathfrak{X}} < \epsilon.
\end{equation*}
\end{enumerate}
\end{thm}
\begin{proof}
\hlbnoted{$\implies$} This is a rather straightforward proof. Suppose $P, Q
\in \mathcal{P}[a, b]$ is some refinement of the given partition $R \in
\mathcal{P}[a, b]$, and $P^*, Q^*$ any test values for $P, Q$, respectively.
Then by assumption and \cref{propo:uniqueness_of_the_riemann_integral},
$\exists x_0 \in \mathfrak{X}$ such that
\begin{equation*}
\norm{x_0 - S(f, P, P^*)}_{\mathfrak{X}} < \frac{\epsilon}{2} \text{ and }
\norm{x_0 - S(f, Q, Q^*)}_{\mathfrak{X}} < \frac{\epsilon}{2}.
\end{equation*}
It follows that
\begin{align*}
&\norm{S(f,P,P^*) - S(f,Q,Q^*)}_{\mathfrak{X}} \\
&\leq \norm{x_0 - S(f,P,P^*)}_{\mathfrak{X}} + \norm{x_0 -
S(f,Q,Q^*)}_{\mathfrak{X} } \\
&< \frac{\epsilon}{2} + \frac{\epsilon}{2} = \epsilon.
\end{align*}
\noindent
\hlbnoted{$\impliedby$} Applying the hypothesis with $\epsilon = \frac{1}{n}$ for each
$n \geq 1$, we obtain a partition $R_n \in
\mathcal{P}[a, b]$ such that if $P, Q$ are any refinements of $R_n$, and
$P^*, Q^*$ are the respective arbitrary test values, then
\begin{equation*}
\norm{S(f,P,P^*) - S(f,Q,Q^*)}_{\mathfrak{X}} < \frac{1}{n}
\end{equation*}
Now for each $n \geq 1$, define
\begin{equation*}
W_n \coloneqq \bigcup_{k=1}^{n} R_k \in \mathcal{P}[a, b],
\end{equation*}
so that $W_n$ is a common refinement for $R_1, R_2, \ldots, R_n$. For each $n
\geq 1$, let $W_n^*$ be an arbitrary set of test values for $W_n$. For
simplicity, let us write
\begin{equation*}
x_n = S(f, \, W_n, \, W_n^*), \text{ for each } n \geq 1.
\end{equation*}
\sidenote{Note that it would be nice if the finer and finer partitions
that we have constructed, i.e. the $W_n$'s, gave us a convergent sequence of
Riemann sums, since it makes sense that this limit should be the final
value that we want.}
\noindent
\hlbnotec{Claim: $(x_n)_{n=1}^{\infty}$ is a Cauchy sequence}
If $n_1 \geq n_2 > N \in \mathbb{N}$, then
\begin{align*}
\norm{x_{n_1} - x_{n_2}}_{\mathfrak{X}} &= \norm{S(f,W_{n_1},W_{n_1}^*) -
S(f,W_{n_2},W_{n_2}^*)} < \frac{1}{N}
\end{align*}
by our assumption, since $W_{n_1}, W_{n_2}$ are refinements of $R_N$. Then for
any $\epsilon > 0$, by picking $N > \frac{1}{\epsilon}$, we have that
$(x_n)_{n=1}^{\infty}$ is indeed a Cauchy sequence in $\mathfrak{X}$.
Since $\mathfrak{X}$ is a Banach space, it is complete, and so $\exists x_0
\coloneqq \lim\limits_{n \to \infty} x_n \in \mathfrak{X}$. It remains to show
that, indeed,
\begin{equation*}
x_0 = \int_{a}^{b} f.
\end{equation*}
Let $\epsilon > 0$, and choose $N \geq 1$ such that
\begin{itemize}
\item $\frac{1}{N} < \frac{\epsilon}{2}$; and
\item $k \geq N$ implies that $\norm{x_k - x_0} < \frac{\epsilon}{2}$.
\end{itemize}
Then suppose that $V$ is any refinement of $W_N$, and $V^*$ is an arbitrary
set of test values of $V$. Then we have
\begin{align*}
\norm{x_0 - S(f,V,V^*)}_{\mathfrak{X}}
&\leq \norm{x_0 - x_N}_{\mathfrak{X}} + \norm{x_N -
S(f,V,V^*)}_{\mathfrak{X}} \\
&< \frac{\epsilon}{2} + \norm{S(f,W_N,W_N^*) - S(f,V,V^*)}_{\mathfrak{X}} \\
&<\frac{\epsilon}{2} + \frac{1}{N} \leq \frac{\epsilon}{2} +
\frac{\epsilon}{2} = \epsilon.
\end{align*}
It follows that
\begin{equation*}
\int_{a}^{b} f = x_0,
\end{equation*}
as desired.
\end{proof}
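As a quick sanity check of the criterion (an illustration of my own, not from
the lecture), consider the simplest possible function.
\begin{eg}
  Let $c \in \mathfrak{X}$, and let $f : [a, b] \to \mathfrak{X}$ be the
  constant function $f(t) = c$. For any partition $P = \{ a = p_0 < p_1 <
  \hdots < p_N = b \}$ and any test values $P^*$, the Riemann sum telescopes:
  \begin{equation*}
    S(f, P, P^*) = \sum_{k=1}^{N} f(p_k^*)(p_k - p_{k-1}) = c \sum_{k=1}^{N}
    (p_k - p_{k-1}) = c(b - a).
  \end{equation*}
  Hence any two Riemann sums agree exactly, the Cauchy criterion holds with
  any choice of $R$, and $\int_{a}^{b} f = c(b - a)$.
\end{eg}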
In first-year calculus, all continuous functions on a closed bounded interval
$[a, b] \subseteq \mathbb{R}$ are Riemann integrable. A similar result holds
for Banach space-valued functions as well.
In the next lecture, we shall prove the following theorem.
\begin{thmnonum}[Continuous Functions are Riemann Integrable]
Let $(\mathfrak{X}, \norm{\cdot})$ be a Banach space and $a < b \in
\mathbb{R}$. If $f : [a, b] \to \mathfrak{X}$ is continuous, then $f$ is
Riemann integrable over $[a, b]$.
\end{thmnonum}
% section riemannian_integration (end)
% chapter lecture_1_may_07th_2019 (end)
\chapter{Lecture 2 May 9th 2019}%
\label{chp:lecture_2_may_9th_2019}
% chapter lecture_2_may_9th_2019
\section{Riemann Integration (Continued)}%
\label{sec:riemannian_integration_continued}
% section riemannian_integration_continued
We shall now prove the last theorem stated in class.
\begin{thm}[Continuous Functions are Riemann Integrable]\label{thm:continuous_functions_are_riemann_integrable}
Let $(\mathfrak{X}, \norm{\cdot})$ be a Banach space and $a < b \in
\mathbb{R}$. If $f : [a, b] \to \mathfrak{X}$ is continuous, then $f$ is
Riemann integrable over $[a, b]$.
\end{thm}
\begin{strategy}
This is rather routine should one have gone through a few courses on analysis,
especially introductory courses that involve Riemann integration.
We shall show that if $P_N \in \mathcal{P}[a, b]$ is a partition of $[a, b]$
into $2^N$ subintervals of equal length $\frac{b - a}{2^N}$, and if we use
$P_N^* = P_N \setminus \{ a \}$ as the set of test values for $P_N$, which
consists of the right-endpoints of each of the subintervals in $P_N$, then the
sequence $(S(f, P_N, P_N^*))_{N = 1}^{\infty}$ converges in $\mathfrak{X}$ to
$\int_{a}^{b} f$.
Note that this choice of partitions is a valid move: each $P_{N+1}$ is a
refinement of $P_N$, so the
\hyperref[thm:cauchy_criterion_of_riemann_integrability]{Cauchy criterion}
lets us work along this single chain of refinements.
\end{strategy}
\begin{proof}
First, note that since $[a, b]$ is closed and bounded in $\mathbb{R}$, it is
compact. Also, we have that $\mathfrak{X}$ is a metric space (via the metric induced by
the norm). This means that \hlimpo{any continuous function $f$ on $[a, b]$ is
uniformly continuous on $[a, b]$}. In other words,
\begin{gather*}
\forall \epsilon > 0 \enspace \exists \delta > 0 \enspace \forall x, y \in [a, b] \\
\abs{x - y} < \delta \implies \norm{f(x) - f(y)} < \frac{\epsilon}{2(b - a)}.
\end{gather*}
\noindent
\hlbnoted{Claim: $(S(f, P_N, P_N^*))_{N=1}^{\infty}$ is Cauchy}
Now by picking $P_N \in \mathcal{P}[a, b]$ and set of test values $P_N^*$ as
described in the strategy above, we proceed by picking $M > 0$ such that
$\frac{b - a}{2^M} < \delta$. Then for any $K \geq L \geq M$, since each of
the subintervals have length $\frac{b - a}{2^L}$ and $\frac{b - a}{2^K}$ for
$P_L$ and $P_K$ respectively, if we write
\begin{equation*}
P_L = \{ a = p_0 < p_1 < \hdots < p_{2^L} = b \}
\end{equation*}
and
\begin{equation*}
P_K = \{ a = q_0 < q_1 < \hdots < q_{2^K} = b \},
\end{equation*}
then $p_j = q_{j 2^{K-L}}$ \sidenote{This is not immediately clear on first
read. Think of $a$ as $0$.} for all $0 \leq j \leq 2^L$. Since the test values
are the right endpoints, $p_j^* = p_j$ and $q_s^* = q_s$, and so by uniform
continuity, for $1 \leq j \leq 2^L$, we have
\begin{equation*}
  \norm{f(p_j) - f(q_s)} < \frac{\epsilon}{2(b - a)}, \text{ where } (j-1)
  2^{K-L} < s \leq j 2^{K-L},
\end{equation*}
since for such $s$, $q_s \in (p_{j-1}, p_j]$ and thus $\abs{p_j - q_s} \leq
\frac{b - a}{2^L} \leq \frac{b - a}{2^M} < \delta$.
We can see that
\begin{align*}
&\norm{S(f, P_L, P_L^*) - S(f, P_K, P_K^*)} \\
&= \norm{\sum_{j=1}^{2^L} \sum_{s=(j-1)2^{K-L} + 1}^{j 2^{K-L}}
(f(p_j) - f(q_s))(q_s - q_{s-1})} \\
&\leq \sum_{j=1}^{2^L} \sum_{s=(j-1) 2^{K-L} + 1}^{j 2^{K-L}}
\norm{f(p_j) - f(q_s)} (q_s - q_{s-1}) \\
&\leq \sum_{j=1}^{2^L} \sum_{s=(j-1) 2^{K-L} + 1}^{j 2^{K-L}}
\frac{\epsilon}{2(b - a)} (q_s - q_{s-1}) \\
&= \frac{\epsilon}{2(b - a)} \sum_{s=1}^{2^K} (q_s - q_{s-1}) \\
&= \frac{\epsilon}{2(b - a)} (b - a) = \frac{\epsilon}{2}.
\end{align*}
This proves our claim.
Since $\mathfrak{X}$ is a Banach space, and hence complete, we have that the
sequence $(S(f, P_N, P_N^*))_{N=1}^{\infty}$ has a limit $x_0 \in \mathfrak{X}$.
It remains to show that $\int_{a}^{b} f = x_0$. \sidenote{The rest of this
proof is similar to the above proof.}
Let $\epsilon > 0$, and choose $T \geq 1$ large enough that $\frac{b - a}{2^T} <
\delta$ \sidenote{Note that this is still the same $\delta$ as in the first
$\delta$ in this entire proof.} and, by the convergence above, that
\begin{equation*}
\norm{x_0 - S(f, P_T, P_T^*)} < \frac{\epsilon}{2}.
\end{equation*}
Now let $R = \{a = r_0 < r_1 < \hdots < r_J = b \} \in \mathcal{P}[a, b]$ such
that $P_T \subseteq R$. Then there exists a sequence
\begin{equation*}
0 = j_0 < j_1 < \hdots < j_{2^T} = J
\end{equation*}
such that
\begin{equation*}
r_{j_k} = p_k, \text{ where } 0 \leq k \leq 2^T.
\end{equation*}
Let $R^*$ be any set of test values of $R$. Note that for $j_{k-1} < s \leq
j_k$, it is clear that
\begin{equation*}
\abs{p_k^* - r_s^*} \leq \abs{p_k - p_{k-1}} = \frac{b-a}{2^T} < \delta.
\end{equation*}
Thus
\begin{align*}
&\norm{S(f, P_T, P_T^*) - S(f, R, R^*)} \\
&\leq \sum_{k=1}^{2^T} \sum_{s=j_{k-1}+1}^{j_k} \norm{f(p_k^*) -
f(r_s^*)}(r_s - r_{s - 1}) \\
&< \frac{\epsilon}{2(b-a)} \sum_{k=1}^{2^T} \sum_{s=j_{k-1}+1}^{j_k}
(r_s - r_{s-1}) \\
&= \frac{\epsilon}{2(b - a)}(b - a) = \frac{\epsilon}{2}.
\end{align*}
Putting everything together, we have
\begin{align*}
&\norm{x_0 - S(f, R, R^*)} \\
&\leq \norm{x_0 - S(f, P_T, P_T^*)} + \norm{S(f, P_T, P_T^*) - S(f, R, R^*)}
\\
&< \frac{\epsilon}{2} + \frac{\epsilon}{2} = \epsilon.
\end{align*}
The same bound holds for any other refinement $Q$ of $P_T$ with test values
$Q^*$, so that $\norm{S(f,R,R^*) - S(f,Q,Q^*)} < 2\epsilon$ for any two such
refinements. It follows from
\cref{thm:cauchy_criterion_of_riemann_integrability} that
\begin{equation*}
x_0 = \int_{a}^{b} f,
\end{equation*}
i.e. that $f$ is indeed Riemann integrable over $[a, b]$.
\end{proof}
The following is a corollary whose proof shall be left as an exercise.
\begin{crly}[Piecewise Functions are Riemann Integrable]\label{crly:piecewise_functions_are_riemann_integrable}
A \hlnotea{piecewise continuous} function is also Riemann
integrable: if $f : [a, b] \to \mathfrak{X}$ is piecewise continuous, then $f$
is Riemann integrable.
\end{crly}
\begin{ex}
Prove \cref{crly:piecewise_functions_are_riemann_integrable}.
\end{ex}
\newthought{Let us} exhibit a function that is not Riemann integrable.
\begin{defn}[Characteristic Function]\index{Characteristic Function}\label{defn:characteristic_function}
Given a subset $E$ of $\mathbb{R}$, we define the \hlnoteb{characteristic
function} of $E$ as the function $\chi_E : \mathbb{R} \to \mathbb{R}$ given by
\begin{equation*}
\chi_E(x) = \begin{cases}
1 & x \in E \\
0 & x \notin E
\end{cases}.
\end{equation*}
\end{defn}
\begin{eg}
Consider the set $E = \mathbb{Q} \cap [0, 1] \subseteq \mathbb{R}$. Let $P \in
\mathcal{P}[0, 1]$ such that
\begin{equation*}
P = \{ 0 = p_0 < p_1 < \hdots < p_N = 1 \},
\end{equation*}
and let
\begin{equation*}
P^* = \{ p_k^* \}_{k=1}^{N} \text{ and } P^{**} = \{ p_k^{**} \}_{k=1}^{N}
\end{equation*}
be two sets of test values for $P$, such that for each $1 \leq k \leq N$,
\begin{equation*}
  p_k^* \in \mathbb{Q} \text{ and } p_k^{**} \in \mathbb{R} \setminus
  \mathbb{Q},
\end{equation*}
which is possible since both $\mathbb{Q}$ and $\mathbb{R} \setminus
\mathbb{Q}$ are dense in $\mathbb{R}$.
Then we have
\begin{align*}
S(\chi_E, P, P^*) &= \sum_{k=1}^{N} \chi_E(p_k^*)(p_k - p_{k-1}) \\
&= \sum_{k=1}^{N} 1 \cdot (p_k - p_{k-1}) \\
&= p_N - p_0 = 1 - 0 = 1,
\end{align*}
and
\begin{align*}
S(\chi_E, P, P^{**}) &= \sum_{k=1}^{N} \chi_E(p_k^{**})(p_k - p_{k-1}) \\
&= \sum_{k=1}^{N} 0 \cdot (p_k - p_{k-1}) \\
&= 0.
\end{align*}
It is clear that the
\hyperref[thm:cauchy_criterion_of_riemann_integrability]{Cauchy criterion}
fails for $\chi_E$. This shows that $\chi_E$ is not Riemann integrable.
\end{eg}
\begin{remark}
Let us once again consider $E = \mathbb{Q} \cap [0, 1]$. Note that $E$ is
\hldefn{denumerable} \sidenote{This means that $E$ is countably infinite.}. We
may thus write
\begin{equation*}
E = \{ q_n \}_{n=1}^{\infty}.
\end{equation*}
Now, for $k \geq 1$, define
\begin{equation*}
f_k(x) = \sum_{n=1}^{k} \chi_{\{q_n\}}(x).
\end{equation*}
In other words, $f_k = \chi_{\{q_1, \ldots, q_k\}}$. Furthermore, we have that
\begin{equation*}
f_1 \leq f_2 \leq f_3 \leq \hdots \leq \chi_E.
\end{equation*}
Moreover, we have that $\forall x \in [0, 1]$,
\begin{equation*}
\chi_E(x) = \lim_{k \to \infty} f_k(x),
\end{equation*}
and
\begin{equation*}
\int_{0}^{1} f_k = 0 \text{ for all } k \geq 1.
\end{equation*}
And yet, we have that $\int_{0}^{1} \chi_E$ does not exist!
\end{remark}
\newthought{We want to} develop a different integral that will `cover' for the
`pathological' behavior where the Riemann integral fails.
The \hlbnotec{rough idea} is as follows.
In Riemann integration, when integrating over an interval $[a, b]$, we
partitioned $[a, b]$ into subintervals. This happens on the $x$-axis.
\begin{figure*}[ht]
\centering
\begin{tikzpicture}[x=0.75pt,y=0.75pt,yscale=-1,xscale=1]
\draw (50,259.62) -- (603.5,259.62)(105.35,13.92) -- (105.35,286.92) (596.5,254.62) -- (603.5,259.62) -- (596.5,264.62) (100.35,20.92) -- (105.35,13.92) -- (110.35,20.92) ;
\draw (170.33,259.67) -- (500.33,259.67) (224.33,255.67) -- (224.33,263.67)(278.33,255.67) -- (278.33,263.67)(332.33,255.67) -- (332.33,263.67)(386.33,255.67) -- (386.33,263.67)(440.33,255.67) -- (440.33,263.67)(494.33,255.67) -- (494.33,263.67) ;
\draw (157.33,187.67) .. controls (179.33,108.67) and (470.33,182.67) .. (543.33,74.67) ;
\draw (224.53,148.98) -- (278.53,148.98) -- (278.53,259.38) -- (224.53,259.38) -- cycle ;
\draw (278.53,142.98) -- (332.53,142.98) -- (332.53,259.98) -- (278.53,259.98) -- cycle ;
\draw (440.53,127.48) -- (494.53,127.48) -- (494.53,259.48) -- (440.53,259.48) -- cycle ;
\draw (224,274) node [align=left] {$\displaystyle a$};
\draw (494,277) node [align=left] {$\displaystyle b$};
\draw (278,273) node [align=left] {$\displaystyle p_{1}$};
\draw (331,274) node [align=left] {$\displaystyle p_{2}$};
\draw (441,274) node [align=left] {$\displaystyle p_{N-1}$};
\draw (385,270) node [align=left] {$\displaystyle \dotsc $};
\draw (241,147) node [align=left] {$\times$};
\draw (322,142) node [align=left] {$\times$};
\draw (450,126) node [align=left] {$\times$};
\draw (242,126.92) node [align=left] {$\displaystyle p^{*}_{0}$};
\draw (322,122.92) node [align=left] {$\displaystyle p^{*}_{1}$};
\draw (450,105.92) node [align=left] {$\displaystyle p^{*}_{N-1}$};
\end{tikzpicture}
\caption{Rough illustration of how Riemann's integration works}
\label{fig:rough_illustration_of_how_riemann_s_integration_works}
\end{figure*}
In each of the subintervals of the partition, we pick out a \hlnotea{test
value} $p_i^*$, and basically draw a rectangle with base $[p_i, p_{i+1}]$ and
height from $0$ to $f(p_i^*)$.
What we shall do now is that we \hlwarn{partition the range of $f$ on the
$y$-axis}, instead of the $x$-axis as we do in Riemann integration.
In particular, given a function $f : [a, b] \to \mathbb{R}$, we first partition
the range of $f$ into subintervals $[y_{k-1}, y_k]$, where $1 \leq k \leq N$.
Then, we set
\begin{equation*}
E_k = \{ x \in [a, b] : f(x) \in [y_{k-1}, y_k] \} \text{ for } 1 \leq k \leq N.
\end{equation*}
\begin{figure*}[ht]
\centering
\includegraphics[width=0.5\linewidth]{images/heuristic_into_lesbesgue_integration.png}
\caption{A sketch of what's happening with the construction of the $E_k$'s}
\label{fig:a_sketch_of_what_s_happening_with_the_construction_of_the_e_k_s}
\end{figure*}
This will then allow us to estimate the integral of $f$ over $[a, b]$ by the
expression
\begin{equation*}
\sum_{k=1}^{N} y_k m E_k,
\end{equation*}
where the approximating functions of the form $\sum_{k=1}^{N} y_k \chi_{E_k}$
are called \hlnotea{simple functions}. In the expression, $mE_k$ denotes a
``measure'' \sidenote{Note that a measure is simply a generalization of the
notion of `length'.} of $E_k$.
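To make this concrete, here is a small illustration of my own, assuming for
the moment that the `measure' of an interval is simply its length.
\begin{eg}
  Take $f(x) = x$ on $[0, 1]$, and partition the range $[0, 1]$ using $y_0 = 0
  < y_1 = \frac{1}{2} < y_2 = 1$. Then $E_1 = [0, \frac{1}{2}]$ and $E_2 =
  [\frac{1}{2}, 1]$, so the estimate reads
  \begin{equation*}
    \sum_{k=1}^{2} y_k m E_k = \frac{1}{2} \cdot \frac{1}{2} + 1 \cdot
    \frac{1}{2} = \frac{3}{4},
  \end{equation*}
  an overestimate of $\int_{0}^{1} x \, dx = \frac{1}{2}$, since we used the
  upper endpoints $y_k$. Partitioning the range into $N$ equal pieces instead
  gives $\sum_{k=1}^{N} \frac{k}{N} \cdot \frac{1}{N} = \frac{N+1}{2N} \to
  \frac{1}{2}$.
\end{eg}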
\begin{figure*}[ht]
\centering
\includegraphics[width=0.5\linewidth]{images/heuristic_into_lesbesgue_integration_heaps.png}
\caption{Drawing out the rectangles of $y_k mE_k$ from
\cref{fig:a_sketch_of_what_s_happening_with_the_construction_of_the_e_k_s}.}
\label{fig:drawing_out_the_rectangles_of_y_k_me_k_from_fig_}
\end{figure*}
We observe that $E_k$ need not be a particularly well-behaved set. However, note
that we may rearrange the possibly scattered pieces of each $E_k$ together, so
as to form a `continuous' base for the rectangle. We need our definition of a
measure to be able to capture this.
The following is an analogy from Lebesgue himself on comparing Lebesgue
integration and Riemann integration \cite{siegmund2008}:
\begin{quotebox}{magenta}{foreground}
I have to pay a certain sum, which I have collected in my pocket. I take the
bills and coins out of my pocket and give them to the creditor in the order I
find them until I have reached the total sum. This is the Riemann integral.
But I can proceed differently. After I have taken all the money out of my
pocket I order the bills and coins according to identical values and then I
pay the several heaps one after the other to the creditor. This is my
integral.
\end{quotebox}
The insight here is that one can \hlbnotea{freely arrange} the values of the
functions, all the while \hlbnotea{preserving} the value of the integral.
\begin{itemize}
\item This requires us to have a better understanding of what a measure is.
\item This process of rearrangement converts certain functions that are
    extremely difficult, or outright impossible, to deal with using the
    Riemann integral into easily digestible pieces under the Lebesgue
    integral.
\end{itemize}
% section riemannian_integration_continued (end)
\section{Lebesgue Outer Measure}%
\label{sec:lebesgue_outer_measure}
% section lebesgue_outer_measure
\paragraph{Goals of the section}
\begin{enumerate}
\item Define a ``measure of length'' on as many subsets of $\mathbb{R}$ as
possible.
\item The definition should agree with our intuition of what a `length' is.
\end{enumerate}
\begin{defn}[Length]\index{Length}\label{defn:length}
For $a \leq b \in \mathbb{R}$, we define the \hlnoteb{length} of the interval
$(a, b)$ to be $b - a$, and we write
\begin{equation*}
\ell((a, b)) \coloneqq b - a.
\end{equation*}
We also define
\begin{itemize}
\item $\ell(\emptyset) = 0$; and
\item $\ell((a, \infty)) = \ell((-\infty, b)) = \ell((-\infty, \infty)) =
\infty$.
\end{itemize}
\end{defn}
\begin{defn}[Cover by Open Intervals]\index{Cover by Open Intervals}\label{defn:cover_by_open_intervals}
Let $E \subseteq \mathbb{R}$. A countable collection $\{ I_n
\}_{n=1}^{\infty}$ of open intervals is said to be a \hlnoteb{cover of $E$ by
open intervals} if $E \subseteq \bigcup_{n=1}^{\infty} I_n$.
\end{defn}
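For instance (a trivial example of my own):
\begin{eg}
  The intervals $I_n = (n - 2, n)$, for $n \geq 1$, form a cover of $E = [0,
  \infty)$ by open intervals, since every $x \geq 0$ satisfies $x \in (n - 2,
  n)$ for $n = \lceil x \rceil + 1$.
\end{eg}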
\begin{note}
In this course, the only covers that we shall use are \hlbnotec{open
intervals}, and so we shall henceforth refer to the above simply as covers of
$E$.
\end{note}
Before giving what immediately follows from the above, I shall present the
following notion of an outer measure.
\begin{defn}[Outer Measure]\index{Outer Measure}\label{defn:outer_measure}
Let $\emptyset \neq X$ be a set. An \hlnoteb{outer measure} $\mu$ on $X$ is a
function
\begin{equation*}
\mu : \mathcal{P}(X) \to [0, \infty] \coloneqq [0, \infty) \cup \{ \infty \}
\end{equation*}
which satisfies
\begin{marginfigure}
\centering
\includegraphics[width=0.8\marginparwidth]{images/outermeasure.png}
\caption{Idea of the outer measure}\label{fig:idea_of_the_outer_measure}
\end{marginfigure}
\begin{enumerate}
\item $\mu \emptyset = 0$;
\item (\hldefn{monotone increment} or \hldefn{monotonicity}) $E \subseteq F
\subseteq X \implies \mu E \leq \mu F$; and
\item (\hldefn{countable subadditivity} or
    \hldefn{$\sigma$-subadditivity}) $\forall \{ E_n \}_{n=1}^{\infty}
    \subseteq \mathcal{P}(X)$,
\begin{equation*}
\mu \left( \bigcup_{n=1}^{\infty} E_n \right) \leq \sum_{n=1}^{\infty}
\mu E_n.
\end{equation*}
\end{enumerate}
\end{defn}
\begin{note}
Note that by the monotonicity, the $\sigma$-subadditivity condition is
equivalent to: given $\{ E_n \}_{n=1}^{\infty} \subseteq \mathcal{P}(X)$ and
$F \subseteq \bigcup_{n=1}^{\infty} E_n$, we have that
\begin{equation*}
\mu(F) \leq \sum_{n=1}^{\infty} \mu(E_n).
\end{equation*}
\end{note}
\begin{defn}[Lebesgue Outer Measure]\index{Lebesgue Outer Measure}\label{defn:lebesgue_outer_measure}
We define the \hlnoteb{Lebesgue outer measure} as the function $m^* :
\mathcal{P}(\mathbb{R}) \to [0, \infty]$ given by
\begin{equation*}
  m^* E \coloneqq \inf \left\{ \sum_{n=1}^{\infty} \ell(I_n) : E \subseteq
  \bigcup_{n=1}^{\infty} I_n \right\},
\end{equation*}
where the infimum is taken over all covers $\{ I_n \}_{n=1}^{\infty}$ of $E$
by open intervals.
\end{defn}
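Here is a first computation with this definition (my own illustration; it
previews \cref{crly:lebesgue_outer_measure_of_countable_sets_is_zero} below).
\begin{eg}
  For a singleton $\{ x_0 \} \subseteq \mathbb{R}$, let $\epsilon > 0$, and
  take $I_1 = \left( x_0 - \frac{\epsilon}{2}, x_0 + \frac{\epsilon}{2}
  \right)$ and $I_n = \emptyset$ for $n \geq 2$. This is a cover of $\{ x_0
  \}$, and so
  \begin{equation*}
    0 \leq m^* \{ x_0 \} \leq \sum_{n=1}^{\infty} \ell(I_n) = \epsilon.
  \end{equation*}
  Letting $\epsilon \to 0$, we get $m^* \{ x_0 \} = 0$: single points have no
  `length', as we would hope.
\end{eg}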
We cheated a little bit by calling $m^*$ an outer measure, so let us now
justify our cheating.
\begin{propo}[Validity of the Lebesgue Outer Measure]\label{propo:validity_of_the_lebesgue_outer_measure}
$m^*$ is indeed an outer measure.
\end{propo}
\begin{proof}
\hlbnoted{$m^* \emptyset = 0$} We consider the sequence of sets $\{ I_n
\}_{n=1}^{\infty}$ with $I_n = \emptyset$ for each $n \geq 1$. It is clear
that $\emptyset \subseteq \bigcup_{n=1}^{\infty} I_n$. Also, we have that
$\ell(I_n) = 0$ for all $n \geq 1$. It follows that
\begin{equation*}
  0 \leq m^*(\emptyset) \leq \sum_{n=1}^{\infty} \ell (I_n) =
  \sum_{n=1}^{\infty} 0 = 0,
\end{equation*}
where the inequality is simply by the definition of $m^*$ being an infimum,
\hlimpo{not to be confused with $\sigma$-subadditivity}. We thus have that
\begin{equation*}
m^*(\emptyset) = 0.
\end{equation*}
\noindent
\hlbnoted{Monotonicity} Suppose $E \subseteq F \subseteq
\mathbb{R}$, and $\{ I_n \}_{n=1}^{\infty}$ a cover of $F$. Then
\begin{equation*}
E \subseteq F \subseteq \bigcup_{n=1}^{\infty} I_n.
\end{equation*}
In particular, all covers of $F$ are also covers of $E$, i.e.
\begin{equation*}
\left\{ \{ I_n \}_{n=1}^{\infty} : F \subseteq \bigcup_{n=1}^{\infty} I_n \right\}
\subseteq
\left\{ \{ J_m \}_{m=1}^{\infty} : E \subseteq \bigcup_{m=1}^{\infty} J_m \right\}.
\end{equation*}
It follows that
\begin{equation*}
m^* E \leq m^* F.
\end{equation*}
\noindent
\hlbnoted{$\sigma$-subadditivity} Consider $\{ E_n \}_{n=1}^{\infty} \subseteq
\mathcal{P}(\mathbb{R})$ and $E \subseteq \bigcup_{n=1}^{\infty} E_n$. WTS
\begin{equation*}
  m^* E \leq \sum_{n=1}^{\infty} m^* E_n.
\end{equation*}
Now if the sum on the RHS is infinite, then the inequality comes for free.
Thus WMA $\sum_{n=1}^{\infty} m^* E_n < \infty$, and in particular that $m^*
E_n < \infty$ for all $n \geq 1$.
Let $\epsilon > 0$. Since $m^* E_n < \infty$ for all $n$, we can
find covers $\left\{ I_k^{(n)} \right\}_{k=1}^{\infty}$ for each of the
$E_n$'s such that
\begin{equation*}
\sum_{k=1}^{\infty} \ell\left(I_k^{(n)}\right) < m^* E_n +
\frac{\epsilon}{2^n}.
\end{equation*}
Then, we have that
\begin{equation*}
E \subseteq \bigcup_{n=1}^{\infty} E_n \subseteq \bigcup_{n=1}^{\infty}
\bigcup_{k=1}^{\infty} I_k^{(n)}.
\end{equation*}
Then by $m^* E$ being the infimum of the sum of lengths of the covering
intervals, we have that
\begin{align*}
m^* E &\leq \sum_{n=1}^{\infty} \sum_{k=1}^{\infty} \ell \left( I_k^{(n)}
\right) \\
&\leq \sum_{n=1}^{\infty} \left( m^* E_n + \frac{\epsilon}{2^n}
\right) \\
&= \sum_{n=1}^{\infty} m^* E_n + \sum_{n=1}^{\infty}
\frac{\epsilon}{2^n} \\
&= \sum_{n=1}^{\infty} m^* E_n + \epsilon.
\end{align*}
Since $\epsilon$ was arbitrary, we have that
\begin{equation*}
m^* E \leq \sum_{n=1}^{\infty} m^* E_n,
\end{equation*}
as desired.
\end{proof}
\begin{crly}[Lebesgue Outer Measure of Countable Sets is Zero]\label{crly:lebesgue_outer_measure_of_countable_sets_is_zero}
If $E \subseteq \mathbb{R}$ is countable, then $m^* E = 0$.
\end{crly}
\begin{proof}
We shall prove the case where $E$ is denumerable; the finite case follows by a
similar argument. Let us write $E = \{ x_n \}_{n=1}^{\infty}$. Let $\epsilon > 0$
and
\begin{equation*}
I_n = \left( x_n - \frac{\epsilon}{2^{n+1}},\, x_n + \frac{\epsilon}{2^{n+1}}
\right).
\end{equation*}
Then it is clear that $\{ I_n \}_{n=1}^{\infty}$ is a cover of $E$.
It follows that
\begin{equation*}
0 \leq m^* E \leq \sum_{n=1}^{\infty} \ell (I_n) = \sum_{n=1}^{\infty}
\frac{\epsilon}{2^n} = \epsilon.
\end{equation*}
Thus as $\epsilon \to 0$, we have that
\begin{equation*}
m^* E = 0,
\end{equation*}
as expected.
\end{proof}
\begin{crly}[Lebesgue Outer Measure of $\mathbb{Q}$ is Zero]\label{crly:lebesgue_outer_measure_of_q_is_zero}
We have that $m^* \mathbb{Q} = 0$.
\end{crly}
\newthought{In the proofs} above that we have looked into, and based on the
intuitive notion of the length of an open interval, it is compelling to simply
conclude that
\begin{equation*}
m^* (a, b) = \ell(a, b) = b - a.
\end{equation*}
However, looking back at \cref{defn:lebesgue_outer_measure}, we know that that
is not how $m^* (a, b)$ is defined.
This leaves us with an interesting question:
\begin{quotebox}{green}{foreground}
how does our notion of measure $m^*(a, b)$ of an interval compare with the
notion of the length of an interval?
\end{quotebox}
By taking $I_1 = (a, b)$ and $I_n = \emptyset$ for $n \geq 2$, it is rather
clear that $\{ I_n \}_{n=1}^{\infty}$ is a cover of $(a, b)$, and so we have
\begin{equation}\label{eq:lebesgue_outer_measure_leq_length}
m^* (a, b) \leq \ell(a, b) = b - a.
\end{equation}
However, the reverse inequality is not as easy to confirm: we would have to
consider \hlimpo{all} possible covers of $(a, b)$, which is a lot.
Another question that we can ask ourselves upon seeing
\cref{eq:lebesgue_outer_measure_leq_length} is: why can't $m^*((a, b))$ be
something that is strictly less than the length, giving us an even more
`precise' measurement?
To answer these questions, it is useful to first consider the outer measure of a
closed and bounded interval, e.g. $[a, b]$, since these intervals are
\hlnotea{compact} under the \hlnotea{Heine-Borel Theorem}. This will give us a
finite subcover for every cover of the compact interval by open intervals,
which is easy to deal with.
We shall see that once we have computed the outer measure of a compact
interval, we will also be able to find the outer measure of intervals that are
neither open nor closed.
We shall prove the following proposition in the next lecture. Note that for the
sake of presentation, I shall abbreviate the Lebesgue Outer Measure as LOM.
\begin{propononum}[LOM of Arbitrary Intervals]
Suppose $a < b \in \mathbb{R}$. Then
\begin{enumerate}
\item $m^*([a, b]) = b - a$; and therefore
\item $m^*((a, b]) = m^*([a, b)) = m^*((a, b)) = b - a$.
\end{enumerate}
\end{propononum}
% section lebesgue_outer_measure (end)
% chapter lecture_2_may_9th_2019 (end)
\chapter{Lecture 3 May 14th 2019}%
\label{chp:lecture_3_may_14th_2019}
% chapter lecture_3_may_14th_2019
\section{Lebesgue Outer Measure Continued}%
\label{sec:lebesgue_outer_measure_continued}
% section lebesgue_outer_measure_continued
\begin{propo}[LOM of Arbitrary Intervals]\label{propo:lom_of_arbitrary_intervals}
Suppose $a < b \in \mathbb{R}$. Then
\begin{enumerate}
\item $m^*([a, b]) = b - a$; and therefore
\item $m^*((a, b]) = m^*([a, b)) = m^*((a, b)) = b - a$.
\end{enumerate}
\end{propo}
\begin{proof}
\begin{enumerate}
\item Consider $a < b \in \mathbb{R}$. Let $\epsilon > 0$, and let
\begin{equation*}
I_1 = \left( a - \frac{\epsilon}{2}, \, b + \frac{\epsilon}{2} \right)
\end{equation*}
and $I_n = \emptyset$ for $n \geq 2$. Then $\{ I_n \}_{n=1}^{\infty}$ is a
cover of $[a, b]$. This means that
\begin{equation*}
m^*([a, b]) \leq \sum_{n=1}^{\infty} \ell(I_n) = b - a + \epsilon.
\end{equation*}
So, letting $\epsilon \to 0$, we have that
\begin{equation*}
m^*([a, b]) \leq b - a.
\end{equation*}
\sidenote{For the converse, we know that $m^*([a, b]) = \inf \faStar$,
where $\faStar$ is just a placeholder for you-know-what. So if we can
show that an arbitrary one of the sums is $\geq b - a$, then so is the
infimum, and our work is done.} Conversely, if $[a, b]$ is covered by open
intervals $\{ I_n \}_{n=1}^{\infty}$, then by compactness of $[a, b]$ (via
the \hlnotea{Heine-Borel Theorem}), we know that we can cover $[a, b]$ by
finitely many of these intervals, and let us denote these as $\{ I_n
\}_{n=1}^{N}$, for some $1 \leq N < \infty$.
WTS
\begin{equation*}
\sum_{n=1}^{N} \ell(I_n) \geq b - a.
\end{equation*}
If LHS $= \infty$, then our work is done. Thus wlog, WMA each $I_n = (a_n,
b_n)$ is a finite interval. Note that we have
\begin{equation*}
[a, b] \subseteq \bigcup_{n=1}^{N} (a_n, b_n).
\end{equation*}
In particular, $a \in \bigcup_{n=1}^{N} I_n$. Thus, $\exists 1 \leq n_1
\leq N$ such that $a \in I_{n_1}$. Now if $b_{n_1} > b$, we shall stop
this process for our work is done, since then $[a, b] \subseteq I_{n_1}$.
Otherwise, if $b_{n_1} \leq b$, then $b_{n_1} \in [a, b] \subseteq
\bigcup_{n=1}^{N} I_n$, which means that $\exists 1 \leq n_2 \leq N$ such
that $b_{n_1} \in I_{n_2}$.
\begin{figure*}[ht]
\centering
\begin{tikzpicture}
\draw (-7, 0) -- (7, 0);
\draw (-4, 0.2) -- (-4, -0.2) node[below] {$a$};
\draw (4, 0.2) -- (4, -0.2) node[below] {$b$};
\draw[
{Arc Barb[length=5pt,width=15pt]}-{Arc Barb[length=5pt,width=15pt]},red
] (-5,0) node[above=10pt] {$a_{n_1}$} -- (-2,0) node[above=10pt]
{$b_{n_1}$};
\draw[
{Arc Barb[length=5pt,width=15pt]}-{Arc Barb[length=5pt,width=15pt]},cyan
] (-3,0) node[above=10pt] {$a_{n_2}$} -- (0,0) node[above=10pt]
{$b_{n_2}$};
\node at (1, 0.6) {$\hdots$};
\draw[
{Arc Barb[length=5pt,width=15pt]}-{Arc Barb[length=5pt,width=15pt]},magenta
] (2,0) node[above=10pt] {$a_{n_k}$} -- (5,0) node[above=10pt]
{$b_{n_k}$};
\end{tikzpicture}
\caption{Our continual picking of $I_{n_1}, I_{n_2}, \ldots, I_{n_k}$}
\label{fig:our_continual_picking_of_i__n_1_i__n_2_ldots_i__n_k_}
\end{figure*}
Notice that $n_1 \neq n_2$, since $b_{n_1} \notin I_{n_1}$ but $b_{n_1}
\in I_{n_2}$.
Now once again, if $b_{n_2} > b$, then we shall stop this process since
our work is done. Otherwise, we have $a < b_{n_2} \leq b$, and so $\exists
1 \leq n_3 \leq N$, $n_3 \neq n_1, n_2$, such that $b_{n_2} \in I_{n_3}$...
We continue with the above process for as long as $b_{n_k} \leq b$. We can
thus find, for each $k$, $I_{n_{k+1}}$, where $n_{k+1} \in \{ 1, \ldots, N
\} \setminus \{ n_1, n_2, \ldots, n_k \}$, such that $b_{n_k} \in
I_{n_{k+1}}$.
However, since the $I_{n_k}$'s are pairwise distinct, and since we only
have $N$ such intervals, there must exist a $K \leq N$ such that
\begin{equation*}
b_{n_{K-1}} \leq b \text{ and } b_{n_K} > b.
\end{equation*}
It now suffices for us to show that
\begin{equation*}
\sum_{j=1}^{K} \ell(I_{n_j}) \geq b - a.
\end{equation*}
Observe that
\begin{align*}
\sum_{j=1}^{K} \ell(I_{n_j})
&= (b_{n_K} - a_{n_K}) + (b_{n_{K-1}} - a_{n_{K-1}}) + \hdots \\
&\quad + (b_{n_2} - a_{n_2}) + (b_{n_1} - a_{n_1}) \\
&= b_{n_K} + \underset{\geq 0}{(b_{n_{K-1}} - a_{n_K})} + \underset{\geq
0}{(b_{n_{K-2}} - a_{n_{K-1}})} + \hdots \\
&\quad + \underset{\geq 0}{(b_{n_1} - a_{n_2})} - a_{n_1} \\
&\geq b_{n_K} - a_{n_1} \geq b - a.
\end{align*}
Thus
\begin{equation*}
\sum_{n=1}^{\infty} \ell(I_n) \geq \sum_{n=1}^{N} \ell(I_n) \geq
\sum_{j=1}^{K} \ell(I_{n_j}) \geq b - a,
\end{equation*}
whence
\begin{equation*}
m^*([a, b]) \geq b - a.
\end{equation*}
It follows that, indeed,
\begin{equation*}
m^*([a, b]) = b - a.
\end{equation*}
\item First, note that
\begin{equation*}
m^*((a, b)) \leq m^*([a, b]) \leq b - a.
\end{equation*}
On the other hand, notice that $\forall 0 < \epsilon < \frac{b - a}{2}$,
we have that
\begin{equation*}
[a + \epsilon, b - \epsilon] \subseteq (a, b),
\end{equation*}
and so by monotonicity,
\begin{equation*}
(b - a) - 2 \epsilon = m^*([a + \epsilon, b - \epsilon]) \leq m^*((a,
b)).
\end{equation*}
As $\epsilon \to 0$, we have that
\begin{equation*}
b - a \leq m^*((a, b)) \leq b - a.
\end{equation*}
So
\begin{equation*}
m^*((a, b)) = b - a
\end{equation*}
as desired.
Finally, we have that
\begin{equation*}
b - a = m^*((a, b)) \leq m^*((a, b]) \leq m^*([a, b]) = b - a,
\end{equation*}
and similarly
\begin{equation*}
b - a = m^*((a, b)) \leq m^*([a, b)) \leq m^*([a, b]) = b - a.
\end{equation*}
Thus
\begin{equation*}
m^*((a, b)) = m^*((a, b]) = m^*([a, b)) = b - a
\end{equation*}
as required.
\end{enumerate}
\end{proof}
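With \cref{propo:lom_of_arbitrary_intervals} in hand, we can already pin down
the outer measure of some non-interval sets; the following is a small
illustration of my own.
\begin{eg}
  Let $A = [0, 1] \cup \{ 2 \}$. By $\sigma$-subadditivity and
  \cref{crly:lebesgue_outer_measure_of_countable_sets_is_zero},
  \begin{equation*}
    m^* A \leq m^*([0, 1]) + m^* \{ 2 \} = 1 + 0 = 1,
  \end{equation*}
  while monotonicity gives $m^* A \geq m^*([0, 1]) = 1$. Thus $m^* A = 1$.
\end{eg}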
\begin{propo}[LOM of Infinite Intervals]\label{propo:lom_of_infinite_intervals}
We have that $\forall a, b \in \mathbb{R}$,
\begin{align*}
m^*((a, \infty)) &= m^*([a, \infty)) \\
&= m^*((-\infty, b)) = m^*((-\infty, b]) \\
&= m^* \mathbb{R} = \infty.
\end{align*}
\end{propo}
\begin{proof}
Observe that
\begin{equation*}
(a, a + n) \subseteq (a, \infty)
\end{equation*}
for all $n \geq 1$. Thus
\begin{equation*}
n = m^*((a, a + n)) \leq m^*((a, \infty))
\end{equation*}
for all $n \geq 1$. Hence
\begin{equation*}
m^*((a, \infty)) = \infty
\end{equation*}
by definition.
All other cases follow similarly.
\end{proof}
\begin{crly}[Uncountability of $\mathbb{R}$]\label{crly:uncountability_of_r}
$\mathbb{R}$ is uncountable.
\end{crly}
\begin{proof}
We have that
\begin{equation*}
m^* \mathbb{R} = \infty \neq 0,
\end{equation*}
and so it follows from
\cref{crly:lebesgue_outer_measure_of_countable_sets_is_zero} that
$\mathbb{R}$ is uncountable.
\end{proof}
\begin{defn}[Translation Invariant]\index{Translation Invariant}\label{defn:translation_invariant}
Let $\mu$ be an \hyperref[defn:outer_measure]{outer measure} on $\mathbb{R}$.
We say that $\mu$ is \hlnoteb{translation invariant} if $\forall E \subseteq
\mathbb{R}$,
\begin{equation*}
\mu(E) = \mu(E + \kappa)
\end{equation*}
for all $\kappa \in \mathbb{R}$, where
\begin{equation*}
E + \kappa \coloneqq \{ x + \kappa : x \in E \}.
\end{equation*}
\end{defn}
\begin{propo}[Translation Invariance of the LOM]\label{propo:translation_invariance_of_the_lom}
The Lebesgue outer measure is translation invariant.
\end{propo}
\begin{proof}
Let $E \subseteq \mathbb{R}$ and $\kappa \in \mathbb{R}$. Note that $E$ is
covered by open intervals $\{ I_n \}_{n=1}^{\infty}$ iff $E + \kappa$ is
covered by $\{ I_n + \kappa \}_{n=1}^{\infty}$.
\noindent
\hlbnoted{Claim: $\forall n \geq 1$, $\ell(I_n + \kappa) = \ell(I_n)$} Write
\begin{equation*}
I_n = (a_n, b_n).
\end{equation*}
Then
\begin{equation*}
I_n + \kappa = (a_n + \kappa, b_n + \kappa).
\end{equation*}
Observe that
\begin{equation*}
\ell(I_n + \kappa) = b_n + \kappa - (a_n + \kappa) = b_n - a_n = \ell(I_n),
\end{equation*}
as claimed. $\dashv$
By the claim, it follows that
\begin{align*}
m^*(E) &= \inf \left\{ \sum_{n=1}^{\infty} \ell(I_n) : E \subseteq
\bigcup_{n=1}^{\infty} I_n \right\} \\
&= \inf \left\{ \sum_{n=1}^{\infty} \ell(I_n + \kappa) : E + \kappa \subseteq
\bigcup_{n=1}^{\infty} (I_n + \kappa) \right\} \\
&= m^*(E + \kappa).
\end{align*}
\end{proof}
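As an immediate illustration (my own), translation invariance combines nicely
with our earlier computations.
\begin{eg}
  Since $m^* \mathbb{Q} = 0$ by
  \cref{crly:lebesgue_outer_measure_of_q_is_zero}, translation invariance
  gives
  \begin{equation*}
    m^*(\mathbb{Q} + \sqrt{2}) = m^* \mathbb{Q} = 0,
  \end{equation*}
  even though $\mathbb{Q} + \sqrt{2}$ consists entirely of irrational numbers.
  Similarly, $m^*((a, b) + \kappa) = b - a$ for every $\kappa \in \mathbb{R}$,
  consistent with \cref{propo:lom_of_arbitrary_intervals}.
\end{eg}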
\begin{remark}
Suppose $E \subseteq \mathbb{R}$ and $E = \bigcup_{n=1}^{\infty} E_n$. Now by
$\sigma$-subadditivity of $m^*$, we have that
\begin{equation*}
  m^* E \leq \sum_{n=1}^{\infty} m^* E_n.
\end{equation*}
However, equality is not guaranteed: if $E = [0, 1]$, we may have $E_n = [0,
1]$ for all $n \geq 1$ (so the $E_n$'s are far from disjoint), in which case
$E = \bigcup_{n=1}^{\infty} E_n = [0, 1]$, but
\begin{equation*}
  m^* E = m^* [0, 1] = 1 < \infty = \sum_{n=1}^{\infty} m^* E_n.
\end{equation*}
It would be desirable to have
\begin{equation*}
m^* E = \sum_{n=1}^{\infty} m^* E_n,
\end{equation*}
when the $E_i$'s are pairwise disjoint, i.e. $E = \bigcupdot_{n=1}^{\infty}
E_n$. In fact, this is what our intuition demands if the outer measure is
going to be our `length': for the example $A = [0, 2] \cup [5, 7]$, we would
expect $m^* A = 2 + 2 = 4$.
However, as the next theorem shows, this is actually impossible if we insist
on measuring \hlimpo{all} subsets of $\mathbb{R}$.
\end{remark}
\begin{thm}[Non-existence of a sensible Translation Invariant Outer Measure that is also $\sigma$-additive]\label{thm:non_existence_of_a_sensible_translation_invariant_outer_measure_that_is_also_sigma_additive}
There does not exist a translation-invariant outer measure $\mu$ on
$\mathbb{R}$ that satisfies
\begin{enumerate}
\item $\mu(\mathbb{R}) > 0$;
\item $\mu[0, 1] < \infty$; and
\item $\mu$ is \hldefn{$\sigma$-additive}; i.e. if $\{ E_n
\}_{n=1}^{\infty}$ is a countable collection of disjoint subsets of
$\mathbb{R}$ that covers $E \subseteq \mathbb{R}$, then
\begin{equation*}
\mu E = \sum_{n=1}^{\infty} \mu E_n.
\end{equation*}
\end{enumerate}
Consequently, the \hyperref[defn:lebesgue_outer_measure]{Lebesgue outer
measure} $m^*$ is \hlimpo{not} $\sigma$-additive.
\end{thm}
\begin{proof}
Suppose to the contrary that such a $\mu$ exists.
\noindent
\hlbnoteb{Step 1} Consider the relation $\sim$ on $\mathbb{R}$ such that $x
\sim y$ if $x - y \in \mathbb{Q}$.
\noindent
\hlbnoted{Claim: $\sim$ is an equivalence relation}
\begin{itemize}
\item (\textbf{reflexivity}) We know that $0 \in \mathbb{Q}$ and $x - x = 0$.
Thus $x \sim x$.
\item (\textbf{symmetry}) Since $\mathbb{Q}$ is a \hlnotea{field}, it is
closed under multiplication, and $-1 \in \mathbb{Q}$. Thus if $x \sim y$,
then $x - y \in \mathbb{Q}$, and so $(-1)(x - y) = y - x \in \mathbb{Q}$,
which means $y \sim x$.
\item (\textbf{transitivity}) Again, since $\mathbb{Q}$ is a field, it is
closed under (this time) addition. Thus
\begin{align*}
&x \sim y \, \land \, y \sim z \implies (x - y), \, (y - z) \in
\mathbb{Q} \\
&\implies (x - y) + (y - z) = x - z \in \mathbb{Q}.
\end{align*}
Thus $x \sim z$.
\end{itemize}
This proves the claim. $\dashv$
Let
\begin{equation*}
[x] \coloneqq x + \mathbb{Q} \coloneqq \{ x + q : q \in \mathbb{Q} \}
\end{equation*}
denote the equivalence class of $x$ wrt $\sim$. Note that the set of
equivalence classes, which we shall represent as
\begin{equation*}
\mathcal{F} \coloneqq \{ [x] : x \in \mathbb{R} \},
\end{equation*}
partitions $\mathbb{R}$, i.e.
\begin{itemize}
\item $[x] = [y] \iff x - y \in \mathbb{Q}$; and
\item $[x] \cap [y] = \emptyset$ otherwise.
\end{itemize}
Note that since $\mathbb{Q}$ is \hlnotea{dense} in $\mathbb{R}$, we have that
$[x] = x + \mathbb{Q}$ is also dense in $\mathbb{R}$, for all $x \in
\mathbb{R}$. Then for each \sidenote{Notice that here, we
have invoked the \hlbnotee{Axiom of Choice}.} $F \in \mathcal{F}$, $\exists
x_F \in F$ such that
\begin{equation*}
0 \leq x_F \leq 1.
\end{equation*}
Now consider the set
\begin{equation*}
\mathbb{V} \coloneqq \{ x_F : F \in \mathcal{F} \} \subseteq [0, 1],
\end{equation*}
which is called \hldefn{Vitali's Set}.
\noindent
\hlbnoteb{Step 2} Since $\mathcal{F}$ partitions $\mathbb{R}$, we have that
\begin{align*}
\mathbb{R} &= \bigcupdot_{F \in \mathcal{F}} F = \bigcupdot_{F \in
\mathcal{F}} [x_F] \\
&= \bigcupdot_{F \in \mathcal{F}} x_F + \mathbb{Q} \\
&= \mathbb{V} + \mathbb{Q} \coloneqq \{ x + q : q \in \mathbb{Q},
x \in \mathbb{V} \}.
\end{align*}
\noindent
\hlbnoteb{Step 3} \hlbnoted{Claim: $p \neq q \in \mathbb{Q} \implies
(\mathbb{V} + p) \cap (\mathbb{V} + q) = \emptyset$} Suppose not, and suppose
$\exists y \in (\mathbb{V} + p) \cap (\mathbb{V} + q)$. Then $\exists F_1, F_2
\in \mathcal{F}$ such that
\begin{equation}\label{eq:nonexistence_of_transinvar_outmea_eq1}
y = x_{F_1} + p = x_{F_2} + q.
\end{equation}
Then we may rearrange the above equation to get
\begin{equation*}
x_{F_1} - x_{F_2} = q - p \in \mathbb{Q}.
\end{equation*}
This implies that
\begin{equation*}
[x_{F_1}] = [x_{F_2}] \implies F_1 = F_2
\end{equation*}
since $\mathbb{V}$ consists of one unique representative from each of the
equivalence classes. However, this would mean that
\begin{equation*}
x_{F_1} = x_{F_2}.
\end{equation*}
Since $p \neq q$, we have that
\begin{equation*}
x_{F_1} + p \neq x_{F_2} + q,
\end{equation*}
which contradicts \cref{eq:nonexistence_of_transinvar_outmea_eq1}. Thus
\begin{equation*}
(\mathbb{V} + p) \cap (\mathbb{V} + q) = \emptyset,
\end{equation*}
as claimed. $\dashv$
This in turn means that the sets $\mathbb{V} + q$, for $q \in \mathbb{Q}$,
also partition $\mathbb{R}$. In other words, if we write $\mathbb{Q} = \{ p_n
\}_{n=1}^{\infty}$, then
\begin{equation*}
\mathbb{R} = \mathbb{V} + \mathbb{Q} = \bigcupdot_{n=1}^{\infty} \mathbb{V}
+ p_n.
\end{equation*}
Now, note that
\begin{equation*}
0 \neq \mu \mathbb{R} \overset{(1)}{=} \sum_{n=1}^{\infty} \mu(\mathbb{V} +
p_n) \overset{(2)}{=} \sum_{n=1}^{\infty} \mu(\mathbb{V}),
\end{equation*}
where $(1)$ is by $\mu$ being $\sigma$-additive and $(2)$ is by $\mu$ being
translation invariant, both directly from our assumptions. This means that
\begin{equation*}
\mu \mathbb{V} > 0.
\end{equation*}
\noindent
\hlbnoteb{Step 4} Now consider $S = \mathbb{Q} \cap [0, 1]$, which is
denumerable. Write
\begin{equation*}
S = \{ s_n \}_{n=1}^{\infty}.
\end{equation*}
Note that for all $n \geq 1$,
\begin{equation*}
\mathbb{V} \subseteq [0, 1] \implies \mathbb{V} + s_n \subseteq [0, 2],
\end{equation*}
and as proven above
\begin{equation*}
i \neq j \implies (\mathbb{V} + s_i) \cap (\mathbb{V} + s_j) = \emptyset.
\end{equation*}
Thus it follows that
\begin{equation*}
\mu \left( \bigcupdot_{n=1}^{\infty} \mathbb{V} + s_n \right)
= \sum_{n=1}^{\infty} \mu (\mathbb{V} + s_n)
= \sum_{n=1}^{\infty} \mu(\mathbb{V})
= \infty.
\end{equation*}
Also,
\begin{align*}
\mu \left( \bigcupdot_{n=1}^{\infty} \mathbb{V} + s_n \right)
&= \sum_{n=1}^{\infty} \mu (\mathbb{V} + s_n) \\
&\leq \mu([0, 2]) = \mu([0, 1] \cup ([0, 1] + 1)) \\
&\leq \mu[0, 1] + \mu([0, 1] + 1) \\
&= 2 \mu([0, 1]) < \infty,
\end{align*}
by translation invariance and assumption (2), contradicting what we have right
above.
Therefore, no such $\mu$ exists.
\end{proof}
With the realization of
\cref{thm:non_existence_of_a_sensible_translation_invariant_outer_measure_that_is_also_sigma_additive},
we find ourselves facing a losing dilemma: we may either
\begin{enumerate}
\item be happy with the Lebesgue outer measure $m^*$ for \hlbnotee{all}
subsets $E \subseteq \mathbb{R}$, which would \hlbnotec{agree with our
intuitive notion} of length, at the price of $\sigma$-additivity; or
\item restrict the \hlbnotee{domain} of our function $m^*$ to some family of
subsets of $\mathbb{R}$, where $m^*$ would have $\sigma$-additivity.
\end{enumerate}
We shall adopt the second approach. We shall call the collection of sets on
which $m^*$ has $\sigma$-additivity the collection of \hlnotea{Lebesgue
measurable sets}.
% section lebesgue_outer_measure_continued (end)
\section{Lebesgue Measure}%
\label{sec:lebesgue_measure}
% section lebesgue_measure
We shall first introduce \hlnotea{Carath\'{e}odory's} definition of a Lebesgue
measurable set.
\begin{defn}[Lebesgue Measurable Set]\index{Lebesgue Measurable Set}\label{defn:lebesgue_measureable_set}
A set $E \subseteq \mathbb{R}$ is said to be \hlnoteb{Lebesgue measurable} if,
$\forall X \subseteq \mathbb{R}$,
\begin{equation*}
m^* X = m^* ( X \cap E ) + m^* (X \setminus E).
\end{equation*}
We denote the collection of all Lebesgue measurable sets as
$\mathfrak{M}(\mathbb{R})$.
\end{defn}
\begin{remark}
Since we shall almost exclusively focus on the Lebesgue measure, we shall
hereafter refer to ``Lebesgue measurable sets'' as simply ``measurable sets''.
\end{remark}
\begin{note}
I shall quote and paraphrase this remark from our course notes
\cite{marcoux2019}:
\begin{quotebox}{magenta}{foreground}
Informally, we see that a set $E \subseteq \mathbb{R}$ is measurable
provided that it is a ``universal slicer'', that it ``slices'' every other
set $X$ into two \hlwarn{disjoint} sets, on which the Lebesgue outer
measure is \hlimpo{additive}.
\end{quotebox}
Also, note that we get the following inequality for free, simply from
$\sigma$-subadditivity of $m^*$:
\begin{equation*}
m^* X \leq m^* (X \cap E) + m^* (X \setminus E).
\end{equation*}
Thus, it suffices for us to check if the reverse inequality holds for all sets
$X \subseteq \mathbb{R}$.
\end{note}
Before ploughing forward to getting our hands dirty with examples, let us first
study a result on the structure of $\mathfrak{M}(\mathbb{R})$ that is rather
interesting. \sidenote{For those who have dirtied themselves in the world of
probability and statistics, especially probability theory, get ready to get
excited!}
\begin{defn}[Algebra of Sets]\index{Algebra of Sets}\index{$\sigma$-algebra of Sets}\label{defn:algebra_of_sets}\label{defn:sigma_algebra_of_sets}
A collection $\Omega \subseteq \mathcal{P}(\mathbb{R})$ is said to be an
\hlnoteb{algebra of sets} if
\begin{enumerate}
\item $\mathbb{R} \in \Omega$;
\item (\textbf{closed under complementation}) $E \in \Omega \implies E^C \in
\Omega$; and
\item (\textbf{closed under finite union}) given $N \geq 1$ and $\{ E_n
\}_{n=1}^{N} \subseteq \Omega$, then
\begin{equation*}
\bigcup_{n=1}^{N} E_n \in \Omega.
\end{equation*}
\end{enumerate}
We say that $\Omega$ is a \hlnoteb{$\sigma$-algebra of sets} if
\begin{enumerate}
\item $\Omega$ is an algebra of sets; and
\item (\textbf{closed under countable union}) if $\{ E_n \}_{n=1}^{\infty}
\subseteq \Omega$, then
\begin{equation*}
\bigcup_{n=1}^{\infty} E_n \in \Omega.
\end{equation*}
\end{enumerate}
\end{defn}
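Two quick examples of my own to anchor the definition before the main theorem:
\begin{eg}
  The collections $\{ \emptyset, \mathbb{R} \}$ and $\mathcal{P}(\mathbb{R})$
  are both $\sigma$-algebras (the smallest and the largest possible,
  respectively). On the other hand, the collection
  \begin{equation*}
    \Omega = \{ E \subseteq \mathbb{R} : E \text{ is finite or } E^C \text{ is
    finite} \}
  \end{equation*}
  is an algebra of sets but not a $\sigma$-algebra: each singleton $\{ q \}$
  with $q \in \mathbb{Q}$ lies in $\Omega$, yet the countable union
  $\bigcup_{q \in \mathbb{Q}} \{ q \} = \mathbb{Q}$ is neither finite nor
  co-finite.
\end{eg}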
\begin{note}
We often refer to a $\sigma$-algebra of sets simply as a $\sigma$-algebra.
\end{note}
\begin{thm}[$\mathfrak{M}(\mathbb{R})$ is a $\sigma$-algebra]\label{thm:_m_r_is_a_sigma_algebra}
The collection $\mathfrak{M}(\mathbb{R})$ of Lebesgue measurable sets in
$\mathbb{R}$ is a $\sigma$-algebra.
\end{thm}
Due to time constraints, we shall prove the first two requirements in this
lecture and prove the last requirement next time (which is also really long).
\begin{proof}
\hlbnoted{$\mathbb{R} \in \mathfrak{M}(\mathbb{R})$} Observe that $\forall X
\subseteq \mathbb{R}$,
\begin{equation*}
m^* X = m^* X + 0 = m^* X + m^* \emptyset = m^* (X \cap \mathbb{R}) + m^* (X
\setminus \mathbb{R}).
\end{equation*}
\noindent
\hlbnoted{$E \in \mathfrak{M}(\mathbb{R}) \implies E^C \in
\mathfrak{M}(\mathbb{R})$} Observe that $\forall X \subseteq \mathbb{R}$,
since $E \in \mathfrak{M}(\mathbb{R})$, we have
$\begin{WithArrows}
m^* X &= m^* ( X \cap E ) + m^* ( X \setminus E ) \Arrow{$A \setminus B = A
\cap B^C$} \\
&= m^* ( X \cap (E^C)^C ) + m^* ( X \cap E^C ) \\
&= m^* ( X \setminus E^C ) + m^* ( X \cap E^C ) \Arrow{rearrangement} \\
&= m^* ( X \cap E^C ) + m^* ( X \setminus E^C )
\end{WithArrows}$
\noindent
Thus $E^C \in \mathfrak{M}(\mathbb{R})$.
\end{proof}
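A small observation of my own before moving on:
\begin{note}
  Combining the two parts just proven, we get for free that $\emptyset =
  \mathbb{R}^C \in \mathfrak{M}(\mathbb{R})$.
\end{note}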
% section lebesgue_measure (end)
% chapter lecture_3_may_14th_2019 (end)
\chapter{Lecture 4 May 16th 2019}%
\label{chp:lecture_4_may_16th_2019}
% chapter lecture_4_may_16th_2019
\section{Lebesgue Measure (Continued)}%
\label{sec:lebesgue_measure_continued}
% section lebesgue_measure_continued
Recalling the last theorem we were in the middle of proving, it remains for us
to prove that $\mathfrak{M}(\mathbb{R})$ is closed under countable unions of its
elements.
But before we dive in, let's first have a little pep talk.
\begin{strategy}
Since $m^*$ is $\sigma$-subadditive, given $\{ E_n \}_{n=1}^{\infty}$, we need
only prove that $\forall X \subseteq \mathbb{R}$,
\begin{equation*}
m^* X \geq m^* \left( X \cap \bigcup_{n=1}^{\infty} E_n \right) + m^* \left(
X \setminus \bigcup_{n=1}^{\infty} E_n \right).
\end{equation*}
Recall our discussion near the end of
\cref{sec:lebesgue_outer_measure_continued}. We want $\sigma$-additivity,
especially when we are given a set of disjoint intervals. However, our $E_n$'s
are arbitrary, and so they are not necessarily disjoint.
It helps if one has seen how we can slice $\mathbb{R}$ up into disjoint
unions, and consequently we can do so for any of its subsets. We shall not
take that for granted and immediately use it, but we shall work through this
proof in the spirit of that. We shall see how we can slice $\mathbb{R}$ up in
A1.
Once we can, in some way, express $\bigcup_{n=1}^{\infty} E_n$ as a disjoint
union of intervals, we will then show that, indeed, we have
$\sigma$-additivity instead of $\sigma$-subadditivity on this disjoint union.
\end{strategy}
\begin{proof}
\hlbnoted{$\mathfrak{M}(\mathbb{R})$ is closed under countable unions} Suppose
$\{ E_n \}_{n=1}^{\infty} \subseteq \mathfrak{M}(\mathbb{R})$. To show that
$\bigcup_{n=1}^{\infty} E_n \in \mathfrak{M}(\mathbb{R})$, WTS
\begin{equation*}
m^* X = m^* \left( X \cap \bigcup_{n=1}^{\infty} E_n \right) + m^* \left( X
\setminus \bigcup_{n=1}^{\infty} E_n \right).
\end{equation*}
Since $m^*$ is $\sigma$-subadditive, it suffices for us to show that
\begin{equation}\label{eq:leb_mea_set_is_a_sig_alg_biggoal}
m^* X \geq m^* \left( X \cap \bigcup_{n=1}^{\infty} E_n \right) + m^* \left(
X \setminus \bigcup_{n=1}^{\infty} E_n \right).
\end{equation}
\noindent
\hlbnoteb{Step 1} Consider
\begin{equation*}
H_n = \bigcup_{i=1}^{n} E_i, \quad \forall n \geq 1.
\end{equation*}
\hlbnotea{Claim: $H_n \in \mathfrak{M}(\mathbb{R})$, $\forall n \geq 1$} We
shall prove this by induction on $n$.
When $n = 1$, we have $H_1 = E_1 \in \mathfrak{M}(\mathbb{R})$ by assumption,
and so we are done. Suppose that $H_k \in \mathfrak{M}(\mathbb{R})$ for some
$k \in \mathbb{N}$. Consider $n = k + 1$.
Since we will need the piece $X \cap H_{k+1}$, first notice that
\begin{equation}\label{eq:leb_mea_set_is_a_sig_alg_step1_key}
  X \cap H_{k + 1} = X \cap ( H_k \cup E_{k + 1} ) = (X \cap H_k) \cup ((X
  \setminus H_k) \cap E_{k + 1}).
\end{equation}
This may be (will be) useful later on, and we can guess that we will be using
$\sigma$-subadditivity on this.
By the IH, since $H_k \in \mathfrak{M}(\mathbb{R})$, we have
\begin{equation*}
m^* X = m^* ( X \cap H_k ) + m^* ( X \setminus H_k ).
\end{equation*}
Notice the similarity between the above equation and
\cref{eq:leb_mea_set_is_a_sig_alg_step1_key}, where we are just off by that
$\cap E_{k + 1}$.
Since $E_{k + 1} \in \mathfrak{M}(\mathbb{R})$, we have
\begin{equation*}
m^* (X \setminus H_k) = m^* ( (X \setminus H_k) \cap E_{k+1} ) + m^* ( (X
\setminus H_k) \setminus E_{k+1} ).
\end{equation*}
To clean the above equation up a little bit, notice that by \hlnotea{De
Morgan's Law},
\begin{equation*}
(X \setminus H_k) \setminus E_{k+1} = X \cap \bigcap_{i=1}^{k} E_i^C \cap
E_{k+1}^C = X \setminus H_{k + 1}.
\end{equation*}
So
\begin{equation*}
m^* (X \setminus H_k) = m^* ( (X \setminus H_k) \cap E_{k+1} ) + m^* ( X
\setminus H_{k + 1} ).
\end{equation*}
Thus
\begin{equation*}
m^* X = m^* (X \cap H_k) + m^*((X \setminus H_k) \cap E_{k+1}) + m^*(X
\setminus H_{k+1}).
\end{equation*}
Using \cref{eq:leb_mea_set_is_a_sig_alg_step1_key} and $\sigma$-subadditivity,
we have that
\begin{equation*}
m^* X \geq m^* ( X \cap H_{k + 1} ) + m^* ( X \setminus H_{k+1} ),
\end{equation*}
which is what we need. Thus $\forall k \geq 1$, $H_k \in
\mathfrak{M}(\mathbb{R})$. $\dashv$
\noindent
\hlbnoteb{Step 2} Consider $F_1 = H_1 = E_1 \in \mathfrak{M}(\mathbb{R})$,
and for $k \geq 2$,
\begin{equation*}
F_k = H_k \setminus H_{k-1} = H_k \cap H_{k-1}^{C}.
\end{equation*}
\sidenote{Note that we cannot assume that $\mathfrak{M}(\mathbb{R})$ is closed under
finite intersections because that is part of what we want to prove.}
\hlbnotea{Claim: $\forall k \geq 2$, $F_k \in \mathfrak{M}(\mathbb{R})$}
First, notice that
\begin{equation*}
  F_k^C = \left( H_k \cap H_{k-1}^C \right)^C = H_k^C \cup H_{k-1}.
\end{equation*}
By \textbf{step 1} \sidenote{The induction argument in \textbf{step 1} shows,
more generally, that the union of any two measurable sets is measurable;
apply it to $H_k^C$ and $H_{k-1}$, both of which are measurable.}, we have
that $F_k^C \in \mathfrak{M}(\mathbb{R})$, and thus by closure under
complementation, $F_k \in \mathfrak{M}(\mathbb{R})$.
Also, note that the $F_i$'s are pairwise disjoint. Suppose not, i.e. that
$\exists x \in F_a \cap F_b$ for some $a, b \geq 1$ and $a \neq b$. Wlog, wma
$a < b$. Note that $H_a \subseteq H_b$, since
\begin{equation*}
H_a = \bigcup_{i=1}^{a} E_i \subseteq \bigcup_{i=1}^{b} E_i = H_b.
\end{equation*}
Since $F_b = H_b \setminus H_{b - 1}$,
\begin{equation*}
x \in F_b \implies x \notin \bigcup_{i=1}^{b-1} E_i \supseteq \bigcup_{i=1}^{a}
E_i,
\end{equation*}
and so $x \notin E_i$ for $1 \leq i \leq a \leq b - 1$. But we assumed that
\begin{equation*}
x \in F_a = H_a \setminus H_{a - 1},
\end{equation*}
i.e. it must be that $x \in E_a$, a contradiction.
\noindent
\hlbnoteb{Step 3} We now have
\begin{equation*}
E = \bigcup_{i=1}^{\infty} E_i = \bigcup_{i=1}^{\infty} H_i =
\bigcupdot_{i=1}^{\infty} F_i.
\end{equation*}
\cref{eq:leb_mea_set_is_a_sig_alg_biggoal} becomes \sidenote{I refrained from
changing the second term to the disjoint union. Retrospectively (i.e. once
you're done with the proof), it makes sense to not consider this move, since
there is no point looking at $X$ take away a bunch of disjoint intervals.}
\begin{equation*}
m^* X \geq m^* \left( X \cap \left( \bigcupdot_{i=1}^{\infty} F_i \right)
\right) + m^* \left( X \setminus E \right).
\end{equation*}
Since the $F_i$'s are disjoint, we expect
\begin{equation*}
  m^* \left( X \cap \bigcupdot_{i=1}^{\infty} F_i \right) =
  \sum_{i=1}^{\infty} m^* (X \cap F_i).
\end{equation*}
As a first step towards this, we claim that for every $n$,
\begin{equation*}
  m^* \left( X \cap \bigcupdot_{i=1}^{n} F_i \right) =
  \sum_{i=1}^{n} m^* (X \cap F_i).
\end{equation*}
Let's prove this inductively. It is clear that the case $n = 1$ is trivially
true.
Suppose that this is true up to some $k \in \mathbb{N}$. Consider case $n = k
+ 1$. Since $F_{k+1} \in \mathfrak{M}(\mathbb{R})$, we have that
\sidenote{This is quite a smart trick!}
$\begin{WithArrows}
  &m^* \left( X \cap \bigcup\limits_{i=1}^{k+1} F_i \right) \\
  &= m^* \left( \left( X \cap \bigcup\limits_{i=1}^{k+1} F_i \right) \cap
  F_{k+1} \right)
  + m^* \left( \left( X \cap \bigcup\limits_{i=1}^{k+1} F_i \right) \setminus
  F_{k+1} \right) \\
  &= m^* (X \cap F_{k+1}) + m^*\left(X \cap \bigcup\limits_{i=1}^{k}
  F_i\right) \Arrow{IH} \\
  &= m^* (X \cap F_{k+1}) + \sum_{i=1}^{k} m^* (X \cap F_i) \\
  &= \sum\limits_{i=1}^{k+1} m^* (X \cap F_i),
\end{WithArrows}$
where the second equality uses the pairwise disjointness of the $F_i$'s.
Our claim is complete by induction.
\noindent
\hlbnoteb{Step 4} With \textbf{Step 3},
\cref{eq:leb_mea_set_is_a_sig_alg_biggoal} has become
\begin{equation*}
m^* X \geq \sum_{i=1}^{\infty} m^* ( X \cap F_i ) + m^* ( X \setminus E ).
\end{equation*}
\sidenote{This is a reward for the clear-minded, cause I certainly did not
find it an obvious step to take.} Since $H_k \in \mathfrak{M}(\mathbb{R})$ for
each $k \geq 1$, we have
\begin{equation}\tag{$*$}\label{eq:leb_mea_set_is_a_sig_alg_steppingstone}
m^* X = m^* (X \cap H_k) + m^* (X \setminus H_k).
\end{equation}
Since
\begin{equation*}
H_k = \bigcup_{i=1}^{k} E_i \subseteq \bigcup_{i=1}^{\infty} E_i = E,
\end{equation*}
we have that
\begin{equation*}
X \setminus H_k \supseteq X \setminus E,
\end{equation*}
for each $k \geq 1$. Thus by monotonicity,
\cref{eq:leb_mea_set_is_a_sig_alg_steppingstone} becomes
\begin{align*}
m^* X &\geq m^*(X \cap H_k) + m^*(X \setminus E) \\
&= m^* \left( X \cap \left( \bigcupdot_{i=1}^{k} F_i \right) \right)
+ m^*(X \setminus E) \\
&= \sum_{i=1}^{k} m^* (X \cap F_i) + m^* (X \setminus E),
\end{align*}
for each $k \geq 1$.
By letting $k \to \infty$, we have that
\begin{equation*}
m^* X \geq \sum_{i=1}^{\infty} m^* (X \cap F_i) + m^*(X \setminus E).
\end{equation*}
Note that
\begin{equation*}
X \cap E = X \cap \bigcup_{i=1}^{\infty} F_i = \bigcup_{i=1}^{\infty} (X
\cap F_i).
\end{equation*}
By $\sigma$-subadditivity, we have that
\begin{equation*}
m^* ( X \cap E ) \leq \sum_{i=1}^{\infty} m^* (X \cap F_i).
\end{equation*}
Therefore
\begin{equation*}
m^* X \geq m^* (X \cap E) + m^*(X \setminus E),
\end{equation*}
which is what we want!
\end{proof}
\begin{note}[Post-mortem for proof of \cref{thm:_m_r_is_a_sigma_algebra}]
In steps 1 - 3, we slice $\bigcup_{n=1}^{\infty} E_n$ into disjoint
\hlimpo{measurable} sets $F_i$. Along the process of constructing them,
it is showing that they are measurable that takes up most of the proof,
since we require induction.
\end{note}
\begin{propo}[Some Lebesgue Measurable Sets]\label{propo:some_lebesgue_measurable_sets}
\begin{enumerate}
\item If $E \subseteq \mathbb{R}$ and $m^* E = 0$, then $E$ is Lebesgue
measurable.
\item $\forall b \in \mathbb{R}$, $(-\infty, b) \in
\mathfrak{M}(\mathbb{R})$.
\item Every open and every closed set is Lebesgue measurable.
\end{enumerate}
\end{propo}
\begin{proof}
\begin{enumerate}
\item Let $X \subseteq \mathbb{R}$. Note that $X \setminus E \subseteq X$,
and so monotonicity gives
\begin{equation}\label{eq:zero_measure_is_measureable}
m^* X \geq m^*(X \setminus E).
\end{equation}
On the other hand, $X \cap E \subseteq E$, and so
\begin{equation*}
m^* (X \cap E) \leq m^* E = 0 \implies m^* (X \cap E) = 0.
\end{equation*}
Thus, from \cref{eq:zero_measure_is_measureable},
\begin{equation*}
m^* X \geq m^* (X \setminus E) = m^* (X \cap E) + m^* (X \setminus E).
\end{equation*}
Hence $E \in \mathfrak{M}(\mathbb{R})$ as required.
\item Let $b \in \mathbb{R}$ and $X \subseteq \mathbb{R}$ be arbitrary. WTS
\begin{equation*}
m^* X \geq m^* ( X \cap (-\infty, b) ) + m^* ( X \setminus (-\infty, b) ).
\end{equation*}
\sidenote{We will look at $X \cap (-\infty, b)$ and $X \setminus (-\infty,
b)$ more closely, and then realize that since we can cover $X$, we can
``extend'' this cover for these disjoint pieces by taking intersections and
set removals on each of the covering sets.}
Let $E = (-\infty, b)$. Note that if $m^* X = \infty$, then there is
nothing to show. Thus WMA $m^* X < \infty$. In this case, let $\epsilon >
0$, and $\{ I_n \}_{n=1}^{\infty}$ a cover of $X$ by open intervals, where
we write
\begin{equation*}
I_n = (a_n, b_n)
\end{equation*}
for each $n \geq 1$, so that \sidenote{Note that this is legitimate
because $m^* X$ is the infimum of such sums on the LHS, and we can
definitely find such a cover as a result. Also, there is no harm in assuming
that each of the $I_n$'s are non-empty, since we may simply remove all the
empty $I_n$'s from the cover.}
\begin{equation*}
\sum_{n=1}^{\infty} \ell(I_n) < m^* X + \epsilon.
\end{equation*}
For each $n \geq 1$, consider the sets
\begin{equation*}
J_n = I_n \cap E = I_n \cap (-\infty, b)
\end{equation*}
and
\begin{equation*}
K_n = I_n \setminus E = I_n \setminus (-\infty, b) = I_n \cap [b,
\infty).
\end{equation*}
The following table captures all possible $J_n$'s and $K_n$'s:
\begin{table}[ht]
\centering
\caption{Possible outcomes of $J_n$ and $K_n$, for each $n \geq 1$}
\label{table:possible_outcomes_of_j_n_and_k_n}
\begin{tabular}{c | c c c}
Case & 1 & 2 & 3 \\
\hline
$b$ & $\geq b_n$ & $\in I_n$ & $\leq a_n$ \\
\hline
$J_n$ & $I_n$ & $(a_n, b)$ & $\emptyset$ \\
$K_n$ & $\emptyset$ & $[b, b_n)$ & $I_n$
\end{tabular}
\end{table}
\begin{marginfigure}[100pt]
\centering
\begin{tikzpicture}
\draw (-2, 1) -- (2, 1);
\draw (0, 1.1) -- (0, 0.9) node[below] {$b$};
\draw[
{Arc Barb[length=5pt,width=15pt]}-{Arc Barb[length=5pt,width=15pt]},
red]
(-1.5,1) node[below=10pt] {$a_n$} -- (-0.5,1) node[below=10pt] {$b_n$};
\draw (-2, 0) -- (2, 0);
\draw (0, 0.1) -- (0, -0.1) node[below] {$b$};
\draw[
{Arc Barb[length=5pt,width=15pt]}-{Arc Barb[length=5pt,width=15pt]},
red]
(-1,0) node[below=10pt] {$a_n$} -- (1,0) node[below=10pt] {$b_n$};
\draw (-2, -1) -- (2, -1);
\draw (0, -0.9) -- (0, -1.1) node[below] {$b$};
\draw[
{Arc Barb[length=5pt,width=15pt]}-{Arc Barb[length=5pt,width=15pt]},
red]
(0.5,-1) node[below=10pt] {$a_n$} -- (1.5,-1) node[below=10pt] {$b_n$};
\end{tikzpicture}
\caption{Three possible scenarios of where $b$ stands for different
$I_n$'s}\label{fig:three_possible_scenarios_of_where_b_stands_for_different_i_n_s}
\end{marginfigure}
Notice that $\{ J_n \}_{n=1}^{\infty}$ is a cover of $X \cap E$ by open
intervals. $\{ K_n \}_{n=1}^{\infty}$ is also a cover of $X \setminus E$, but
not by open intervals (the only kind of cover we consider in this course).
Thus, we consider a small extension $L_n$ of $K_n$ such that
\begin{itemize}
\item if $K_n = \emptyset$, then $L_n = \emptyset$;
\item if $K_n = I_n$, then $L_n = I_n$; and
\item if $K_n = [b, b_n)$, then $L_n = \left( b - \frac{\epsilon}{2^n},
b_n \right)$.
\end{itemize}
Then $\{ L_n \}_{n=1}^{\infty}$ is a cover of $X \setminus E$. By
$\sigma$-subadditivity of $m^*$, we have that
\begin{equation*}
m^* ( X \cap E ) \leq \sum_{n=1}^{\infty} \ell(J_n)
\end{equation*}
and
\begin{equation*}
m^* ( X \setminus E ) \leq \sum_{n=1}^{\infty} \ell(L_n).
\end{equation*}
Thus
\begin{equation*}
m^* (X \cap E) + m^* (X \setminus E) \leq \sum_{n=1}^{\infty} \left(
\ell(J_n) + \ell(L_n) \right).
\end{equation*}
Now, notice that in cases 1 and 3,
\begin{equation*}
\ell(J_n) + \ell(L_n) = \ell(I_n).
\end{equation*}
In case 2, we have that $\ell(J_n) = b - a_n$ and $\ell(L_n) = b_n - b +
\frac{\epsilon}{2^n}$, so that
\begin{equation*}
\ell(J_n) + \ell(L_n) = \ell(I_n) + \frac{\epsilon}{2^n}.
\end{equation*}
In every case, therefore,
\begin{equation*}
\ell(J_n) + \ell(L_n) \leq \ell(I_n) + \frac{\epsilon}{2^n}.
\end{equation*}
Therefore
\begin{align*}
&m^* (X \cap E) + m^* (X \setminus E) \\
&\leq \sum_{n=1}^{\infty} \left( \ell(J_n) + \ell(L_n) \right) \\
&\leq \sum_{n=1}^{\infty} \left(\ell(I_n) + \frac{\epsilon}{2^n}\right) \\
&= \sum_{n=1}^{\infty} \ell(I_n) + \epsilon \\
&< \left( m^* X + \epsilon \right) + \epsilon \\
&= m^* X + 2 \epsilon.
\end{align*}
Since $\epsilon > 0$ is arbitrary, we have that
\begin{equation*}
m^* X \geq m^* (X \cap E) + m^* ( X \setminus E ),
\end{equation*}
and since $X$ is arbitrary, we have that $E = (-\infty, b) \in
\mathfrak{M}(\mathbb{R})$.
\item Let $a, b \in \mathbb{R}$ with $a < b$. By part 2, we have that
\begin{equation*}
(-\infty, b) \in \mathfrak{M}(\mathbb{R}),
\end{equation*}
and similarly, for $n \geq 1$,
\begin{equation*}
\left(-\infty, a + \frac{1}{n}\right) \in \mathfrak{M}(\mathbb{R}).
\end{equation*}
Since $\mathfrak{M}(\mathbb{R})$ is a $\sigma$-algebra, we have that
\begin{equation*}
\left[a + \frac{1}{n}, \infty\right) = \left(-\infty, a +
\frac{1}{n}\right)^C \in \mathfrak{M}(\mathbb{R}),
\end{equation*}
for each $n \geq 1$. Consequently,
\begin{equation*}
(a, \infty) = \bigcup_{n=1}^{\infty} \left[ a + \frac{1}{n}, \infty
\right) \in \mathfrak{M}(\mathbb{R}).
\end{equation*}
Therefore, we have that
\begin{equation*}
(a, b) = (-\infty, b) \cap (a, \infty) \in \mathfrak{M}(\mathbb{R}).
\end{equation*}
\sidenote{We shall prove this in A1.} Since every open set $G \subseteq
\mathbb{R}$ is a countable disjoint union of open intervals in
$\mathbb{R}$, it follows that $G \in \mathfrak{M}(\mathbb{R})$ since
$\mathfrak{M}(\mathbb{R})$ is a $\sigma$-algebra.
If $F \subseteq \mathbb{R}$ is closed, then $G \coloneqq F^C$ is open, and so
\begin{equation*}
G = F^C \in \mathfrak{M}(\mathbb{R})
\end{equation*}
by the above. By closure under complementation of $\sigma$-algebras, $F =
G^C \in \mathfrak{M}(\mathbb{R})$.
\end{enumerate}
\end{proof}
\begin{defn}[Lebesgue Measure]\index{Lebesgue Measure}\label{defn:lebesgue_measure}
Let $m^*$ denote the Lebesgue outer measure on $\mathbb{R}$. We define the
\hlnoteb{Lebesgue measure} $m$ to be
\begin{equation*}
m = m^* \restriction_{\mathfrak{M}(\mathbb{R})},
\end{equation*}
i.e. $\forall E \in \mathfrak{M}(\mathbb{R})$, we have that
\begin{equation*}
m E = m^* E = \inf \left\{ \sum_{n=1}^{\infty} \ell(I_n) \mid E \subseteq
\bigcup_{n=1}^{\infty} I_n,\ I_n \text{ open intervals} \right\}.
\end{equation*}
\end{defn}
In A2, we shall prove that
\begin{thm}[$\sigma$-additivity of the Lebesgue Measure on Lebesgue Measurable Sets]\label{thm:_sigma_additivity_of_the_lebesgue_measure_on_lebesgue_measurable_sets}
The Lebesgue measure is $\sigma$-additive on $\mathfrak{M}(\mathbb{R})$, i.e.
if $\{ E_n \}_{n=1}^{\infty} \subseteq \mathfrak{M}(\mathbb{R})$ with $E_i
\cap E_j = \emptyset$ for all $i \neq j$, then
\begin{equation*}
m \bigcup_{n=1}^{\infty} E_n = \sum_{n=1}^{\infty} m E_n.
\end{equation*}
\end{thm}
\begin{crly}[Existence of Non-Measurable Sets]\label{crly:existence_of_non_measurable_sets}
There exist non-measurable sets.
\end{crly}
\begin{proof}
Suppose not, i.e. $\mathfrak{M}(\mathbb{R}) =
\mathcal{P}(\mathbb{R})$. Then $m = m^*$ is a
\hyperref[defn:translation_invariant]{translation invariant} outer measure on
$\mathbb{R}$, with $m^* \mathbb{R} = \infty > 0$, $m^* [0, 1] = 1 < \infty$,
and $m^*$ is $\sigma$-additive, which contradicts
\cref{thm:non_existence_of_a_sensible_translation_invariant_outer_measure_that_is_also_sigma_additive}.
Thus $\mathfrak{M}(\mathbb{R}) \neq \mathcal{P}(\mathbb{R})$.
\end{proof}
The following proposition is left as an exercise.
\begin{propo}[Non-measurability of the Vitali Set]\label{propo:non_measurability_of_the_vitali_set}
The Vitali set $\mathbb{V}$, defined in
\cref{thm:non_existence_of_a_sensible_translation_invariant_outer_measure_that_is_also_sigma_additive},
is not measurable.
\end{propo}
\marginnote{
\begin{ex}
Prove \cref{propo:non_measurability_of_the_vitali_set}.
\end{ex}
}
\begin{defn}[$\sigma$-algebra of Borel Sets]\index{$\sigma$-algebra of Borel Sets}\label{defn:_sigma_algebra_of_borel_sets}
The $\sigma$-algebra of sets generated by the collection
\begin{equation*}
\mathfrak{G} \coloneqq \{ G \subseteq \mathbb{R} : G \text{ is open } \}
\end{equation*}
is called the \hlnoteb{$\sigma$-algebra of Borel sets} of $\mathbb{R}$, and is
denoted by
\begin{equation*}
\Bor(\mathbb{R}).
\end{equation*}
\end{defn}
\begin{note}
Since $\Bor(\mathbb{R})$ is generated by open sets in $\mathbb{R}$ and all
open subsets of $\mathbb{R}$ are Lebesgue measurable (cf.
\cref{propo:some_lebesgue_measurable_sets}), we have that
\begin{equation*}
\Bor(\mathbb{R}) \subseteq \mathfrak{M}(\mathbb{R}).
\end{equation*}
\end{note}
\begin{remark}
Since $\Bor(\mathbb{R})$ is a $\sigma$-algebra, and it is, in particular,
generated by open subsets of $\mathbb{R}$, it also contains all of the closed
subsets of $\mathbb{R}$. Thus, we could have instead defined
$\Bor(\mathbb{R})$ to be the $\sigma$-algebra of subsets of $\mathbb{R}$
generated by the collection
\begin{equation*}
\mathfrak{F} \coloneqq \{ F \subseteq \mathbb{R} : F \text{ is closed } \},
\end{equation*}
and in turn conclude that $\Bor(\mathbb{R})$ contains $\mathfrak{G}$.
\end{remark}
\begin{remark}
Let $\mathcal{A} \subseteq \mathcal{P}(\mathbb{R})$, with $\emptyset,
\mathbb{R} \in \mathcal{A}$. Let
\begin{gather*}
\mathcal{A}_{\sigma} \coloneqq \left\{ \bigcup_{n=1}^{\infty} A_n : A_n \in
\mathcal{A}, n \geq 1 \right\} \\
\mathcal{A}_{\delta} \coloneqq \left\{ \bigcap_{n=1}^{\infty} A_n : A_n \in
\mathcal{A}, n \geq 1 \right\}.
\end{gather*}
We call the elements of $\mathcal{A}_{\sigma}$ as \hldefn{$\mathcal{A}$-sigma
sets}, and elements of $\mathcal{A}_{\delta}$ as \hldefn{$\mathcal{A}$-delta sets}.
Recalling our definitions
\begin{gather*}
\mathfrak{G} = \{ G \subseteq \mathbb{R} \mid G \text{ is open } \} \\
\mathfrak{F} = \{ F \subseteq \mathbb{R} \mid F \text{ is closed } \}
\end{gather*}
from above, notice that
\begin{equation*}
\mathfrak{G}_{\delta} = \left\{ \bigcap_{n=1}^{\infty} G_n \mid G_n \in
\mathfrak{G}, n \geq 1 \right\},
\end{equation*}
which is a \hlimpo{countable intersection of open subsets} of $\mathbb{R}$, and
\begin{equation*}
\mathfrak{F}_{\sigma} = \left\{ \bigcup_{n=1}^{\infty} F_n \mid F_n \in
\mathfrak{F}, n \geq 1 \right\},
\end{equation*}
which is a \hlimpo{countable union of closed subsets} of $\mathbb{R}$, are
both subsets of $\Bor(\mathbb{R})$.
\end{remark}
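For instance \sidenote{These two quick examples are my own, not from the
lecture.}, the half-open interval $(a, b]$ is a $\mathfrak{G}_{\delta}$-set,
since
\begin{equation*}
(a, b] = \bigcap_{n=1}^{\infty} \left( a, b + \frac{1}{n} \right),
\end{equation*}
while $\mathbb{Q} = \bigcup_{q \in \mathbb{Q}} \{ q \}$ is a countable union
of closed singletons, and hence an $\mathfrak{F}_{\sigma}$-set. Both
therefore belong to $\Bor(\mathbb{R})$.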
\newthought{As mentioned before}, the definition we provided for a
\hyperref[defn:lebesgue_measureable_set]{Lebesgue measurable set} is due to
\hlnotea{Carath\'{e}odory}, and it is not the most intuitive one. We shall
now show that it is equivalent to the original definition that Lebesgue
himself provided.
\begin{thm}[Carath\'{e}odory's and Lebesgue's Definition of Measurability]\label{thm:carath'_e_odory_s_and_lebesgue_s_definition_of_measurability}
Let $E \subseteq \mathbb{R}$. TFAE:
\begin{enumerate}
\item $E$ is Lebesgue measurable (Carath\'{e}odory).
\item $\forall \epsilon > 0$, there exists an open $G \supseteq E$
such that
\begin{equation*}
m^* (G \setminus E) < \epsilon.
\end{equation*}
\item There exists a $\mathfrak{G}_{\delta}$-set $H$ such that $E \subseteq
H$ and
\begin{equation*}
m^* ( H \setminus E ) = 0.
\end{equation*}
\end{enumerate}
\end{thm}
\begin{proof}
\hlbnoted{$(1) \implies (2)$} If we can find such an open $G$, then
since $E$ is Lebesgue measurable, we have
\begin{equation*}
m G = m (G \cap E) + m (G \setminus E) = m E + m (G \setminus E),
\end{equation*}
and so, provided $m E < \infty$,
\begin{equation}\label{eq:caraleb_1_imp_2_eq1}
m (G \setminus E) = m G - m E.
\end{equation}
So if we can construct an open $G \supseteq E$ whose measure exceeds $m E$
by less than $\epsilon$, our statement is as good as done.
\noindent
\hlbnoteb{Case 1: $m E < \infty$} In this case, we may consider a cover $\{
I_n \}_{n=1}^{\infty}$ of $E$ by open intervals such that
\begin{equation*}
\sum_{n=1}^{\infty} \ell(I_n) < m E + \epsilon.
\end{equation*}
Then we may simply let $G = \bigcup_{n=1}^{\infty} I_n$. Note that since
$\mathfrak{M}(\mathbb{R})$ is a $\sigma$-algebra, $G \in
\mathfrak{M}(\mathbb{R})$. Thus by $\sigma$-subadditivity,
\begin{equation*}
m G = m \left( \bigcup_{n=1}^{\infty} I_n \right) \leq
\sum_{n=1}^{\infty} m I_n = \sum_{n=1}^{\infty} \ell(I_n) < m E +
\epsilon.
\end{equation*}
With this, \cref{eq:caraleb_1_imp_2_eq1} becomes
\begin{equation*}
m (G \setminus E) < m E + \epsilon - m E = \epsilon.
\end{equation*}
\noindent
\hlbnoteb{Case 2: $m E = \infty$} Consider
\begin{equation*}
E_k = [-k, k] \cap E
\end{equation*}
\sidenote{\hlwarn{I should get clarification for my understanding of this
approach.} We picked closed intervals instead of open ones so that we deal
with the possible quirkiness of $E$.}for each $k \geq 1$. By
\cref{propo:some_lebesgue_measurable_sets}, closed sets are Lebesgue
measurable, and so for each $k \geq 1$, $E_k \in \mathfrak{M}(\mathbb{R})$.
Note that
\begin{equation*}
E = \bigcup_{k \geq 1} E_k.
\end{equation*}
\sidenote{It would be a quick job if we take the union of the $E_k$'s but note
that the $E_k$'s are not necessarily open!} Note that $E_k \subseteq [-k, k]$,
and so
\begin{equation*}
m E_k \leq m [-k, k] = 2k < \infty.
\end{equation*}
Using a similar approach as in \textbf{Case 1}, we can construct an open set
$G_k$ such that $G_k \supseteq E_k$, and
\begin{equation*}
m (G_k \setminus E_k) < \frac{\epsilon}{2^k},
\end{equation*}
for each $k \geq 1$. Now let
\begin{equation*}
G \coloneqq \bigcup_{k \geq 1} G_k \supseteq \bigcup_{k \geq 1} E_k = E.
\end{equation*}
Note that if $x \in G \setminus E$, then $x \notin E_k$ for all $k \geq 1$,
and $\exists N \geq 1$ such that $x \in G_N$. In particular, we have that
\begin{equation*}
x \in G_N \setminus E_N,
\end{equation*}
and so
\begin{equation*}
G \setminus E \subseteq \bigcup_{k \geq 1} (G_k \setminus E_k).
\end{equation*}
\sidenote{The inclusion may be proper, since a point of $G_k \setminus E_k$
may still lie in $E_j$ for some other $j$, and hence in $E$; the inclusion
is all we need.} Therefore
\begin{equation*}
m (G \setminus E) \leq \sum_{k \geq 1} m(G_k \setminus E_k) \leq \sum_{k
\geq 1} \frac{\epsilon}{2^k} = \epsilon.
\end{equation*}
\noindent
\hlbnoted{$(2) \implies (3)$} By $(2)$, for each $n \geq 1$, let $G_n
\supseteq E$ be open such that
\begin{equation*}
m^* (G_n \setminus E) < \frac{1}{n}.
\end{equation*}
Let $H \coloneqq \bigcap_{n \geq 1} G_n$, so that $H \in
\mathfrak{G}_{\delta}$. Since $E \subseteq G_n$ for all $n \geq 1$, we
have $E \subseteq H$. Also, $H \subseteq G_n$ for each $n$. Thus
\begin{equation*}
H \setminus E \subseteq G_n \setminus E,
\end{equation*}
for each $n \geq 1$. By monotonicity,
\begin{equation*}
m^*(H \setminus E) \leq m^*(G_n \setminus E) < \frac{1}{n}
\end{equation*}
for each $n \geq 1$. Therefore
\begin{equation*}
m^*(H \setminus E) = 0.
\end{equation*}
\noindent
\hlbnoted{$(3) \implies (1)$} Notice that $\mathfrak{G}_{\delta} \subseteq
\Bor(\mathbb{R}) \subseteq \mathfrak{M}(\mathbb{R})$. Suppose $H \in
\mathfrak{G}_{\delta}$ and $E \subseteq H$ are such that
\begin{equation*}
m^*(H \setminus E) = 0.
\end{equation*}
By \cref{propo:some_lebesgue_measurable_sets}, $H \setminus E \in
\mathfrak{M}(\mathbb{R})$. Since $\mathfrak{M}(\mathbb{R})$ is a
$\sigma$-algebra, notice that
\begin{equation*}
E = H \setminus (H \setminus E) = H \cap (H \setminus E)^C \in
\mathfrak{M}(\mathbb{R}).
\end{equation*}
\end{proof}
% section lebesgue_measure_continued (end)
% chapter lecture_4_may_16th_2019 (end)
\chapter{Lecture 5 May 21st 2019}%
\label{chp:lecture_5_may_21st_2019}
% chapter lecture_5_may_21st_2019
\section{Lebesgue Measure (Continued 2)}%
\label{sec:lebesgue_measure_continued_2}
% section lebesgue_measure_continued_2
Recall from \cref{crly:lebesgue_outer_measure_of_countable_sets_is_zero} that
any countable subset $E \subseteq \mathbb{R}$ has zero Lebesgue outer measure.
From \cref{propo:some_lebesgue_measurable_sets}, we have that $E \in
\mathfrak{M}(\mathbb{R})$ and so $m E = m^* E = 0$. This shows that every
countable set is Lebesgue measurable with Lebesgue measure zero.
\begin{quotebox}{green}{foreground}
But is the converse true? I.e., is every Lebesgue measurable set with Lebesgue
measure zero countable?
\end{quotebox}
We shall show that this is not true by constructing a counterexample: an
\hlnotea{uncountable set} $C$ that has measure zero.
\begin{eg}[The Cantor Set]\label{eg:cantor_set}
Let $C_0 = [0, 1]$. Note that $C_0$ is compact and
\begin{equation*}
m^* C_0 = 1 < \infty.
\end{equation*}
\begin{figure}[h]
\begin{center}
\begin{tikzpicture}
\draw[-] (-0.5,0) -- (9.5,0);
\draw[-] (0,.5) -- (0, -.5) node[below] {$0$};
\draw[-] (9,.5) -- (9, -.5) node[below] {$1$};
\node at (1, -.7) {$\frac{1}{9}$};
\node at (2, -.7) {$\frac{2}{9}$};
\node at (3, -.7) {$\frac{1}{3}$};
\node at (6, -.7) {$\frac{2}{3}$};
\node at (7, -.7) {$\frac{7}{9}$};
\node at (8, -.7) {$\frac{8}{9}$};
\draw[{Arc Barb[length=5pt,width=15pt]}-{Arc Barb[length=5pt,width=15pt]}
,blue] (3,0) -- (6,0);
\draw[{Arc Barb[length=5pt,width=15pt]}-{Arc Barb[length=5pt,width=15pt]}
,blue] (1,0) -- (2,0);
\draw[{Arc Barb[length=5pt,width=15pt]}-{Arc Barb[length=5pt,width=15pt]}
,blue] (7,0) -- (8,0);
\draw[{Bracket[length=5pt,width=15pt]}-{Bracket[length=5pt,width=15pt]}
,red] (0, 0) -- (1, 0);
\draw[{Bracket[length=5pt,width=15pt]}-{Bracket[length=5pt,width=15pt]}
,red] (2, 0) -- (3, 0);
\draw[{Bracket[length=5pt,width=15pt]}-{Bracket[length=5pt,width=15pt]}
,red] (6, 0) -- (7, 0);
\draw[{Bracket[length=5pt,width=15pt]}-{Bracket[length=5pt,width=15pt]}
,red] (8, 0) -- (9, 0);
\draw[{Arc Barb[length=5pt,width=15pt]}-{Arc Barb[length=5pt,width=15pt]}
,blue] (0.333333,0) -- (0.666667,0);
\draw[{Arc Barb[length=5pt,width=15pt]}-{Arc Barb[length=5pt,width=15pt]}
,blue] (2.333333,0) -- (2.666667,0);
\draw[{Arc Barb[length=5pt,width=15pt]}-{Arc Barb[length=5pt,width=15pt]}
,blue] (6.333333,0) -- (6.666667,0);
\draw[{Arc Barb[length=5pt,width=15pt]}-{Arc Barb[length=5pt,width=15pt]}
,blue] (8.333333,0) -- (8.666667,0);
\end{tikzpicture}
\end{center}
\caption{The Cantor set construction up to $n = 2$, with the intervals excluded at $n = 3$ shown.}
\label{fig:cantor_set}
\end{figure}
Let
\begin{equation*}
C_1 = C_0 \setminus \left( \frac{1}{3}, \frac{2}{3} \right).
\end{equation*}
Then $C_1$ is closed \sidenote{$C_1$ is an intersection of 2 closed sets.} and
$C_0 \supseteq C_1$.
Let
\begin{equation*}
C_2 = C_1 \setminus \left( \left( \frac{1}{9}, \frac{2}{9} \right) \cup
\left( \frac{7}{9}, \frac{8}{9} \right) \right).
\end{equation*}
Then $C_2$ is closed and $C_1 \supseteq C_2$.
We continue this process indefinitely, and construct $C_n$ for each $n \geq
1$, where
\begin{equation*}
C_n = \frac{1}{3} C_{n-1} \cup \left( \frac{2}{3} + \frac{1}{3} C_{n-1} \right).
\end{equation*}
Then $C_n$ will consist of $2^n$ disjoint closed intervals. Thus each $C_n$ is
compact and measurable. Moreover,
\begin{equation*}
m (C_n) = \left( \frac{2}{3} \right)^n,
\end{equation*}
for each $n \geq 1$.
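To see this, note that each of the $2^n$ intervals has length
$\frac{1}{3^n}$, so that, by additivity of $m$ over finitely many disjoint
measurable sets (cf.
\cref{thm:_sigma_additivity_of_the_lebesgue_measure_on_lebesgue_measurable_sets}),
\begin{equation*}
m C_n = 2^n \cdot \frac{1}{3^n} = \left( \frac{2}{3} \right)^n.
\end{equation*}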
\begin{figure*}[t]
\centering
\begin{tikzpicture}[decoration=Cantor set,line width=3mm]
\draw (0,0) -- (9,0);
\draw decorate{ (0,-.5) -- (9,-.5) };
\draw decorate{ decorate{ (0,-1) -- (9,-1) }};
\draw decorate{ decorate{ decorate{ (0,-1.5) -- (9,-1.5) }}};
\draw decorate{ decorate{ decorate{
decorate{ (0,-2) -- (9,-2) }}}};
\draw decorate{ decorate{ decorate{
decorate{ decorate{ (0, -2.5) -- (9, -2.5) }}}}};
\end{tikzpicture}
\caption[][20pt]{An illustration of the Cantor Set from \url{https://mathforum.org/mathimages/index.php/Cantor_Set}.}\label{fig:an_illustration_of_the_cantor_set_from_https_}
\end{figure*}
Also, we have that
\begin{equation*}
C_0 \supseteq C_1 \supseteq C_2 \supseteq \hdots
\end{equation*}
is a \hlnotea{descending chain of measurable sets}. Note that the sequence $\{
C_n \}_{n=0}^{\infty}$ has the \hlnotea{finite intersection property}, and
since $C_0$ is compact, the set
\begin{equation*}
C \coloneqq \bigcap_{n=1}^{\infty} C_n,
\end{equation*}
which we shall call the \hldefn{Cantor Set}, is non-empty \sidenote{See
\href{https://tex.japorized.ink/PMATH351F18/classnotes.pdf\#thm.74}{FIP and
Compactness} from PMATH 351}.
Now from A2, we have that
\begin{equation*}
m C = \lim_{n \to \infty} m C_n = \lim_{n \to \infty} \left( \frac{2}{3}
\right)^n = 0.
\end{equation*}
We shall now show that $C$ is uncountable. To do this, we shall use the
\hlnotea{ternary representation} for each $x \in [0, 1]$. In particular, for
each $x \in [0, 1]$, we write
\begin{equation*}
x = 0.x_1 x_2 x_3 \hdots,
\end{equation*}
where each $x_i \in \{ 0, 1, 2 \}$ for all $i \geq 1$. Note that in base 10,
we can express
\begin{equation*}
x = \sum_{k=1}^{\infty} \frac{x_k}{10^k} = 0.x_1 + 0.0x_2 + 0.00x_3 + \hdots
\end{equation*}
\marginnote{I shall paraphrase the professor here because I like how the
analogy brings good intuition, for me at least.
\begin{quotebox}{magenta}{foreground}
Suppose there's a person who has only 3 fingers, is not aware of the
existence of the base-10 system, and in turn invented the ternary system.
Then, instead of dividing $[0, 1]$ into 10 regular intervals, they would
divide it into 3.
\end{quotebox}
}
Thus, we can similarly express
\begin{equation*}
x = \sum_{k=1}^{\infty} \frac{x_k}{3^k},
\end{equation*}
in ternary representation. However, just as
\begin{equation*}
0.99999 \hdots \text{ and } 1.00000 \hdots
\end{equation*}
are indistinguishable, in ternary representation,
\begin{equation*}
0.22222 \hdots \text{ and } 1.00000 \hdots
\end{equation*}
are indistinguishable. Fortunately, we can identify exactly which numbers
fail to be uniquely represented, which shall be left as an
exercise.
\begin{ex}
Show that the ternary expansion of $x \in [0, 1)$ is unique except when
$\exists N \geq 1$ such that
\begin{equation*}
x = \frac{r}{3^N},
\end{equation*}
for some $0 < r < 3^N$, where $3 \nmid r$.
\end{ex}
In the cases where we have the above $x$, we have that \sidenote{Note that the
representation terminates, since the denominator is a power of $3$.}
\begin{equation*}
x = 0.x_1 x_2 x_3 \hdots x_N,
\end{equation*}
where $x_N \in \{1, 2\}$.
\begin{itemize}
\item If $x_N = 2$, we shall keep this expression; otherwise
\item if $x_N = 1$, then we write
\begin{align*}
x &= 0.x_1 x_2 x_3 \hdots x_{N-2} x_{N-1} 1000 \hdots \\
&= 0.x_1 x_2 x_3 \hdots x_{N-2} x_{N-1} 0222 \hdots,
\end{align*}
and we shall use the second expression.
\end{itemize}
We shall also use the convention that
\begin{equation*}
1 = 0.22222 \hdots.
\end{equation*}
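This convention is consistent with summing the geometric series:
\begin{equation*}
0.22222 \hdots = \sum_{k=1}^{\infty} \frac{2}{3^k} = \frac{2}{3} \cdot
\frac{1}{1 - \frac{1}{3}} = 1.
\end{equation*}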
With this, we have obtained a \hlimpo{unique} ternary expansion for each $x
\in [0, 1]$.
\begin{figure*}[ht]
\centering
\begin{tikzpicture}
\draw[-] (-0.5,0) -- (9.5,0);
\draw[-] (0,.5) -- (0, -.5) node[below] {$0$};
\draw[-] (9,.5) -- (9, -.5) node[below] {$1$};
\node at (1, -.7) {$\frac{1}{9}$};
\node at (2, -.7) {$\frac{2}{9}$};
\node at (3, -.7) {$\frac{1}{3}$};
\node at (6, -.7) {$\frac{2}{3}$};
\node at (7, -.7) {$\frac{7}{9}$};
\node at (8, -.7) {$\frac{8}{9}$};
\node[pin={[pin edge={latex'-}, pin distance=20pt]90:{$0.010\hdots =
0.0022\hdots$}}] at (1, 0.3) {};
\node[pin={[pin edge={latex'-}, pin distance=7pt]90:{$0.020\hdots$}}] at
(2, 0.3) {};
\node[pin={[pin edge={latex'-}, pin distance=30pt]90:{$0.022\hdots$}}] at
(3, 0.3) {};
\node[pin={[pin edge={latex'-}, pin distance=7pt]90:{$0.1\hdots$}}] at
(4.5, 0.3) {};
\node[pin={[pin edge={latex'-}, pin distance=30pt]90:{$0.20\hdots$}}] at
(6, 0.3) {};
\draw[{Arc Barb[length=5pt,width=15pt]}-{Arc Barb[length=5pt,width=15pt]}
,blue] (3,0) -- (6,0);
\draw[{Arc Barb[length=5pt,width=15pt]}-{Arc Barb[length=5pt,width=15pt]}
,blue] (1,0) -- (2,0);
\draw[{Arc Barb[length=5pt,width=15pt]}-{Arc Barb[length=5pt,width=15pt]}
,blue] (7,0) -- (8,0);
\draw[{Bracket[length=5pt,width=15pt]}-{Bracket[length=5pt,width=15pt]}
,red] (0, 0) -- (1, 0);
\draw[{Bracket[length=5pt,width=15pt]}-{Bracket[length=5pt,width=15pt]}
,red] (2, 0) -- (3, 0);
\draw[{Bracket[length=5pt,width=15pt]}-{Bracket[length=5pt,width=15pt]}
,red] (6, 0) -- (7, 0);
\draw[{Bracket[length=5pt,width=15pt]}-{Bracket[length=5pt,width=15pt]}
,red] (8, 0) -- (9, 0);
\draw[{Arc Barb[length=5pt,width=15pt]}-{Arc Barb[length=5pt,width=15pt]}
,blue] (0.333333,0) -- (0.666667,0);
\draw[{Arc Barb[length=5pt,width=15pt]}-{Arc Barb[length=5pt,width=15pt]}
,blue] (2.333333,0) -- (2.666667,0);
\draw[{Arc Barb[length=5pt,width=15pt]}-{Arc Barb[length=5pt,width=15pt]}
,blue] (6.333333,0) -- (6.666667,0);
\draw[{Arc Barb[length=5pt,width=15pt]}-{Arc Barb[length=5pt,width=15pt]}
,blue] (8.333333,0) -- (8.666667,0);
\end{tikzpicture}
\caption{Some values on $[0, 1]$ in ternary representation}
\label{fig:some_values_on_0_1_in_ternary_representation}
\end{figure*}
Now, observe that
\begin{align*}
C_1 &= [0, 1] \setminus \left( \frac{1}{3}, \frac{2}{3} \right) \\
&= \{ x \in [0, 1] : x = 0.x_1 x_2 x_3 \hdots, x_1 \neq 1 \},
\end{align*}
i.e. whichever $x \in [0, 1]$ with $x_1 = 1$ sits in $\left( \frac{1}{3},
\frac{2}{3} \right)$. Similarly,
\begin{equation*}
C_2 = \{ x \in [0, 1] : x = 0.x_1 x_2 x_3 \hdots, x_1 \neq 1, x_2 \neq 1 \}.
\end{equation*}
In general, we have that
\begin{equation*}
C_N = \{ x \in [0, 1] : x = 0.x_1 x_2 x_3 \hdots, x_i \neq 1,\, 1 \leq i \leq
N \}.
\end{equation*}
Therefore,
\begin{align*}
C &= \bigcap_{n=1}^{\infty} C_n \\
&= \{ x \in [0, 1] : x = 0.x_1 x_2 x_3 \hdots,\, x_n \neq 1, n \geq 1 \}
\\
&= \{ x \in [0, 1] : x = 0.x_1 x_2 x_3 \hdots,\, x_n \in \{0, 2\}, n \geq 1 \}.
\end{align*}
Now, consider the map
\begin{equation*}
\phi : C \to [0, 1]
\end{equation*}
given by
\begin{equation*}
x = 0.x_1 x_2 x_3 \hdots \mapsto y = 0.y_1 y_2 y_3 \hdots,
\end{equation*}
where $x = 0.x_1 x_2 x_3 \hdots$ is the (unique) ternary expansion of $x$
with $x_n \in \{ 0, 2 \}$ for $n \geq 1$, and $y_n = \frac{x_n}{2}$ for each
$n \geq 1$, so that $y$ is read as a binary expansion. Then $\phi$ is a
surjection onto $[0, 1]$ \sidenote{$\phi$ is not injective: e.g.
$\frac{1}{3} = 0.0222 \hdots$ and $\frac{2}{3} = 0.2000 \hdots$ both map to
$\frac{1}{2}$. Surjectivity is enough for the cardinality argument.}, and
since $C \subseteq \mathbb{R}$, it follows that
\begin{equation*}
\abs{C} = \abs{[0, 1]} = \abs{\mathbb{R}} = c = 2^{\aleph_0}.
\end{equation*}
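For a concrete instance of $\phi$ \sidenote{My own computation.}, the point
$x = 0.202020 \hdots \in C$ satisfies
\begin{equation*}
x = \sum_{k=0}^{\infty} \frac{2}{3^{2k+1}} = \frac{2}{3} \cdot \frac{1}{1 -
\frac{1}{9}} = \frac{3}{4},
\end{equation*}
and it is mapped to
\begin{equation*}
y = 0.101010 \hdots \text{ (binary) } = \sum_{k=0}^{\infty}
\frac{1}{2^{2k+1}} = \frac{1}{2} \cdot \frac{1}{1 - \frac{1}{4}} =
\frac{2}{3},
\end{equation*}
i.e. $\phi\left( \frac{3}{4} \right) = \frac{2}{3}$.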
\end{eg}
\begin{note}
The lesson here is that the Lebesgue measure does not measure the
cardinality of a set; rather, it measures the \hlimpo{distribution of
points} in the set.
\end{note}
% section lebesgue_measure_continued_2 (end)
\section{Lebesgue Measurable Functions}%
\label{sec:lebesgue_measurable_functions}
% section lebesgue_measurable_functions
\begin{note}
We used
\begin{equation*}
\mathfrak{M}(\mathbb{R}) = \{ E \subseteq \mathbb{R} \mid E \text{ is
measurable } \}
\end{equation*}
to denote the set of measurable subsets of $\mathbb{R}$.
In general, for $H \subseteq \mathbb{R}$, we shall denote by
$\mathfrak{M}(H)$ the collection of all Lebesgue measurable subsets of $H$,
i.e.
\begin{equation*}
\mathfrak{M}(H) = \{ E \subseteq H \mid E \in \mathfrak{M}(\mathbb{R}) \}.
\end{equation*}
In particular, for $E \in \mathfrak{M}(\mathbb{R})$, we also have
\begin{equation*}
\mathfrak{M}(E) = \{ F \subseteq E \mid F \in \mathfrak{M}(\mathbb{R}) \}.
\end{equation*}
\end{note}
\begin{ex}
Prove that the above $\mathfrak{M}(E)$ is a $\sigma$-algebra of sets.
\end{ex}
\begin{defn}[Lebesgue Measurable Function]\index{Lebesgue Measurable Function}\label{defn:lebesgue_measurable_function}
Let $E \in \mathfrak{M}(\mathbb{R})$ and $(X, d)$ a metric space. We say that a
function
\begin{equation*}
f : E \to X
\end{equation*}
is \hlnoteb{Lebesgue measurable} (or simply \hlnoteb{measurable}) if
\begin{equation*}
f^{-1}(G) \coloneqq \{ x \in E : f(x) \in G \} \in \mathfrak{M}(E)
\end{equation*}
for every open set $G \subseteq X$.
We write
\begin{equation*}
\mathcal{L}(E, X) = \{ f : E \to X \mid f \text{ measurable } \}
\end{equation*}
for the set of measurable functions from $E$ to $X$.
\end{defn}
\begin{ex}
Show that we can equivalently define that a function $f$ is Lebesgue
measurable if
\begin{equation*}
f^{-1}(F) \in \mathfrak{M}(E)
\end{equation*}
for all closed subsets $F \subseteq X$.
\end{ex}
\begin{note}
Note that we required that the domain of the function be a measurable set in
\cref{defn:lebesgue_measurable_function}. Part of the reason is that we
want constant functions to be measurable, and this happens iff the domain of
the function is measurable \sidenote{If $f \equiv c$, then for any open $G
\ni c$ we have $f^{-1}(G) = E$, so measurability of $f$ forces $E \in
\mathfrak{M}(\mathbb{R})$; conversely, if $E$ is measurable, every preimage
under $f$ is either $\emptyset$ or $E$.}.
\end{note}
\begin{propo}[Continuous Functions on a Measurable Set are Measurable]\label{propo:continuous_functions_on_a_measurable_set_is_measurable}
Let $E \in \mathfrak{M}(\mathbb{R})$ and $(X, d)$ a metric space. If $f : E
\to X$ is continuous, then $f \in \mathcal{L}(E, X)$.
\end{propo}
\begin{proof}
Since $f$ is continuous, for every open $G
\subseteq X$, $f^{-1}(G)$ is open in $E$ \sidenote{We say that $f^{-1}(G)$ is
\hlnotea{relatively open} in $E$.}. This means that $f^{-1}(G) = U_G \cap E$
for some open $U_G \subseteq \mathbb{R}$. Since $U_G$ is open, by
\cref{propo:some_lebesgue_measurable_sets}, $U_G \in
\mathfrak{M}(\mathbb{R})$. Since $E \in \mathfrak{M}(\mathbb{R})$, we have
that
\begin{equation*}
f^{-1}(G) = U_G \cap E \in \mathfrak{M}(E),
\end{equation*}
and so
\begin{equation*}
f \in \mathcal{L}(E, X).
\end{equation*}
\end{proof}
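For instance \sidenote{A quick illustration of my own.}, $f : \mathbb{R} \to
\mathbb{R}$ given by $f(x) = x^2$ is continuous, hence measurable, and indeed
\begin{equation*}
f^{-1}((1, 4)) = (-2, -1) \cup (1, 2) \in \mathfrak{M}(\mathbb{R}).
\end{equation*}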
\begin{eg}\label{eg:characteristic_function_is_measurable_over_a_measurable_set}
Let $E \in \mathfrak{M}(\mathbb{R})$ and $H \subseteq E$. Consider the
characteristic function of $H$, which is
\begin{equation*}
\chi_H : E \to \mathbb{R} \text{ given by } x \mapsto \begin{cases}
1 & x \in H \\
0 & x \notin H
\end{cases}.
\end{equation*}
Let $G \subseteq \mathbb{R}$ be open. Then
\begin{equation*}
\chi_H^{-1}(G) = \begin{cases}
\emptyset & G \cap \{ 0, 1 \} = \emptyset \\
E & G \supseteq \{ 0, 1 \} \\
E \setminus H & G \cap \{ 0, 1 \} = \{ 0 \} \\
H & G \cap \{ 0, 1 \} = \{ 1 \}
\end{cases},
\end{equation*}
Since $\emptyset$ and $E$ are always measurable, all four possible outcomes
are measurable precisely when $H$ (equivalently $E \setminus H$) is. Thus
$\chi_H$ is measurable iff $H \in \mathfrak{M}(\mathbb{R})$.
\end{eg}
\begin{propo}[Composition of a Continuous Function and a Measurable Function is Measurable]\label{propo:composition_of_a_continuous_function_and_a_measurable_function_is_measurable}
Let $E \in \mathfrak{M}(\mathbb{R})$ and $(X, d_X),\, (Y, d_Y)$ be metric
spaces. Suppose that
\begin{equation*}
f : E \to X \text{ is measurable and } g : X \to Y \text{ is continuous}.
\end{equation*}
Then
\begin{equation*}
g \circ f : E \to Y \text{ is measurable}.
\end{equation*}
\end{propo}
\marginnote{
The idea is simple: $(g \circ f)^{-1}(G) = f^{-1}(g^{-1}(G))$, and continuity
of $g$ means that $g^{-1}(G)$ is open in $X$.
}
\begin{proof}
Let $G \subseteq Y$ be open. Then since $g$ is continuous, we have that
\begin{equation*}
g^{-1}(G) \subseteq X \text{ is open}.
\end{equation*}
Then since $f$ measurable, we have that
\begin{equation*}
(g \circ f)^{-1}(G) = f^{-1}(g^{-1}(G)) \in \mathfrak{M}(E).
\end{equation*}
Thus $g \circ f \in \mathcal{L}(E, Y)$.
\end{proof}
\begin{eg}
Let $E \in \mathfrak{M}(\mathbb{R})$ and $f \in \mathcal{L}(E, \mathbb{K})$. Let $g :
\mathbb{K} \to \mathbb{R}$ be given by $z \mapsto \abs{z}$. Then $g$ is
continuous. By
\cref{propo:composition_of_a_continuous_function_and_a_measurable_function_is_measurable},
we have that
\begin{equation*}
g \circ f = \abs{f} \text{ is measurable}.
\end{equation*}
\end{eg}
\begin{eg}
Note that the converse to the above is not true: measurability of $\abs{f}$
does not imply measurability of $f$.
Consider $E = \mathbb{R} = \mathbb{K}$. Take $H \subseteq \mathbb{R}$
non-measurable, which we know exists, and consider the function
\begin{equation*}
f : E \to \mathbb{R} \text{ given by } f(x) = \begin{cases}
1 & x \in H \\
-1 & x \notin H
\end{cases},
\end{equation*}
i.e. $f = 2 \chi_H - 1$. Then $\abs{f} \equiv 1$ is measurable (being
constant on the measurable set $E = \mathbb{R}$), but
\begin{equation*}
f^{-1}(\{1\}) = H \notin \mathfrak{M}(\mathbb{R}).
\end{equation*}
\end{eg}
\begin{propo}[Component-wise Measurability]\label{propo:component_wise_measurability}
Let $E \in \mathfrak{M}(\mathbb{R})$ and $f, g : E \to \mathbb{K}$. Then TFAE:
\begin{enumerate}
\item $f, g \in \mathcal{L}(E, \mathbb{K})$;
\item $h : E \to \mathbb{K}^2$ given by $x \mapsto (f(x), g(x))$ is
measurable.
\end{enumerate}
\end{propo}
\begin{proof}
\hlbnoted{$(2) \implies (1)$} \sidenote{Awareness about projection maps is a
plus here.} Let
\begin{gather*}
\pi_1 : \mathbb{K}^2 \to \mathbb{K} \text{ given by } (w, z) \mapsto w \\
\pi_2 : \mathbb{K}^2 \to \mathbb{K} \text{ given by } (w, z) \mapsto z
\end{gather*}
so that $\pi_1, \pi_2$ are continuous. Then by
\cref{propo:composition_of_a_continuous_function_and_a_measurable_function_is_measurable},
we have that
\begin{equation*}
\pi_1 \circ h = f \text{ and } \pi_2 \circ h = g
\end{equation*}
are both measurable.
\noindent
\hlbnoted{$(1) \implies (2)$} Let $G \subseteq \mathbb{K}^2$ be open. We can
write $G$ as a countable union of open rectangles \sidenote{If you are unsure
about this, think $\epsilon-\delta$.}, i.e.
\begin{equation*}
G = \bigcup_{n=1}^{\infty} A_n \times B_n,
\end{equation*}
where $A_n, B_n \subseteq \mathbb{K}$ are open. Then
\begin{align*}
h^{-1}(G) &= h^{-1} \left( \bigcup_{n=1}^{\infty} A_n \times B_n \right) \\
&= \bigcup_{n=1}^{\infty} \underbrace{f^{-1}(A_n)}_{\in
\mathfrak{M}(E)} \cap \underbrace{g^{-1}(B_n)}_{\in
\mathfrak{M}(E)} \in \mathfrak{M}(E).
\end{align*}
Thus $h \in \mathcal{L}(E, \mathbb{K}^2)$.
\end{proof}
\begin{propo}[$\mathcal{L}(E, \mathbb{K})$ is a Unital Algebra]\label{propo:_l_e_k_is_a_unital_algebra}
Let $E \in \mathfrak{M}(\mathbb{R})$. Then $\mathcal{L}(E, \mathbb{K})$ is a
\hldefn{unital algebra}, i.e. if $f, g \in \mathcal{L}(E, \mathbb{K})$, then
\begin{enumerate}
\item $f + g \in \mathcal{L}(E, \mathbb{K})$;
\item $fg \in \mathcal{L}(E, \mathbb{K})$ \sidenote{Here, it's
multiplication of two functions, not compositions};
\item $g(x) \neq 0,\, \forall x \in E \implies \frac{f}{g} \in
\mathcal{L}(E, \mathbb{K})$; and
\item if $h : E \to \mathbb{K}$ is constant, then $h \in \mathcal{L}(E,
\mathbb{K})$.
\end{enumerate}
In particular, $\mathcal{L}(E, \mathbb{K})$ is an \hldefn{algebra}.
\end{propo}
\begin{proof}
We shall make use of a clever trick \sidenote{``Clever trick'' = ``trick you
should learn''.}. Let $\mu : E \to \mathbb{K}^2$ be given by $x \mapsto (f(x),
g(x))$. Note that since $f, g \in \mathcal{L}(E, \mathbb{K})$, by
\cref{propo:component_wise_measurability}, $\mu \in \mathcal{L}(E,
\mathbb{K}^2)$.
\begin{enumerate}
\item Consider the function
\begin{equation*}
\sigma : \mathbb{K}^2 \to \mathbb{K} \text{ given by } (w, z) \mapsto w
+ z.
\end{equation*}
It is clear that $\sigma$ is continuous. Then
\begin{equation*}
\sigma \circ \mu : x \mapsto f(x) + g(x)
\end{equation*}
is measurable by
\cref{propo:composition_of_a_continuous_function_and_a_measurable_function_is_measurable}.
\item Consider the function
\begin{equation*}
\sigma : \mathbb{K}^2 \to \mathbb{K} \text{ given by } (w, z) \mapsto
wz.
\end{equation*}
Again, we see that $\sigma$ is continuous. Then
\begin{equation*}
\sigma \circ \mu : x \mapsto f(x)g(x)
\end{equation*}
is measurable by
\cref{propo:composition_of_a_continuous_function_and_a_measurable_function_is_measurable}.
\item Consider the function
\begin{equation*}
\sigma : \mathbb{K} \times (\mathbb{K} \setminus \{0\}) \to \mathbb{K}
\text{ given by } (w, z) \mapsto \frac{w}{z}.
\end{equation*}
Again, $\sigma$ is continuous. Thus
\begin{equation*}
\sigma \circ \mu : x \mapsto \frac{f(x)}{g(x)}
\end{equation*}
is measurable by
\cref{propo:composition_of_a_continuous_function_and_a_measurable_function_is_measurable}.
\item Suppose $h : E \to \mathbb{K}$ is a constant, and we have $h(x) = \alpha_0$
for all $x \in E$. Then for any $G \subseteq \mathbb{K}$ that is open, we
have that
\begin{equation*}
h^{-1}(G) = \begin{cases}
\emptyset & \alpha_0 \notin G \\
E & \alpha_0 \in G
\end{cases},
\end{equation*}
both of which are measurable sets. Thus $h$ is indeed measurable.
\end{enumerate}
\end{proof}
\begin{warning}[Composition of Measurable Functions Need Not be Measurable]\label{warning:composition_of_measurable_functions_need_not_be_measurable}
It is important to note that compositions of measurable functions do not have
to be measurable. Here is a counterexample \sidenote{Source:
\citealt{mirjam_mathse_2013}}.
Let $f : [0, 1] \to [0, 1]$ be the \hlnotea{Cantor-Lebesgue Function}
\sidenote{Seen in A2Q5.}. Note that $f$ is a monotonic and continuous
function, and the image $f(C)$ of the Cantor set $C$ is all of $[0, 1]$. Let
$g(x) = x + f(x)$. It is clear that $g : [0, 1] \to [0, 2]$ is a strictly
monotonic and continuous map. In particular, $h = g^{-1}$ is also continuous,
and hence measurable. Now, $g$ translates each open interval removed in the
construction of $C$ (on which $f$ is constant), so $m(g([0, 1] \setminus C))
= 1$, and thus $m(g(C)) = 2 - 1 = 1 > 0$. A set of positive measure contains
a non-measurable subset $A \subseteq g(C)$, and $B \coloneqq g^{-1}(A)
\subseteq C$ is measurable since $m^* B \leq m^* C = 0$. Then $\chi_B$ is
measurable, yet $\chi_B \circ h$ is not, since
\begin{equation*}
(\chi_B \circ h)^{-1}\left( \left( \tfrac{1}{2}, \infty \right) \right) =
g(B) = A \notin \mathfrak{M}(\mathbb{R}).
\end{equation*}
\end{warning}
\begin{remark}\label{remark:about_complex_val_fns}
Note that $(\mathbb{C}, d)$, where $d(w, z) = \abs{w - z}$, is a metric space.
Moreover, the map
\begin{equation*}
\gamma : \mathbb{C} \to \mathbb{R}^2 \text{ given by } x + iy \mapsto (x,
y),
\end{equation*}
where $x, y \in \mathbb{R}$, is a homeomorphism and, in particular,
continuous. Then given $E \in \mathfrak{M}(\mathbb{R})$ and a measurable $f
: E \to \mathbb{C}$, we have
\begin{equation*}
\gamma \circ f : E \to \mathbb{R}^2 \in \mathcal{L}(E, \mathbb{R}^2).
\end{equation*}
Also, notice that
\begin{equation*}
\gamma \circ f = (\Re f, \Im f).
\end{equation*}
By \cref{propo:component_wise_measurability}, $\Re f, \Im f \in \mathcal{L}(E,
\mathbb{R})$. This also means that
\begin{equation*}
h : x \mapsto (\Re f(x), \Im f(x)) \in \mathcal{L}(E, \mathbb{R}^2).
\end{equation*}
Conversely, if $\Re f, \Im f \in \mathcal{L}(E, \mathbb{R})$, then
\begin{equation*}
f = \gamma^{-1} \circ h \in \mathcal{L}(E, \mathbb{C})
\end{equation*}
by \cref{propo:component_wise_measurability}.
This means that a \hlimpo{complex-valued function is measurable iff its real
and imaginary parts are both measurable}. Consequently, to study
complex-valued functions, it suffices to study real-valued functions.
\end{remark}
\begin{propo}[Measurable Function Broken Down into an Absolute Part and a Scaling Part]\label{propo:measurable_function_broken_down_into_an_absolute_part_and_a_scaling_part}
Let $E \in \mathfrak{M}(\mathbb{R})$ and suppose that $f : E \to \mathbb{C}$
is measurable. Then there exists a measurable function $\Theta : E \to
\mathbb{T}$, where
\begin{equation*}
\mathbb{T} \coloneqq \{ z \in \mathbb{C} \mid \abs{z} = 1 \},
\end{equation*}
such that
\begin{equation*}
f = \Theta \cdot \abs{f}.
\end{equation*}
\end{propo}
\begin{proof}
Since $\{ 0 \} \subseteq \mathbb{C}$ is closed and $f$ is measurable, we have
that
\begin{equation*}
K \coloneqq f^{-1}(\{0\}) \in \mathfrak{M}(E).
\end{equation*}
Since $\chi_K$ is a measurable function, we have that $f + \chi_K$ is also
measurable (cf. \cref{propo:_l_e_k_is_a_unital_algebra}).
\noindent
\hlbnoted{Claim: $f + \chi_K \neq 0$ over $E$.}
\begin{itemize}
\item If $x \in E$ such that $f(x) = 0$, then $x \in K$, and so $\chi_K(x) =
1$.
\item If $x \in E$ such that $\chi_K(x) = 0$, then $x \notin K$, which means
$f(x) \neq 0$.
\end{itemize}
Therefore, consider the function
\begin{equation*}
\Theta = \frac{f + \chi_K}{\abs{f + \chi_K}} : E \to \mathbb{T}.
\end{equation*}
By \cref{propo:_l_e_k_is_a_unital_algebra}, $\Theta$ is measurable, and
clearly
\begin{equation*}
f = \Theta \cdot \abs{f}.
\end{equation*}
\end{proof}
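For example \sidenote{My own illustration, in the real-valued case.}, take
$E = \mathbb{R}$ and $f(x) = x$. Then $K = \{ 0 \}$, and
\begin{equation*}
\Theta(x) = \frac{x + \chi_{\{0\}}(x)}{\abs{x + \chi_{\{0\}}(x)}} =
\begin{cases}
1 & x \geq 0 \\
-1 & x < 0
\end{cases},
\end{equation*}
so that indeed $f = \Theta \cdot \abs{f}$.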
\begin{remark}
As of now, given a set $E \in \mathfrak{M}(\mathbb{R})$, to verify that a
function $f : E \to \mathbb{R}$ belongs to $\mathcal{L}(E, \mathbb{R})$, we
need to check that
\begin{equation*}
\forall G \subseteq \mathbb{R} \text{ open },\, f^{-1}(G) \in
\mathfrak{M}(E).
\end{equation*}
Since there is an obscene amount of open (respectively closed) subsets of
$\mathbb{R}$, we want to be able to reduce our workload. This shall be the
first thing we do in the next lecture.
\end{remark}
% section lebesgue_measurable_functions (end)
% chapter lecture_5_may_21st_2019 (end)
\chapter{Lecture 6 May 23rd 2019}%
\label{chp:lecture_6_may_23rd_2019}
% chapter lecture_6_may_23rd_2019
\section{Lebesgue Measurable Functions (Continued)}%
\label{sec:lebesgue_measurable_functions_continued}
% section lebesgue_measurable_functions_continued
\begin{propo}[Function Measurability Check]\label{propo:function_measurability_check}
Let $E \in \mathfrak{M}(\mathbb{R})$ and $f : E \to \mathbb{R}$ be a function.
TFAE:
\begin{enumerate}
\item $f$ is measurable, i.e. $\forall G \subseteq \mathbb{R}$ that is open,
$ f^{-1}(G) \in \mathfrak{M}(E)$.
\item $\forall a \in \mathbb{R}$, $f^{-1}((a, \infty)) \in \mathfrak{M}(E)$.
\item $\forall b \in \mathbb{R}$, $f^{-1}((-\infty, b]) \in
\mathfrak{M}(E)$.
\item $\forall b \in \mathbb{R}$, $f^{-1}((-\infty, b)) \in
\mathfrak{M}(E)$.
\item $\forall a \in \mathbb{R}$, $f^{-1}([a, \infty)) \in \mathfrak{M}(E)$.
\end{enumerate}
\end{propo}
\begin{proof}
\hlbnoted{$(1) \implies (2)$} This is trivially true since $\forall a \in
\mathbb{R}$, $(a, \infty)$ is open in $\mathbb{R}$, and so since $f$ is
measurable, we must have that $f^{-1}((a, \infty)) \in \mathfrak{M}(E)$.
\noindent
\hlbnoted{$(2) \implies (3)$} Notice that $\forall b \in \mathbb{R}$,
\begin{equation*}
f^{-1}((-\infty, b]) = f^{-1}(\mathbb{R} \setminus (b, \infty)) = E
\setminus f^{-1}((b, \infty))
\end{equation*}
and $f^{-1}((b, \infty)) \in \mathfrak{M}(E)$ by assumption. Since
$\mathfrak{M}(E)$ is a $\sigma$-algebra, $f^{-1}((-\infty, b]) \in
\mathfrak{M}(E)$.
\noindent
\hlbnoted{$(3) \implies (4)$} Notice that $\forall b \in \mathbb{R}$,
\begin{equation*}
f^{-1}((-\infty, b)) = \bigcup_{n=1}^{\infty} f^{-1}\left(\left(-\infty, b -
\frac{1}{n}\right]\right),
\end{equation*}
and by assumption, for each $n \geq 1$, $f^{-1}\left( \left( -\infty, b -
\frac{1}{n} \right] \right) \in \mathfrak{M}(E)$. It follows that
$f^{-1}((-\infty, b)) \in \mathfrak{M}(E)$.
\noindent
\hlbnoted{$(4) \implies (5)$} Observe that $\forall a \in \mathbb{R}$, we have
\begin{equation*}
f^{-1}([a, \infty)) = f^{-1}(\mathbb{R}\setminus(-\infty, a)) \in
\mathfrak{M}(E)
\end{equation*}
by assumption.
\noindent
\hlbnoted{$(5) \implies (1)$}
\sidenote{This uses the same idea as in
\cref{propo:some_lebesgue_measurable_sets}.} Notice that $\forall a \in
\mathbb{R}$,
\begin{align*}
f^{-1}((a, \infty)) &= \bigcup_{n=1}^{\infty} f^{-1} \left( \left[ a +
\frac{1}{n}, \infty \right) \right) \in \mathfrak{M}(E)
\end{align*}
by assumption. Furthermore, we have that $\forall b \in \mathbb{R}$,
\begin{equation*}
f^{-1}((-\infty, b)) = E \setminus f^{-1}([b, \infty)) \in \mathfrak{M}(E),
\end{equation*}
also by assumption. Thus
\begin{equation*}
f^{-1}((a, b)) = f^{-1}((a, \infty)) \cap f^{-1}((-\infty, b)) \in
\mathfrak{M}(E),
\end{equation*}
for any $a, b \in \mathbb{R}$.
Since any open $G \subseteq \mathbb{R}$ can be written as a countable
union of open intervals, i.e.
\begin{equation*}
G = \bigcup_{n=1}^{\infty} I_n,
\end{equation*}
where each $I_n$ is an open interval, we have that
\begin{equation*}
f^{-1}(G) = \bigcup_{n=1}^{\infty} f^{-1}(I_n) \in \mathfrak{M}(E).
\end{equation*}
Thus $f$ is measurable.
\end{proof}
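As a quick application \sidenote{An observation of my own, using part (2).},
every non-decreasing function $f : \mathbb{R} \to \mathbb{R}$ is measurable:
for each $a \in \mathbb{R}$, the set $f^{-1}((a, \infty))$ is upward closed,
hence of the form $\emptyset$, $\mathbb{R}$, $(c, \infty)$, or $[c,
\infty)$, all of which lie in $\mathfrak{M}(\mathbb{R})$.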
The proof of the following result is left to A2.
\begin{crly}[Measurability Check on the Borel Set]\label{crly:measurability_check_on_the_borel_set}
If $E \in \mathfrak{M}(\mathbb{R})$ and $f : E \to \mathbb{R}$ is a function,
then TFAE:
\begin{enumerate}
\item $f$ is measurable.
\item $\forall B \in \Bor(\mathbb{R})$, $f^{-1}(B) \in \mathfrak{M}(E)$.
\end{enumerate}
\end{crly}
\begin{remark}\label{remark:f_plus_f_minus}
Let $E \in \mathfrak{M}(\mathbb{R})$ and $f : E \to \mathbb{R}$. Define
\begin{gather*}
f^+(x) = \max \{ f(x), 0 \},\, x \in E \\
f^-(x) = \max \{ -f(x), 0\},\, x \in E
\end{gather*}
Then $f^+, f^- \geq 0$, and
\begin{equation*}
f = f^+ - f^- \text{ and } \abs{f} = f^+ + f^-.
\end{equation*}
Moreover,
\begin{equation*}
f^+ = \frac{\abs{f} + f}{2} \text{ and } f^- = \frac{\abs{f} - f}{2},
\end{equation*}
and so both $f^+$ and $f^-$ are measurable.
By \cref{remark:about_complex_val_fns}, every complex-valued measurable function is a
linear combination of 4 non-negative, real-valued measurable functions.
\end{remark}
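For instance, if $f : \mathbb{R} \to \mathbb{R}$ is given by $f(x) = x$, then
\begin{equation*}
f^+(x) = x \chi_{[0, \infty)}(x) \quad \text{ and } \quad f^-(x) = -x
\chi_{(-\infty, 0)}(x),
\end{equation*}
and one readily checks that $f = f^+ - f^-$ and $\abs{f} = f^+ + f^-$.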
\newthought{We shall} now examine a number of results dealing with pointwise
limits of sequences of measurable, real-valued functions. We shall include the
case where the limit at a given point is allowed to be an \hlnotea{extended
real number}; i.e. the sequence diverges either to $\infty$ or $-\infty$.
\begin{defn}[Extended Real Numbers]\index{Extended Real Numbers}\label{defn:extended_real_numbers}
We define the \hlnoteb{extended real numbers} to be the set
\begin{equation*}
\overline{\mathbb{R}} \coloneqq \mathbb{R} \cup \{ -\infty, \infty \}.
\end{equation*}
We also write $\overline{\mathbb{R}} = [-\infty, \infty]$.
By convention, we shall define
\begin{itemize}
\item $\infty + \infty = \infty$, $-\infty - \infty = -\infty$;
\item $\forall \alpha \in \mathbb{R} \cup \{ \infty \}$, $\alpha + \infty =
\infty = \infty + \alpha$;
\item $\forall \alpha \in \mathbb{R}$, $\alpha + (-\infty) = -\infty =
-\infty + \alpha$;
\item $\forall 0 < \alpha \in \overline{\mathbb{R}}$, $\alpha \cdot \infty =
\infty \cdot \alpha = (-\infty) \cdot (-\alpha) = (-\alpha) \cdot
(-\infty) = \infty$;
\item $\forall \alpha < 0 \in \overline{\mathbb{R}}$, $\alpha \cdot \infty =
\infty \cdot \alpha = (-\infty) \cdot (-\alpha) = (-\alpha) \cdot
(-\infty) = -\infty$; and
\item $0 = 0 \cdot \infty = \infty \cdot 0 = 0 \cdot (-\infty) = (-\infty)
\cdot 0$.
\end{itemize}
\end{defn}
\begin{warning}
Notice that we do not define $\infty - \infty$ and $-\infty + \infty$.
\end{warning}
\begin{note}
While the space of extended real numbers is useful for treating
measure-theoretic and analytic properties of sequences of functions, it has
poor algebraic properties. In particular, it is no longer a vector space,
since $\infty$ and $-\infty$ have no additive inverses.
\end{note}
\begin{defn}[Extended Real-Valued Function]\index{Extended Real-Valued Function}\label{defn:extended_real_valued_function}
Given $H \subseteq \mathbb{R}$, the function $f : H \to \overline{\mathbb{R}}$
is called an \hlnoteb{extended real-valued function}.
\end{defn}
\begin{defn}[Measurable Extended Real-Valued Function]\index{Measurable Function}\label{defn:measurable_extended_real_valued_function}
If $E \in \mathfrak{M}(\mathbb{R})$ and $f : E \to \overline{\mathbb{R}}$ is
an extended real-valued function, we say that $f$ is \hlnoteb{Lebesgue
measurable} (or simply \hlnoteb{measurable}) if
\begin{enumerate}
\item $\forall G \subseteq \mathbb{R}$ open, $f^{-1}(G) \in
\mathfrak{M}(E)$; and
\item $f^{-1}(\{-\infty\}), f^{-1}(\{\infty\}) \in \mathfrak{M}(E)$.
\end{enumerate}
We denote the set of Lebesgue measurable extended real-valued functions on $E$
by
\begin{equation*}
\mathcal{L}(E, \overline{\mathbb{R}}) = \{ f : E \to \overline{\mathbb{R}} :
f \text{ is measurable } \}.
\end{equation*}
Since we shall often refer to only the non-negative elements of
$\mathcal{L}(E, \overline{\mathbb{R}})$, we also define the notation
\begin{equation*}
\mathcal{L}(E, [0, \infty]) = \{ f \in \mathcal{L}(E, \overline{\mathbb{R}})
: \forall x \in E, \, 0 \leq f(x) \}.
\end{equation*}
\end{defn}
\begin{note}
Note that we can also replace the first condition of Lebesgue measurability of
extended real-valued functions by
\begin{equation*}
\forall F \subseteq \mathbb{R} \text{ closed },\, f^{-1}(F) \in
\mathfrak{M}(E).
\end{equation*}
\end{note}
Just as in the case with regular real-valued measurable functions, we have the
following shortcuts in testing whether an extended real-valued function is
measurable.
\begin{notation}
We write
\begin{itemize}
\item $(a, \infty] = (a, \infty) \cup \{ \infty \}$; and
\item $[-\infty, b) = (-\infty, b) \cup \{ -\infty \}$,
\end{itemize}
for all $a, b \in \mathbb{R}$.
\end{notation}
\begin{propo}[Measurability Check for Extended Real-Valued Functions]\label{propo:measurability_check_for_extended_real_valued_functions}
Let $E \in \mathfrak{M}(\mathbb{R})$ and suppose $f : E \to
\overline{\mathbb{R}}$ is a function. Then TFAE:
\begin{enumerate}
\item $f$ is Lebesgue measurable.
\item $\forall a \in \mathbb{R}$, $f^{-1}((a, \infty]) \in \mathfrak{M}(E)$.
\item $\forall b \in \mathbb{R}$, $f^{-1}([-\infty, b)) \in
\mathfrak{M}(E)$.
\end{enumerate}
\end{propo}
\begin{ex}
Prove \cref{propo:measurability_check_for_extended_real_valued_functions}.
\end{ex}
\begin{propo}[Measurability of Limits and Extrema]\label{propo:measurability_of_limits_and_extremas}
Let $E \in \mathfrak{M}(\mathbb{R})$ and suppose that $(f_n)_{n=1}^{\infty}$
is a sequence in $\mathcal{L}(E, \overline{\mathbb{R}})$. Then the following
extended real-valued functions are also measurable:
\begin{enumerate}
\item $g_1 \coloneqq \sup_{n \geq 1} f_n$;
\item $g_2 \coloneqq \inf_{n \geq 1} f_n$;
\item $g_3 \coloneqq \limsup_{n \geq 1} f_n$; and
\item $g_4 \coloneqq \liminf_{n \geq 1} f_n$.
\end{enumerate}
\end{propo}
\begin{proof}
\begin{enumerate}
\item Let $a \in \mathbb{R}$. Then
\begin{equation*}
g_1^{-1}((a, \infty]) = \bigcup_{n \geq 1} \underbrace{f_n^{-1} ((a,
\infty])}_{\in \mathfrak{M}(E)} \in \mathfrak{M}(E).
\end{equation*}
It follows from
\cref{propo:measurability_check_for_extended_real_valued_functions} that
$g_1 \in \mathcal{L}(E, \overline{\mathbb{R}})$.
\item \sidenote{Union is indeed correct here: $\inf_{n \geq 1} f_n(x) < b$
iff $f_n(x) < b$ for some $n \geq 1$.} For any $b \in \mathbb{R}$, we have
\begin{equation*}
g_2^{-1}([-\infty, b)) = \bigcup_{n \geq 1} f_n^{-1}([-\infty, b)) \in
\mathfrak{M}(E).
\end{equation*}
Thus by
\cref{propo:measurability_check_for_extended_real_valued_functions}, $g_2
\in \mathcal{L}(E, \overline{\mathbb{R}})$.
\item Let $h_n = \sup_{k \geq n} f_k$ for each $n \geq 1$. Then by part
$(1)$, $h_n \in \mathcal{L}(E, \overline{\mathbb{R}})$ for each $n \geq
1$. Also, notice that $h_1 \geq h_2 \geq h_3 \geq \hdots$, i.e. $\{ h_n
\}_{n=1}^{\infty}$ is a decreasing sequence of functions. Then by part
$(2)$,
\begin{equation*}
g_3 = \lim_{n \to \infty} h_n = \inf_{n \geq 1} h_n \in \mathcal{L}(E,
\overline{\mathbb{R}}).
\end{equation*}
\item Let $h_n = \inf_{k \geq n} f_k$ for each $n \geq 1$. Then by part
$(2)$, each $h_n \in \mathcal{L}(E, \overline{\mathbb{R}})$. Also, $\{ h_n
\}_{n=1}^{\infty}$ is an increasing sequence of functions. Then by part
$(1)$, we have that
\begin{equation*}
g_4 = \lim_{n \to \infty} h_n = \sup_{n \geq 1} h_n \in \mathcal{L}(E,
\overline{\mathbb{R}}).
\end{equation*}
\end{enumerate}
\end{proof}
\begin{crly}[Extended Limit of Real-Valued Functions]\label{crly:extended_limit_of_real_valued_functions}
Let $E \in \mathfrak{M}(\mathbb{R})$ and suppose that $(f_n)_{n=1}^{\infty}$
is a sequence of measurable real-valued functions such that $f(x) = \lim_{n
\to \infty} f_n(x)$ exists as an extended real number for all $x \in E$. Then
\begin{equation*}
f \in \mathcal{L}(E, \overline{\mathbb{R}}).
\end{equation*}
\end{crly}
\begin{proof}
By A2, when the said limit exists, we have that
\begin{equation*}
f = \limsup_{n \geq 1} f_n = \liminf_{n \geq 1} f_n,
\end{equation*}
and so $f \in \mathcal{L}(E, \overline{\mathbb{R}})$ by
\cref{propo:measurability_of_limits_and_extremas}.
\end{proof}
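For example \sidenote{My own example.}, let $E = [0, 2]$ and $f_n(x) = x^n$
for each $n \geq 1$. Each $f_n$ is continuous, hence measurable, and
\begin{equation*}
f(x) = \lim_{n \to \infty} x^n = \begin{cases}
0 & 0 \leq x < 1 \\
1 & x = 1 \\
\infty & 1 < x \leq 2
\end{cases}
\end{equation*}
is an extended real-valued function, which the corollary guarantees is in
$\mathcal{L}([0, 2], \overline{\mathbb{R}})$.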
\begin{defn}[Simple Functions]\index{Simple Functions}\label{defn:simple_functions}
Let $E \in \mathfrak{M}(\mathbb{R})$ and $\phi : E \to \overline{\mathbb{R}}$.
We say that $\phi$ is \hlnoteb{simple} if $\Range \phi$ is finite.
Furthermore, we denote the set of all simple, real-valued, \hlimpo{measurable}
functions on $E$ as
\begin{equation*}
\SIMP(E, \mathbb{R}).
\end{equation*}
\end{defn}
\begin{defn}[Standard Form]\index{Standard Form}\label{defn:standard_form}
Let $E \in \mathfrak{M}(\mathbb{R})$ and $\phi : E \to \overline{\mathbb{R}}$
be simple. Suppose that
\begin{equation*}
\Range \phi = \{ \alpha_1 < \alpha_2 < \hdots < \alpha_N \},
\end{equation*}
and set
\begin{equation*}
E_n \coloneqq \phi^{-1}(\{\alpha_n\}), \text{ for } 1 \leq n \leq N.
\end{equation*}
We say that
\begin{equation*}
\phi = \sum_{n=1}^{N} \alpha_n \chi_{E_n}
\end{equation*}
is the \hlnoteb{standard form} of $\phi$.
\end{defn}
\begin{warning}[Step Functions are Simple, but the Converse is False]\label{warning:step_functions_are_simple_but_the_converse_is_false}
Recall that a \hldefn{step function} is a function
that can be written as a finite linear combination of indicator functions
of intervals.
This means that step functions are simple functions.
However, simple functions are not necessarily step functions.
For example, $\chi_C$, where $C$ is the \hyperref[eg:cantor_set]{Cantor set},
is a simple function since $C$ is measurable,
but it is clearly not a step function, as it would require infinitely many
indicator functions of infinitely small intervals.
\end{warning}
\begin{propo}[Measurability of Simple Functions with Measurable Support]\label{propo:measurability_of_simple_functions_with_measurable_support}
Let $E \in \mathfrak{M}(\mathbb{R})$. Suppose $\phi : E \to
\overline{\mathbb{R}}$ is simple with
\begin{equation*}
\Range \phi = \{ \alpha_1 < \alpha_2 < \hdots < \alpha_N \}.
\end{equation*}
TFAE:
\begin{enumerate}
\item $\phi$ is measurable.
\item If $\phi = \sum_{n=1}^{N} \alpha_n \chi_{E_n}$ is the standard form of
$\phi$, then $E_n \in \mathfrak{M}(E)$, for all $n \in \{1, \ldots, N \}$.
\end{enumerate}
\end{propo}
\begin{proof}
\hlbnoted{$(\implies)$} Since $\phi$ is measurable, notice that for each $n
\in \{1, \ldots, N\}$,
\begin{itemize}
\item if $\alpha_n \in \mathbb{R}$, then $\{ \alpha_n \}$ is closed, and so
\begin{equation*}
E_n = \phi^{-1}(\{\alpha_n\}) \in \mathfrak{M}(E); \text{ and }
\end{equation*}
\item if $\alpha_1 = -\infty$, and similarly if $\alpha_N = \infty$, then by
\cref{defn:measurable_extended_real_valued_function},
$\phi^{-1}(\{\alpha_1\}), \phi^{-1}(\{\alpha_N\}) \in \mathfrak{M}(E)$.
\end{itemize}
\noindent
\hlbnoted{$(\impliedby)$} By
\cref{eg:characteristic_function_is_measurable_over_a_measurable_set},
since $E_n \in \mathfrak{M}(E)$ for each $1 \leq n \leq N$, each
$\chi_{E_n}$ is measurable. Notice that $\forall a \in \mathbb{R}$,
\begin{equation*}
\phi^{-1}((a, \infty]) = \bigcup \{ E_n : a < \alpha_n \},
\end{equation*}
and so $\phi^{-1}((a, \infty])$ is a finite (or empty) union of measurable
sets, and is hence measurable.
\end{proof}
\newthought{The standard form} is not a unique way of expressing a simple
function as a finite linear combination of characteristic functions.
\begin{eg}
Consider the function $\phi : \mathbb{R} \to \mathbb{R}$ given by
\begin{equation*}
\phi = \chi_{\mathbb{Q}} + 9 \chi_{[2, 6]}.
\end{equation*}
Then $\Range \phi = \{ 0, 1, 9, 10 \}$; we see that
\begin{equation*}
x \mapsto \begin{cases}
0 & x \in \mathbb{Q}^C \cap [2, 6]^C \\
1 & x \in \mathbb{Q} \cap [2, 6]^C \\
9 & x \in \mathbb{Q}^C \cap [2, 6] \\
10 & x \in \mathbb{Q} \cap [2, 6]
\end{cases}.
\end{equation*}
Then we may write $\phi$ as
\begin{equation*}
\phi = 0 \chi_{\mathbb{Q}^C \cap [2, 6]^C} + 1 \chi_{\mathbb{Q} \cap [2,
6]^C} + 9 \chi_{\mathbb{Q}^C \cap [2, 6]} + 10 \chi_{\mathbb{Q} \cap [2,
6]}.
\end{equation*}
\end{eg}
\begin{defn}[Real Cone]\index{Real Cone}\label{defn:real_cone}
Let $\mathcal{V}$ be a vector space over $\mathbb{K}$. A subset $C \subseteq
\mathcal{V}$ is said to be a \hlnoteb{(real) cone} if
\begin{marginfigure}
\centering
\begin{tikzpicture}
\draw[->] (-2, 0) -- (2, 0);
\draw[->] (0, -2) -- (0, 2);
\draw[fill=blue!30!background] (2, 1) -- (0, 0) -- (1, 2)
node[right=10pt] {$C$};
\draw[fill=blue!15!background] (-1, -2) -- (0, 0) -- (-2, -1)
node[below=10pt] {$-C$ };
\end{tikzpicture}
\caption{Typical figure of a cone}\label{fig:typical_figure_of_a_cone}
\end{marginfigure}
\begin{enumerate}
\item $C \cap -C = \{ 0 \}$, where $-C = \{ - w : w \in C \}$; and
\item $y, z \in C$ and $0 \leq \kappa \in \mathbb{R}$ imply that
\begin{equation*}
\kappa y + z \in C.
\end{equation*}
\end{enumerate}
\end{defn}
\begin{eg}
\begin{enumerate}
\item Let $\mathcal{V} = \mathbb{R}^3$ and
\begin{equation*}
C = \{ (x, y, z) \in \mathbb{R}^3 : 0 \leq x, y, z \}.
\end{equation*}
Then $C$ is a (real) cone.
\item Let $\mathcal{V} = \mathbb{C}$ and
\begin{equation*}
C = \left\{ w \in \mathbb{C} : w = re^{i\theta},\, \frac{\pi}{6} \leq
\theta \leq \frac{2\pi}{6},\, 0 \leq r < \infty \right\}.
\end{equation*}
Then $C$ is a cone in $\mathbb{C}$. Note that in both of the above
examples, $C$ is not closed under multiplication by arbitrary scalars,
i.e. $C$ is not a vector subspace.
\item Let $\mathcal{V} = \mathcal{C}([0, 1], \mathbb{C})$, and
\begin{equation*}
C = \left\{ f \in \mathcal{V} : 0 \leq f(x),\, \forall x \in [0, 1]
\right\},
\end{equation*}
where we note that the condition means that we only look at those
functions that take non-negative real values. Then $C$ is a (real) cone in
$\mathcal{V}$.
\end{enumerate}
\end{eg}
\begin{ex}
Show that $\SIMP(E, \mathbb{R})$ is an
\hyperref[defn:algebra_of_sets]{algebra}, and hence a vector space over
$\mathbb{R}$.
\end{ex}
\begin{remark}
\begin{enumerate}
\item Note that
\begin{equation*}
\SIMP(E, \overline{\mathbb{R}}) = \{ f : E \to \overline{\mathbb{R}} : f
\text{ is simple and measurable } \}
\end{equation*}
is not a vector space, since sums like $\infty + (-\infty)$ are
undefined. In fact, it is neither a field nor a ring.
\item We shall adopt the following notation:
\begin{equation*}
\SIMP(E, [0, \infty)) \coloneqq \left\{ \phi \in \SIMP(E, \mathbb{R}) :
0 \leq \phi(x) \text{ for all } x \in E \right\}.
\end{equation*}
Observe that this is a real cone in $\SIMP(E, \mathbb{R})$.
\end{enumerate}
\end{remark}
In A3, we will show the following proposition.
\begin{propo}[Increasing Sequence of Simple Functions that Converges to a Measurable Function]\label{propo:increasing_sequence_of_simple_functions_that_converges_to_a_measurable_function}
Let $E \in \mathfrak{M}(\mathbb{R})$ and $f \in \mathcal{L}(E, [0, \infty])$. Then
there exists an increasing sequence
\begin{equation*}
\phi_1 \leq \phi_2 \leq \phi_3 \leq \hdots \leq f
\end{equation*}
of simple, real-valued functions $\phi_n$ such that
\begin{equation*}
f(x) = \lim_{n \to \infty} \phi_n(x)
\end{equation*}
for all $x \in E$.
\end{propo}
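Although the proof is deferred to A3, the construction one usually sees
\sidenote{This sketch is the standard dyadic truncation; the assignment may
phrase it differently.} is
\begin{equation*}
\phi_n(x) = \begin{cases}
\frac{k-1}{2^n} & \frac{k-1}{2^n} \leq f(x) < \frac{k}{2^n},\ 1 \leq k \leq
n 2^n \\
n & f(x) \geq n
\end{cases},
\end{equation*}
where each $\phi_n$ is simple, and measurable since each set
$f^{-1}\left( \left[ \frac{k-1}{2^n}, \frac{k}{2^n} \right) \right)$ is
measurable (cf.
\cref{propo:measurability_check_for_extended_real_valued_functions}).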
% section lebesgue_measurable_functions_continued (end)
% chapter lecture_6_may_23rd_2019 (end)
\chapter{Lecture 7 May 28th 2019}%
\label{chp:lecture_7_may_28th_2019}
% chapter lecture_7_may_28th_2019
\section{Lebesgue Integration}%
\label{sec:lebesgue_integration}
% section lebesgue_integration
We shall begin by defining integration for simple, non-negative, extended
real-valued functions. We shall then use this definition to define the integral
of $f \in \mathcal{L}(E, [0, \infty])$, and derive several consequences of our
definition. Furthermore, we shall build the Lebesgue integral so that it
is linear, which will require us to impose certain conditions on the range of
the functions in order to retain this desirable property.
\begin{defn}[Integration of Simple Functions]\label{defn:integration_of_simple_functions}
Let $E \in \mathfrak{M}(\mathbb{R})$ and $\phi \in \SIMP(E, [0, \infty])$,
with standard form
\begin{equation*}
\phi = \sum_{n=1}^{N} \alpha_n \chi_{E_n}.
\end{equation*}
We define
\begin{equation*}
\int_{E} \phi \coloneqq \sum_{n=1}^{N} \alpha_n m E_n \in [0, \infty].
\end{equation*}
If $F \subseteq E$ is measurable, we define
\begin{equation*}
\int_{F} \phi = \int_{E} \phi \cdot \chi_F = \sum_{n=1}^{N} \alpha_n m(F
\cap E_n).
\end{equation*}
\end{defn}
\begin{note}
Note that since $\phi$ is measurable, so is each $E_n$ for $1 \leq n \leq N$.
\end{note}
\begin{eg}
\begin{enumerate}
\item Let $\phi = 0 \chi_{[4, \infty)} + 17 \chi_{\mathbb{Q} \cap [0, 4)} +
29 \chi_{[2, 4) \setminus \mathbb{Q}}$. Then
\begin{align*}
\int_{[0, \infty)} \phi &= 0 m[4, \infty) + 17 m(\mathbb{Q} \cap [0, 4))
+ 29 m([2, 4) \setminus \mathbb{Q}) \\
&= 0 + 17 \cdot 0 + 29 (2) = 58.
\end{align*}
\item Let $C \subseteq [0, 1]$ be the Cantor set from \cref{eg:cantor_set}
and $\phi = 1 \chi_C + 2 \chi_{[5, 9]}$. Then
\begin{align*}
\int_{[0, 6]} \phi
&= 1 m(C \cap [0, 6]) + 2 m([5, 9] \cap [0, 6]) \\
&= 1 \cdot 0 + 2 m([5, 6]) \\
&= 2.
\end{align*}
\end{enumerate}
\end{eg}
Our definition is fairly limited, since it requires that our simple function
be in standard form; let us try to relax that condition.
\begin{defn}[Disjoint Representation]\index{Disjoint Representation}\label{defn:disjoint_representation}
  Let $E \in \mathfrak{M}(\mathbb{R})$ and $\phi \in \SIMP(E, [0, \infty])$. Suppose
\begin{equation*}
\phi = \sum_{n=1}^{N} \alpha_n \chi_{H_n},
\end{equation*}
where $H_n \subseteq E$ is \hlimpo{measurable} and $\alpha_n \in
\overline{\mathbb{R}}$ for each $1 \leq n \leq N$. \sidenote{Note that we did
not require that the $\alpha_n$'s be distinct, nor do we require that they be
written in any particular order, nor do we require that $E =
\bigcup_{n=1}^{N} H_n$, unlike in the definition of
\hyperref[defn:simple_functions]{simple functions}.} We shall say that the
above decomposition of $\phi$ is a \hlnoteb{disjoint representation} of
$\phi$ if
\begin{equation*}
H_i \cap H_j = \emptyset, \text{ for } 1 \leq i \neq j \leq N.
\end{equation*}
\end{defn}
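To make the distinction concrete, here is a small illustrative example (mine,
not from the lecture). On $E = [0, 4)$, the decomposition
\begin{equation*}
  \phi = 2 \chi_{[0, 1)} + 2 \chi_{[1, 2)} + 5 \chi_{[3, 4)}
\end{equation*}
is a disjoint representation of $\phi$: the three sets are pairwise disjoint,
even though the coefficient $2$ repeats and the sets do not cover $E$. The
standard form of the same function would instead be
\begin{equation*}
  \phi = 0 \chi_{[2, 3)} + 2 \chi_{[0, 2)} + 5 \chi_{[3, 4)}.
\end{equation*}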
\begin{lemma}[Common Disjoint Representation of Simple Functions over a Common Domain]\label{lemma:common_disjoint_representation_of_simple_functions_over_a_common_domain}
Let $E \in \mathfrak{M}(\mathbb{R})$ and suppose that $\phi, \psi \in
  \SIMP(E, \mathbb{R})$. Then there exist
\begin{enumerate}
\item $N \in \mathbb{N}$ ;
    \item $H_1, H_2, \ldots, H_N \in \mathfrak{M}(E)$ with $H_i \cap H_j =
\emptyset$ for all $i \neq j$; and
\item $\alpha_1, \ldots, \alpha_N, \beta_1, \ldots, \beta_N$ such that
\begin{equation*}
\phi = \sum_{n=1}^{N} \alpha_n \chi_{H_n} \quad \text{ and } \quad
        \psi = \sum_{n=1}^{N} \beta_n \chi_{H_n}
\end{equation*}
are disjoint representations of $\phi$ and $\psi$.
\end{enumerate}
\end{lemma}
\begin{proof}
Since $\phi$ and $\psi$ are simple, from \cref{defn:standard_form}, if we
write
\begin{equation*}
\phi = \sum_{m=1}^{M_1} a_m \chi_{E_m} \quad \text{ and } \quad
\psi = \sum_{m=1}^{M_2} b_m \chi_{F_m}
\end{equation*}
in their standard forms, we have that the $E_m$'s and $F_m$'s are respectively
pairwise disjoint \sidenote{It is important to note here that the $E_m$'s and
$F_m$'s are pairwise disjoint on $E$, which is why the next step is a sensible
and correct one.}. Then
\begin{equation*}
\{ E_i \cap F_j : 1 \leq i \leq M_1,\, 1 \leq j \leq M_2 \}
\end{equation*}
is also a pairwise disjoint family of measurable sets, which we shall relabel
  as
\begin{equation*}
\{ H_n \}_{n=1}^{N}, \text{ where } N = M_1 M_2.
\end{equation*}
Then
\begin{equation*}
\phi = \sum_{n=1}^{N} \alpha_n \chi_{H_n},
\end{equation*}
where $\alpha_n = a_i$ if $H_n = E_i \cap F_j$ for some $1 \leq j \leq M_2$,
and
\begin{equation*}
    \psi = \sum_{n=1}^{N} \beta_n \chi_{H_n},
\end{equation*}
where $\beta_n = b_j$ if $H_n = E_i \cap F_j$ for some $1 \leq i \leq M_1$.
\end{proof}
\begin{lemma}[Integral of a Simple Function Using Its Disjoint Representation]\label{lemma:integral_of_a_simple_funciton_using_its_disjoint_representation}
Let $E \in \mathfrak{M}(\mathbb{R})$ and suppose $\phi \in \SIMP(E, [0,
\infty])$. If
\begin{equation*}
\phi = \sum_{n=1}^{N} \alpha_n \chi_{H_n}
\end{equation*}
is any disjoint representation, then
\begin{equation*}
    \int_{E} \phi = \sum_{n=1}^{N} \alpha_n mH_n.
\end{equation*}
\end{lemma}
\begin{proof}
  \sidenote{One of the problems here is that the disjoint $H_n$'s may not cover
  the entire domain of $\phi$, but we can fill it up with zeros.} If
$\bigcup_{n=1}^{N} H_n \neq E$, then we set
\begin{equation*}
H_{N + 1} = E \setminus \bigcup_{n=1}^{N} H_n \text{ and } \alpha_{N+1} = 0.
\end{equation*}
Then
\begin{equation*}
\sum_{n=1}^{N} \alpha_n mH_n = \sum_{n=1}^{N+1} \alpha_n mH_n.
\end{equation*}
Thus, wlog, wma
\begin{equation*}
\bigcup_{n=1}^{N} H_n = E.
\end{equation*}
Now since the $H_n$'s are mutually disjoint, wma
\begin{equation*}
\Range \phi = \{ \alpha_1, \ldots, \alpha_N \},
\end{equation*}
where we note that the above set may contain repeated elements, i.e. some
$\alpha_i = \alpha_j$. We may thus rewrite this set such that
\begin{equation*}
\{ \alpha_1, \ldots, \alpha_N \} = \{ \beta_1 < \beta_2 < \hdots < \beta_M
\}
\end{equation*}
and set
\begin{equation*}
E_i = \bigcup \{ H_j : \alpha_j = \beta_i \}.
\end{equation*}
Note that since $H_i \cap H_j = \emptyset$ for $1 \leq i \neq j \leq N$, for
$1 \leq k \leq M$, we have
\begin{equation*}
m E_k = \sum_{\alpha_j = \beta_k} m(H_j).
\end{equation*}
Then by definition,
\begin{align*}
\int_{E} \phi
    &= \sum_{k=1}^{M} \beta_k mE_k \\
    &= \sum_{i=1}^{M} \beta_i \sum_{\alpha_j = \beta_i} mH_j \\
    &= \sum_{j=1}^{N} \alpha_j mH_j,
\end{align*}
as desired.
\end{proof}
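As a quick sanity check (mine, not from the lecture), consider again $E = [0,
4)$ and $\phi = 2 \chi_{[0, 1)} + 2 \chi_{[1, 2)} + 5 \chi_{[3, 4)}$. The
disjoint representation gives
\begin{equation*}
  \int_{E} \phi = 2 m[0, 1) + 2 m[1, 2) + 5 m[3, 4) = 2 + 2 + 5 = 9,
\end{equation*}
while the standard form $\phi = 0 \chi_{[2, 3)} + 2 \chi_{[0, 2)} + 5
\chi_{[3, 4)}$ gives
\begin{equation*}
  \int_{E} \phi = 0 \cdot 1 + 2 \cdot 2 + 5 \cdot 1 = 9,
\end{equation*}
in agreement with the lemma.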
\begin{propo}[Linearity and Monotonicity of the Integral of Simple Functions]\label{propo:linearity_and_monotonicity_of_the_integral_of_simple_functions}
Let $E \in \mathfrak{M}(\mathbb{R})$. If $\phi, \psi \in \SIMP(E, [0,
\infty])$ and $\kappa \in [0, \infty)$, then
\begin{enumerate}
\item $\int_{E} \kappa \phi + \psi = \kappa \int_{E} \phi + \int_{E} \psi$; and
\item $\phi \leq \psi$ on $E$ implies
\begin{equation*}
\int_{E} \phi \leq \int_{E} \psi.
\end{equation*}
\end{enumerate}
\end{propo}
\begin{proof}
\begin{enumerate}
\item By
\cref{lemma:common_disjoint_representation_of_simple_functions_over_a_common_domain},
we can find a common disjoint representation of $\phi$ and $\psi$, say
\begin{equation*}
\phi = \sum_{n=1}^{N} a_n \chi_{H_n} \quad \text{and} \quad
\psi = \sum_{n=1}^{N} b_n \chi_{H_n},
\end{equation*}
where the $H_n$'s are pairwise disjoint. Then
\begin{equation*}
\kappa \phi + \psi = \sum_{n=1}^{N} (\kappa a_n + b_n) \chi_{H_n}.
\end{equation*}
Thus by
\cref{lemma:integral_of_a_simple_funciton_using_its_disjoint_representation},
\begin{align*}
\int_{E} (\kappa \phi + \psi)
&= \sum_{n=1}^{N} (\kappa a_n + b_n) m H_n \\
      &= \kappa \sum_{n=1}^{N} a_n mH_n + \sum_{n=1}^{N} b_n mH_n \\
&= \kappa \int_{E} \phi + \int_{E} \psi.
\end{align*}
\item Using the disjoint representation, if $\phi \leq \psi$, then $a_n \leq
b_n$ for all $1 \leq n \leq N$, and so by
\cref{lemma:integral_of_a_simple_funciton_using_its_disjoint_representation},
\begin{equation*}
      \int_{E} \phi = \sum_{n=1}^{N} a_n mH_n \leq \sum_{n=1}^{N} b_n mH_n =
      \int_{E} \psi.
\end{equation*}
\end{enumerate}
\end{proof}
We are now ready to define the Lebesgue integral for arbitrary measurable
functions.
\begin{defn}[Lebesgue Integral]\index{Lebesgue Integral}\label{defn:lebesgue_integral}
Let $E \in \mathfrak{M}(\mathbb{R})$ and $f \in \mathcal{L}(E, [0, \infty])$.
We define the \hlnoteb{Lebesgue integral} of $f$ as
\begin{equation*}
    \int_{E}^{NEW} f = \sup \left\{ \int_{E} \phi : \phi \in \SIMP(E, [0,
\infty)),\, 0 \leq \phi \leq f \right\}.
\end{equation*}
\end{defn}
\begin{note}
\begin{itemize}
\item We can actually allow $\phi \in \SIMP(E, [0, \infty])$.
\item We put ``NEW'' in the above integral because we now have ``two''
definitions for the integral of $\phi \in \SIMP(E, [0, \infty])$. Writing
$\phi = \sum_{n=1}^{N} \alpha_n \chi_{H_n}$ in its standard form, by
\cref{defn:integration_of_simple_functions},
\begin{equation*}
\int_{E} \phi = \sum_{n=1}^{N} \alpha_n mH_n,
\end{equation*}
while \cref{defn:lebesgue_integral} gives us
\begin{equation*}
\int_{E}^{NEW} \phi = \sup \left\{ \int_{E} \psi : \psi \in \SIMP(E, [0,
\infty)),\, 0 \leq \psi \leq \phi \right\}.
\end{equation*}
\end{itemize}
\end{note}
\begin{remark}
Let us try reconciling these two definitions, which will allow us to drop the
dumb-looking ``NEW'' notation. First, note that
\begin{equation*}
\phi \in \{ \psi \in \SIMP(E, [0, \infty]) : 0 \leq \psi \leq \phi \},
\end{equation*}
and so by \cref{defn:lebesgue_integral}, then
\begin{equation*}
\int_{E} \phi \leq \int_{E}^{NEW} \phi.
\end{equation*}
On the other hand, by
\cref{propo:linearity_and_monotonicity_of_the_integral_of_simple_functions},
if $\phi \in \SIMP(E, [0, \infty])$ and $0 \leq \psi \leq \phi$, we have that
\begin{equation*}
\int_{E} \psi \leq \int_{E} \phi,
\end{equation*}
and so
\begin{equation*}
    \int_{E}^{NEW} \phi = \sup \left\{ \int_{E} \psi : \psi \in \SIMP(E, [0,
\infty]),\, \psi \leq \phi \right\} \leq \int_{E} \phi.
\end{equation*}
Thus
\begin{equation*}
\int_{E}^{NEW} \phi = \int_{E} \phi.
\end{equation*}
\end{remark}
With that we shall drop the ``NEW'' notation from here on.
\begin{defn}[Almost Everywhere (a.e.)]\index{Almost Everywhere (a.e.)}\label{defn:almost_everywhere_a_e_}
Let $E \in \mathfrak{M}(\mathbb{R})$. We say that a property $(P)$ holds
\hlnoteb{almost everywhere (a.e.)} on $E$ if the set
\begin{equation*}
B \coloneqq \{ x \in E : (P) \text{ does not hold } \}
\end{equation*}
has Lebesgue measure zero.
\end{defn}
\begin{eg}
Let $E \in \mathfrak{M}(\mathbb{R})$. Given $f, g \in \mathcal{L}(E,
\overline{\mathbb{R}})$, we say that $f = g$ a.e. on $E$ if
\begin{equation*}
B \coloneqq \{ x \in E : f(x) \neq g(x) \}
\end{equation*}
has measure zero, i.e. $mB = 0$.
An example of this is
\begin{equation*}
\chi_{\mathbb{Q}} = 0 = \chi_{C}
\end{equation*}
a.e. on $\mathbb{R}$, where $C$ is the \hyperref[eg:cantor_set]{Cantor set}.
\end{eg}
\begin{lemma}[Monotonicity of the Lebesgue Integral and Other Lemmas]\label{lemma:monotonicity_of_the_lebesgue_integral_and_other_lemmas}
Let $E \in \mathfrak{M}(\mathbb{R})$ and let $f, g, h : E \to [0, \infty]$ be
functions. Suppose that $g$ and $h$ are measurable.
\begin{enumerate}
\item Suppose further that $E = X \bigcupdot Y$, where $X, Y \in
\mathfrak{M}(E)$. Set $f_1 \coloneqq f \restriction_{X}$ and $f_2
\coloneqq f \restriction_{Y}$. Then $f \in \mathcal{L}(E, [0, \infty])$
iff $f_1$ and $f_2$ are measurable. When this is the case, then
\begin{equation*}
\int_{E} f = \int_{X} f_1 + \int_{Y} f_2.
\end{equation*}
\item If $g \leq h$, then
\begin{equation*}
\int_{E} g \leq \int_{E} h.
\end{equation*}
\item If $H \in \mathfrak{M}(E)$, then
\begin{equation*}
\int_{H} g = \int_{E} g \cdot \chi_H \leq \int_{E} g.
\end{equation*}
\end{enumerate}
\end{lemma}
\begin{ex}
Prove \cref{lemma:monotonicity_of_the_lebesgue_integral_and_other_lemmas}.
\end{ex}
\begin{proof}
\begin{enumerate}
\item \hlbnoted{$f$ is measurable $\iff$ $f_1$ and $f_2$ are measurable}
\hlbnotea{($\implies$)} Note that
\begin{equation*}
f_1 = f \cdot \chi_X \text{ and } f_2 = f \cdot \chi_Y,
\end{equation*}
and since $X, Y$ are measurable, by
\cref{propo:composition_of_a_continuous_function_and_a_measurable_function_is_measurable},
we have that $f_1$ and $f_2$ are measurable.
    \hlbnotea{($\impliedby$)} Suppose $f_1$ and $f_2$ are measurable and $E = X
    \bigcupdot Y$. We have that
\begin{equation*}
f = f_1 + f_2.
\end{equation*}
I will spare the details, but it is not difficult to see that $\forall a
\in \mathbb{R}$, breaking $(a, \infty]$ into disjoint pieces if necessary,
$f^{-1}((a, \infty])$ is measurable, and hence $f$ is indeed measurable.
\hlbnoted{The integral} \sidenote{This proof is iffy.} By
\cref{defn:integration_of_simple_functions} and
\cref{propo:linearity_and_monotonicity_of_the_integral_of_simple_functions},
we have
\begin{align*}
\int_{E} f
&= \sup \left\{ \int_{E} \phi : \phi \in \SIMP(E, [0, \infty]),\, \phi
\leq f \right\} \\
&= \sup \left\{ \int_{E} \phi \cdot \chi_X + \phi \cdot \chi_Y : \phi
\in \SIMP(E, [0, \infty]),\, \phi \leq f \right\} \\
&= \sup \left\{ \int_{X} \phi + \int_{Y} \phi : \phi \in \SIMP(E, [0,
\infty]),\, \phi \leq f \right\} \\
&\leq \sup \left\{ \int_{X} \phi : \phi \in \SIMP(X, [0, \infty]),\,
\phi \leq f_1 \right\} \\
&\qquad + \sup \left\{ \int_{Y} \psi : \psi \in \SIMP(Y, [0, \infty]),\,
\psi \leq f_2 \right\} \\
&= \int_{X} f_1 + \int_{Y} f_2.
\end{align*}
On the other hand, since $f_1 = f$ on $X$ and $f_2 = f$ on $Y$, and $X$
and $Y$ are disjoint,
\begin{align*}
&\int_{X} f_1 + \int_{Y} f_2 \\
&= \sup \left\{ \int_{X} \phi : \phi \in \SIMP(X, [0, \infty]),\,
\phi \leq f_1 = f \restriction_X \right\} \\
&\qquad + \sup \left\{ \int_{Y} \psi : \psi \in \SIMP(Y, [0, \infty]),\,
\psi \leq f_2 = f \restriction_Y \right\} \\
&= \sup \Bigg\{ \int_{X} \phi + \int_{Y} \psi : \phi \in \SIMP(X, [0,
\infty]), \\
&\qquad \psi \in \SIMP(Y, [0, \infty]),\, \phi \leq f
\restriction_X,\, \psi \leq f \restriction_Y \Bigg\} \\
&= \sup \Bigg\{ \int_{E} \phi \cdot \chi_X + \psi \cdot \chi_Y : \phi,
\psi \in \SIMP(E, [0, \infty]), \\
&\qquad \phi + \psi \leq f \restriction_X + f \restriction_Y = f \Bigg\}
\\
&= \int_{E} f.
\end{align*}
\item By
\cref{propo:increasing_sequence_of_simple_functions_that_converges_to_a_measurable_function},
    there exist sequences $\{ \phi_n \}_n$ and $\{ \psi_n \}_n$ such that
\begin{equation*}
\lim_{n \to \infty} \phi_n = g \leq h = \lim_{n \to \infty} \psi_n.
\end{equation*}
In particular,
\begin{equation*}
\sup_{n \geq 1} \phi_n = g \leq h = \sup_{n \geq 1} \psi_n.
\end{equation*}
    Since any simple function $\phi$ with $\phi \leq g$ also satisfies $\phi
    \leq h$, the first supremum below is taken over a subset of the second, and so
\begin{align*}
\int_{E} g
&= \sup \left\{ \int_{E} \phi : \phi \in \SIMP(E, [0, \infty]),\, \phi
\leq g \right\} \\
&\leq \sup \left\{ \int_{E} \psi : \psi \in \SIMP(E, [0, \infty]),\,
\psi \leq h \right\} \\
&= \int_{E} h.
\end{align*}
\item \sidenote{This is also iffy.} For the first equality, by
\cref{defn:integration_of_simple_functions}, we have that
\begin{align*}
\int_{H} g
&= \sup \left\{ \int_{H} \phi : \phi \in \SIMP(H, [0, \infty]),\, \phi
\leq g \right\} \\
&= \sup \left\{ \int_{E} \phi \cdot \chi_H : \phi \in \SIMP(E, [0,
\infty]),\, \phi \leq g \right\} \\
&= \int_{E} g \cdot \chi_H.
\end{align*}
Note that we have $g \cdot \chi_H \leq g$, and so by part (2),
\begin{equation*}
\int_{E} g \cdot \chi_H \leq \int_{E} g.
\end{equation*}
\end{enumerate}
\end{proof}
\begin{propo}[Integration over Domains of Measure Zero and Integration of Functions Agreeing Almost Everywhere]\label{propo:integration_over_domains_of_measure_zero_and_integration_of_functions_agreeing_almost_everywhere}
Let $E \in \mathfrak{M}(\mathbb{R})$ and $f, g \in \mathcal{L}(E, [0,
\infty])$.
\begin{enumerate}
\item If $mE = 0$, then $\int_{E} f = 0$.
\item If $f = g$ a.e. on $E$, then $\int_{E} f = \int_{E} g$.
\end{enumerate}
\end{propo}
\begin{proof}
\begin{enumerate}
\item $\forall \phi \in \SIMP(E, [0, \infty])$ written in its standard form
\begin{equation*}
\phi = \sum_{n=1}^{N} \alpha_n \chi_{E_n},
\end{equation*}
by \hyperref[defn:algebra_of_sets]{monotonicity},
\begin{equation*}
0 \leq \int_{E} \phi = \sum_{n=1}^{N} \alpha_n mE_n \leq \sum_{n=1}^{N}
\alpha_n mE = 0,
\end{equation*}
and so
\begin{equation*}
\int_{E} \phi = 0.
\end{equation*}
Thus
\begin{equation*}
\int_{E} f = \sup \left\{ \int_{E} \phi : \phi \in \SIMP(E, [0,
\infty]),\, \phi \leq f \right\} = \sup \{ 0 \} = 0.
\end{equation*}
\item Let $B \coloneqq \{ x \in E : f(x) \neq g(x) \}$ so that $mB = 0$.
Then by \cref{lemma:monotonicity_of_the_lebesgue_integral_and_other_lemmas} and
part (1), we have
\begin{align*}
      \int_{E} f &= \int_{E \setminus B} f + \int_{B} f \\
&= \int_{E \setminus B} f + 0 \\
&= \int_{E \setminus B} g + \int_{B} g \\
&= \int_{E} g.
\end{align*}
\end{enumerate}
\end{proof}
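As a quick illustration (mine, not from the lecture): since $m\mathbb{Q} = 0$,
part (1) gives $\int_{\mathbb{Q}} f = 0$ for \emph{every} $f \in
\mathcal{L}(\mathbb{Q}, [0, \infty])$, and since $\chi_{\mathbb{Q}} = 0$ a.e.
on $\mathbb{R}$, part (2) gives $\int_{\mathbb{R}} \chi_{\mathbb{Q}} =
\int_{\mathbb{R}} 0 = 0$.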
We are now in a position to prove the following important theorem, which we
shall do next lecture.
\begin{thmnonum}[The Monotone Convergence Theorem]
Let $E \in \mathfrak{M}(\mathbb{R})$ and $(f_n)_n$ be a sequence in
$\mathcal{L}(E, [0, \infty])$ such that $f_n \leq f_{n+1}$ a.e. on $E$.
Suppose further that
\begin{equation*}
f : E \to [0, \infty]
\end{equation*}
is a function such that $f(x) = \lim_{n \to \infty} f_n(x)$ a.e. on $E$. Then
$f \in \mathcal{L}(E, [0, \infty])$ and
\begin{equation*}
\int_{E} f = \lim_{n \to \infty} \int_{E} f_n.
\end{equation*}
\end{thmnonum}
% section lebesgue_integration (end)
% chapter lecture_7_may_28th_2019 (end)
\chapter{Lecture 8 May 30th 2019}%
\label{chp:lecture_8_may_30th_2019}
% chapter lecture_8_may_30th_2019
\section{Lebesgue Integration (Continued)}%
\label{sec:lebesgue_integration_continued}
% section lebesgue_integration_continued
\begin{thm}[\imponote The Monotone Convergence Theorem]\index{The Monotone Convergence Theorem}\label{thm:the_monotone_convergence_theorem}
Let $E \in \mathfrak{M}(\mathbb{R})$ and $(f_n)_n$ be a sequence in
$\mathcal{L}(E, [0, \infty])$ such that $f_n \leq f_{n+1}$ a.e. on $E$.
Suppose further that
\begin{equation*}
f : E \to [0, \infty]
\end{equation*}
is a function such that $f(x) = \lim_{n \to \infty} f_n(x)$ a.e. on $E$. Then
$f \in \mathcal{L}(E, [0, \infty])$ and
\begin{equation*}
\int_{E} f = \lim_{n \to \infty} \int_{E} f_n.
\end{equation*}
\end{thm}
\begin{strategy}
\begin{enumerate}
  \item Argue why it suffices to prove the case where we do not have the
    ``a.e.'' assumptions. There are 2 places here where we have an ``a.e.''
    assumption:
\begin{enumerate}
      \item $f_n \leq f_{n+1}$ a.e. on $E$; and
\item $f(x) = \lim_{n \to \infty} f_n(x)$ a.e. on $E$.
\end{enumerate}
\item Look at where good things happen and bad things happen, and we'll be
able to show that $f$ is measurable.
  \item Having gotten rid of the place where nasty things happen and shown
    that $f$ is measurable, we will find that we need to show that
\begin{equation*}
\int_{H} f = \lim_{n \to \infty} \int_{H} f_n,
\end{equation*}
    where $H$ is where our hopes and dreams live.
  \item One direction is easy, since $f_n \leq f$ for all $n$, on $H$. For the
other direction, we look at a simple function $\phi \leq f$, which is then
arbitrary. Then since $\lim_{n \to \infty} f_n = f$ (pointwise), we want
to be able to show something along the lines of
\begin{equation*}
\int_{H} f_n - \int_{H} \phi \geq 0.
\end{equation*}
\begin{quotebox}{red}{foreground}
Instead of trying to do this over the entire $H$, we can look at where
this happens on $H$ for each $n$. Since the $f_n$'s are increasing, and
$\phi$ arbitrarily fixed, $f_n - \phi$ should give us more and more
places where they are positive on $H$.
\end{quotebox}
\end{enumerate}
\end{strategy}
\begin{proof}
\hlbnoted{Step 1} Let
\begin{equation*}
Z = \left\{ x \in E : f(x) \neq \lim_{n \to \infty} f_n(x) \right\}.
\end{equation*}
By hypothesis, $mZ = 0$ and $Z \in \mathfrak{M}(E)$.
Now by \cref{lemma:monotonicity_of_the_lebesgue_integral_and_other_lemmas}, $f_n \in
\mathcal{L}(E, [0, \infty])$ and so $f_n \restriction_{E \setminus Z} \in
\mathcal{L}(E \setminus Z, [0, \infty])$. Since by hypothesis we have $\forall
x \in E \setminus Z$,
\begin{equation*}
f(x) = \lim_{n \to \infty} f_n(x),
\end{equation*}
$f \restriction_{E \setminus Z} \in \mathcal{L}(E \setminus Z, [0,
\infty])$ by \cref{crly:extended_limit_of_real_valued_functions}.
\noindent
\hlbnoted{Step 2} For each $n \geq 1$, let
\begin{equation*}
Y_n \coloneqq \{ x \in E : f_n(x) > f_{n+1}(x) \}.
\end{equation*}
Then by hypothesis, $mY_n = 0$ and $Y_n \in \mathfrak{M}(E)$. Let
\begin{equation*}
Y = \bigcup_{n=1}^{\infty} Y_n.
\end{equation*}
Then since $\mathfrak{M}(E)$ is a $\sigma$-algebra, $Y \in \mathfrak{M}(E)$
and
\begin{equation*}
0 \leq mY \leq \sum_{n=1}^{\infty} mY_n = 0 \implies mY = 0.
\end{equation*}
  \sidenote{Up till here, we have shown that we can, instead, turn our focus to
  wherever nice things happen, and that $f$ is measurable as desired.}
At this point, by
\cref{lemma:monotonicity_of_the_lebesgue_integral_and_other_lemmas},
\begin{equation*}
\int_{E} f = \int_{E \setminus (Y \cup Z)} f + \int_{Y \cup Z} f = \int_{E
\setminus (Y \cup Z)} f
\end{equation*}
and for each $n \geq 1$,
\begin{equation*}
\int_{E} f_n = \int_{E \setminus (Y \cup Z)} f_n + \int_{Y \cup Z} f_n =
\int_{E \setminus (Y \cup Z)} f_n.
\end{equation*}
Thus, it remains for us to show that
\begin{equation*}
\int_{E \setminus (Y \cup Z)} f = \lim_{n \to \infty} \int_{E \setminus (Y
\cup Z)} f_n.
\end{equation*}
\noindent
  \hlbnoted{Step 3} Let $X = Y \cup Z$, so that $X \in \mathfrak{M}(E)$ and
\begin{equation*}
0 \leq mX \leq mY + mZ = 0 \implies mX = 0.
\end{equation*}
Let $H = E \setminus X$. Note that we then have $H \in \mathfrak{M}(E)$ and
$\forall x \in H$,
\begin{equation}\label{eq:mct_nice_incing_seq}
\forall n \geq 1 \quad f_n(x) \leq f_{n+1}(x)
\end{equation}
and
\begin{equation}\label{eq:mct_nice_limit}
f(x) = \lim_{n \to \infty} f_n(x).
\end{equation}
For notational convenience, let
\begin{equation*}
g_n = f_n \restriction_{H}
\end{equation*}
and
\begin{equation*}
g = f \restriction_{H}.
\end{equation*}
By \cref{eq:mct_nice_incing_seq} and \cref{eq:mct_nice_limit}, we have that
\begin{equation*}
g_1 \leq g_2 \leq \hdots \leq g_n \leq g_{n+1} \leq \hdots \leq g.
\end{equation*}
By \cref{lemma:monotonicity_of_the_lebesgue_integral_and_other_lemmas}, $\forall x
\in H$
\begin{equation*}
\lim_{n \to \infty} g_n(x) = \sup_{n \geq 1} g_n(x) \leq g(x),
\end{equation*}
and so
\begin{equation*}
\lim_{n \to \infty} \int_{H} g_n = \sup_{n \geq 1} \int_{H} g_n \leq
\int_{H} g.
\end{equation*}
It remains to show that
\begin{equation*}
\int_{H} g \leq \lim_{n \to \infty} \int_{H} g_n.
\end{equation*}
  If we can show that for any $\phi \in \SIMP(H, [0, \infty])$ with $\phi \leq g$, we have
\begin{equation*}
\lim_{n \to \infty} \int_{H} g_n \geq \int_{H} \phi,
\end{equation*}
then our proof is done, since it would mean that
\begin{align*}
\int_{H} f = \int_{H} g = \lim_{n \to \infty} \int_{H} g_n = \lim_{n \to
\infty} \int_{H} f_n.
\end{align*}
\noindent
\hlbnoted{Step 4}
\sidenote[][-100pt]{Here, we do something like a race-check. We know that the $g_n$'s
  grow to be arbitrarily close to $g$, and the set $\{ \phi \in \SIMP(H, [0,
\infty]) : \phi \leq g \}$ also has elements arbitrarily close to $g$. It
would suffice to show that for every $\phi$, the limit of the integral of the
$g_n$'s is greater than the integral of $\phi$.} Let $\phi \in \SIMP(H, [0,
\infty])$ such that $\phi \leq g$. \sidenote{Note that we require this scaling
factor because we cannot allow $\phi = g$: otherwise our increasing sequence of
$g_n$'s may never be able to `catch up' to $\phi$, which is what we need.} Let
$0 < r < 1$, so that either
\begin{itemize}
\item $r \phi = 0 \leq g$; \sidenote{In the case where $g = 0$, we have that
$r\phi = 0$ and not something bigger.} or
\item $r\phi < g = \lim_{n \to \infty} g_n$.
\end{itemize}
Then, consider
\begin{equation*}
    H_k = (g_k - r\phi)^{-1}([0, \infty]).
\end{equation*}
  Notice that since $(g_k)_k$ is an increasing sequence of functions, we have
\sidenote{The increasing-ness of the $g_k$'s guarantees that if $(g_k -
r\phi)(x) \geq 0$, then $(g_{k+1} - r\phi)(x) \geq 0$. This is sort of like a
rising water level scenario.}
\begin{equation*}
H_1 \subseteq H_2 \subseteq H_3 \subseteq \hdots
\end{equation*}
\begin{marginfigure}
\centering
\begin{tikzpicture}
\draw[->] (-0.5, 0) -- (2, 0);
\draw[->] (0, -0.5) -- (0, 2);
\draw (0, 0.5) -- (1, 0.5);
\draw[dashed] (1, 0.5) -- (1, 0.7);
\draw (1, 0.7) -- (1.5, 0.7);
\draw[dashed] (1.5, 0.7) -- (1.5, 1.6);
\draw (1.5, 1.6) -- (2, 1.6) node[right] {$r\phi$ };
\draw (0,0) .. controls (0.3,0.3) and (1.4,0.8) .. (2,0.8)
node[right] {$g_k$};
\draw (0,0.4) .. controls (0.3,0.9) and (1.4,1.2) .. (2,1.2)
node[right] {$g_{k+1}$};
\end{tikzpicture}
\caption{Increasing levels of $g_k$ `covers' more and more parts of $r\phi$}\label{fig:increasing_levels_of_g_k_covers_more_and_more_parts_of_rphi_}
\end{marginfigure}
Also, note that
\begin{equation*}
H = \bigcup_{k=1}^{\infty} H_k.
\end{equation*}
\newpage
  \sidenote{By this construction, we have that $r\phi \leq g_k$ on $H_k$ for
  each $k$. So we already have
\begin{equation*}
\lim_{k \to \infty} \int_{H_k} r\phi \leq \lim_{k \to \infty} \int_{H_k} g_k
\end{equation*}
in our bag. Notice that since $\phi$ is a simple function, by
\cref{defn:integration_of_simple_functions}, we have
\begin{equation*}
\int_{H_k} \phi = \int_{H} \phi \cdot \chi_{H_k}.
\end{equation*}
  Since the $H_k$'s form an `increasing sequence' of sets, and especially since $H
= \bigcup_{k=1}^{\infty} H_k$, we expect
\begin{equation*}
\lim_{k \to \infty} \int_{H_k} \phi = \int_{H} \phi.
\end{equation*}}
WTS
\begin{equation*}
\int_{H} \phi = \lim_{k \to \infty} \int_{H_k} \phi.
\end{equation*}
Since $\phi \in \SIMP(H, [0, \infty])$, let us write
\begin{equation*}
\phi = \sum_{k=1}^{N} \alpha_k \chi_{J_k}
\end{equation*}
in its standard form, where $J_k \in \mathfrak{M}(H)$. Then
\begin{equation*}
\int_{H} \phi = \sum_{k=1}^{N} \alpha_k mJ_k,
\end{equation*}
while
\begin{equation*}
\int_{H_n} \phi = \sum_{k=1}^{N} \alpha_k m(J_k \cap H_n)
\end{equation*}
for each $n \geq 1$.
By the \hlnotea{continuity of the Lebesgue measure} (A1), notice that
\begin{equation*}
\lim_{n \to \infty} m(J_k \cap H_n)
= m\left(J_k \cap \left( \bigcup_{n=1}^{\infty} H_n \right) \right)
    = m(J_k \cap H) = m(J_k).
\end{equation*}
Thus
\begin{equation*}
    \lim_{n \to \infty} \int_{H_n} \phi = \sum_{k=1}^{N} \alpha_k m(J_k) =
\int_{H} \phi,
\end{equation*}
as claimed.
Then in particular, we have that
\begin{equation*}
    \int_{H} r \phi = \lim_{k \to \infty} \int_{H_k} r\phi \leq \lim_{k \to
    \infty} \int_{H_k} g_k \leq \lim_{k \to \infty} \int_{H} g_k,
\end{equation*}
where the last inequality follows from
\cref{lemma:monotonicity_of_the_lebesgue_integral_and_other_lemmas}.
Since $0 < r < 1$ was arbitrary, letting $r \to 1^-$ gives $\int_{H} \phi \leq
\lim_{k \to \infty} \int_{H} g_k$; taking the supremum over all such $\phi$
then yields $\int_{H} g \leq \lim_{k \to \infty} \int_{H} g_k$. This is exactly
the final piece that we have set out to prove, and so we have completed the
proof.
\end{proof}
\begin{eg}
  Recall our ``pathological'' sequence of Riemann integrable functions earlier on,
where $E = \mathbb{Q} \cap [0, 1] = \{ q_n \}_{n=1}^{\infty}$, and sequence of
functions
\begin{equation*}
f_n = \chi_{\{q_1, \ldots, q_n\}}, \text{ for } n \geq 1,
\end{equation*}
and their limit
\begin{equation*}
    f = \chi_{\mathbb{Q} \cap [0, 1]}.
\end{equation*}
We have that
\begin{equation*}
0 \leq f_1 \leq f_2 \leq \hdots \leq f,
\end{equation*}
and each \sidenote{
\begin{ex}
Show that $f_n \in \mathcal{L}([0, 1], [0, \infty))$.
\end{ex}
}
\begin{equation*}
f_n \in \mathcal{L}([0, 1], [0, \infty)).
\end{equation*}
By the \hyperref[thm:the_monotone_convergence_theorem]{Monotone Convergence
Theorem (MCT)}, $f$ is measurable and
\begin{equation*}
\int_{[0, 1]} f = \lim_{n \to \infty} \int_{[0, 1]} f_n = \lim_{n \to
\infty} 0 = 0.
\end{equation*}
This agrees with what we saw much earlier on, i.e.
\begin{equation*}
0 \leq \int_{[0, 1]} f = \int_{[0, 1]} \chi_E = mE \leq m\mathbb{Q} = 0.
\end{equation*}
  Note that each $f_n$ is Riemann integrable, while $f$ is not; $f$ is,
  however, Lebesgue integrable. In other words, this function $f$ is an example
  of a Lebesgue integrable function that is not Riemann integrable.
\end{eg}
\begin{crly}[Linearity of the Lebesgue Integral and Other Results]\label{crly:linearity_of_the_lebesgue_integral_and_other_results}
Let $E \in \mathfrak{M}(\mathbb{R})$.
\begin{enumerate}
\item If $f, g \in \mathcal{L}(E, [0, \infty])$ and $\kappa \geq 0$, then
\begin{equation*}
\int_{E} \kappa f + g = \kappa \int_{E} f + \int_{E} g.
\end{equation*}
\item If $(h_n)_{n=1}^{\infty}$ is a sequence in $\mathcal{L}(E, [0,
\infty])$ and
\begin{equation*}
h(x) \coloneqq \lim_{N \to \infty} \sum_{n=1}^{N} h_n(x), \quad \forall
x \in E,
\end{equation*}
then $h \in \mathcal{L}(E, [0, \infty])$ and
\begin{equation*}
\int_{E} h = \sum_{n=1}^{\infty} \int_{E} h_n.
\end{equation*}
\item Let $f \in \mathcal{L}(E, [0, \infty])$. If $(H_n)_{n=1}^{\infty}$ is
    a sequence in $\mathfrak{M}(E)$ with $H_i \cap H_j = \emptyset$ when $1 \leq
i \neq j \leq \infty$ and $H = \bigcupdot_{n=1}^{\infty} H_n$, then
\begin{equation*}
\int_{H} f = \sum_{n=1}^{\infty} \int_{H_n} f.
\end{equation*}
\end{enumerate}
\end{crly}
\begin{proof}
\begin{enumerate}
  \item By A3, there exist sequences of simple, measurable functions
    $(\phi_n)_n, (\psi_n)_n$ in $\mathcal{L}(E, [0, \infty])$ such that
\begin{gather*}
0 \leq \phi_1 \leq \phi_2 \leq \hdots \leq f \\
0 \leq \psi_1 \leq \psi_2 \leq \hdots \leq g
\end{gather*}
    and such that $\forall x \in E$,
\begin{gather*}
\lim_{n \to \infty} \phi_n(x) = f(x) \\
\lim_{n \to \infty} \psi_n(x) = g(x)
\end{gather*}
By
\cref{propo:linearity_and_monotonicity_of_the_integral_of_simple_functions},
    we have that for each $n$, for any $\kappa \in [0, \infty)$, we have
\begin{equation*}
\int_{E} \kappa \phi_n + \psi_n = \kappa \int_{E} \phi_n + \int_{E}
\psi_n.
\end{equation*}
Furthermore, note that
\begin{equation*}
      \lim_{n \to \infty} (\kappa \phi_n + \psi_n)(x) = (\kappa f + g)(x),
\end{equation*}
    and $(\kappa \phi_n + \psi_n)_n$ is an increasing \sidenote{If you are
    second-guessing yourself like I did, notice that $n$ is fixed for both of
    them, not just one of them.} sequence of non-negative, simple,
measurable functions converging pointwise to the function $\kappa f + g$.
Thus, by the MCT, we see that
\begin{align*}
\int_{E} (\kappa f + g)
      &= \lim_{N \to \infty} \int_{E} (\kappa \phi_N + \psi_N) \\
      &= \lim_{N \to \infty} \left( \kappa \int_{E} \phi_N + \int_{E} \psi_N \right)
= \kappa \int_{E} f + \int_{E} g.
\end{align*}
\item \sidenote{Since $\Range h_n \subseteq [0, \infty]$, the partial sums
form an increasing sequence of functions. Then, we can make use of the
MCT.} Let
\begin{equation*}
g_N = \sum_{n=1}^{N} h_n
\end{equation*}
for each $N \geq 1$.
\noindent
    \hlbnoted{Showing that $g_N \in \mathcal{L}(E, [0, \infty])$} Since each
    $g_N$ is a finite sum of non-negative, measurable functions, and sums of
    measurable functions are measurable, each $g_N$ is measurable.
Then
\begin{equation*}
0 \leq g_1 \leq g_2 \leq \hdots \leq h
\end{equation*}
and $\forall x \in E$
\begin{equation*}
\lim_{N \to \infty} g_N(x) = h(x),
\end{equation*}
both of which are from our assumptions.
By the MCT and part (1), we have that
\begin{align*}
\int_{E} h &= \lim_{N \to \infty} \int_{E} g_N \\
&= \lim_{N \to \infty} \int_{E} \sum_{n=1}^{N} h_n \\
&= \lim_{N \to \infty} \sum_{n=1}^{N} \int_{E} h_n \\
&= \sum_{n=1}^{\infty} \int_{E} h_n
\end{align*}
as required.
  \item \sidenote{Since the RHS of the goal integrates over $H_n \subseteq H$,
      and the $H_i$'s are disjoint, we can break $f$ down according to the
      $H_n$'s.} Let
\begin{equation*}
h_n = f \cdot \chi_{H_n}
\end{equation*}
    for each $n \geq 1$. Since each $H_n \in \mathfrak{M}(E)$, each
    $\chi_{H_n}$ is measurable, and so $f$ being measurable implies that each
    $h_n$ is measurable. Since $H_i \cap H_j = \emptyset$ for all $1 \leq i
    \neq j \leq \infty$, we have that
    \begin{equation*}
      f \cdot \chi_H = \sum_{n=1}^{\infty} h_n.
    \end{equation*}
    By part (2), we have that
    \begin{equation*}
      \int_{H} f = \int_{E} f \cdot \chi_H = \sum_{n=1}^{\infty} \int_{E}
      f \cdot \chi_{H_n} = \sum_{n=1}^{\infty} \int_{H_n} f.
\end{equation*}
\end{enumerate}
\end{proof}
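As a small sanity check of part (3) (mine, not from the lecture), take $f =
\chi_{[0, \infty)}$, $H = [0, \infty)$ and $H_n = [n-1, n)$ for $n \geq 1$.
Then
\begin{equation*}
  \int_{H} f = \sum_{n=1}^{\infty} \int_{H_n} f = \sum_{n=1}^{\infty} m[n-1, n)
  = \sum_{n=1}^{\infty} 1 = \infty = m[0, \infty),
\end{equation*}
as expected.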
\begin{defn}[Lebesgue Integrable]\index{Lebesgue Integrable}\label{defn:lebesgue_integrable}
Let $E \in \mathfrak{M}(\mathbb{R})$ and $f \in \mathcal{L}(E,
\overline{\mathbb{R}})$. We say that $f$ is \hlnoteb{Lebesgue integrable} on
$E$ if \sidenote{Recall \cref{remark:f_plus_f_minus}.}
\begin{equation*}
\int_{E} f^+ < \infty \quad \text{and} \quad \int_{E} f^- < \infty,
\end{equation*}
in which case we set
\begin{equation*}
\int_{E} f \coloneqq \int_{E} f^+ - \int_{E} f^-.
\end{equation*}
We denote by $\mathcal{L}_1(E, \overline{\mathbb{R}})$ the set of all Lebesgue
integrable functions from $E$ to $\overline{\mathbb{R}}$, and
$\mathcal{L}_1(E, \mathbb{R})$ all Lebesgue integrable functions from $E$ to
$\mathbb{R}$.
\end{defn}
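As a small worked example (mine, not from the lecture), take $E = [0, 2]$ and
$f = \chi_{[0, 1]} - 2 \chi_{(1, 2]}$. Then $f^+ = \chi_{[0, 1]}$ and $f^- = 2
\chi_{(1, 2]}$, so that
\begin{equation*}
  \int_{E} f^+ = 1 < \infty \quad \text{and} \quad \int_{E} f^- = 2 < \infty,
\end{equation*}
whence $f \in \mathcal{L}_1(E, \mathbb{R})$ and $\int_{E} f = 1 - 2 = -1$.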
\begin{remark}
Let $E \in \mathfrak{M}(\mathbb{R})$.
\begin{enumerate}
\item By definition, every Lebesgue integrable function on $E$ is Lebesgue
measurable.
\item A \hlnotea{measurable function} $f$ is Lebesgue integrable iff
$\abs{f}$ is Lebesgue integrable. Notice that
\begin{equation*}
\int_{E} f = \int_{E} f^+ - \int_{E} f^-
\end{equation*}
while
\begin{equation*}
\int_{E} \abs{f} = \int_{E} f^+ + f^- = \int_{E} f^+ + \int_{E} f^-,
\end{equation*}
      and so if either of $f$ or $\abs{f}$ is integrable, then
      \begin{equation*}
        \int_{E} f^+ < \infty \quad \text{and} \quad \int_{E} f^- < \infty,
      \end{equation*}
      whence the other must also be integrable.
It is important to note that this is a distinguishing feature of Lebesgue
integration, in comparison to Riemann integration. For instance, if we
consider the function
\begin{equation*}
f(x) = \frac{\sin x}{x}, \text{ for } x \geq 1,
\end{equation*}
      improper Riemann integration gives us that $\int_{1}^{\infty} f(x) \dif{x}$
      converges. But from the POV of Lebesgue integration, notice that
\begin{align*}
        &\int_{\left[\pi, (N+1)\pi\right]} \frac{\abs{\sin x}}{x} \\
        &= \sum_{k=1}^{N} \int_{[\pi k, \pi(k+1)]} \frac{\abs{\sin x}}{x} \\
&= \sum_{k=1}^{N} \int_{[0, \pi]} \frac{\abs{\sin(t + k\pi)}}{t + k\pi}
\\
&= \sum_{k=1}^{N} \int_{[0, \pi]} \frac{\abs{\sin t}}{t + k\pi} \\
&\geq \sum_{k=1}^{N} \frac{1}{(k+1)\pi} \int_{[0, \pi]} \sin t.
\end{align*}
Assuming we know some of the upcoming results, in particular, assuming
that we know that for bounded functions the Lebesgue integral is the same
as the Riemann integral, we see that the above is
\begin{equation*}
= \frac{2}{\pi} \sum_{k=1}^{N} \frac{1}{k+1},
\end{equation*}
      which is a tail of the harmonic series and hence divergent as $N \to
      \infty$. Thus $\abs{f}$ is not Lebesgue integrable.
\item If $f \in \mathcal{L}_1(E, \overline{\mathbb{R}})$, then
\begin{equation*}
mf^{-1}(\{-\infty\}) = 0 = mf^{-1}(\{\infty\}).
\end{equation*}
\begin{ex}
Prove that the above is indeed the case.
\end{ex}
\item Following the above, if we set
\begin{equation*}
H = f^{-1}(\{- \infty, \infty\}),
\end{equation*}
      then $H \in \mathfrak{M}(E)$ and $mH = 0$. Letting
      \begin{equation*}
        g = f \cdot \chi_{E \setminus H},
      \end{equation*}
      we then have
\begin{equation*}
g = f \text{ a.e. and } g \in \mathcal{L}_1(E, \mathbb{R}).
\end{equation*}
      This will prove itself more useful than it seems, especially since
      $\mathcal{L}_1(E, \overline{\mathbb{R}})$ is \hlimpo{not a vector
      space}!
\item Suppose that $g : E \to \mathbb{C}$ is measurable. Let us write
\begin{equation*}
g = (g_1 - g_2) + i(g_3 - g_4),
\end{equation*}
      where $g_1 = (\Re g)^+$, $g_2 = (\Re g)^-$, $g_3 = (\Im g)^+$ and $g_4 =
      (\Im g)^-$. Then we say that $g$ is \hlnotea{Lebesgue integrable}, and
write
\begin{equation*}
g \in \mathcal{L}_1(E, \mathbb{C}),
\end{equation*}
if
\begin{equation*}
\int_{E} g_k < \infty \quad \forall 1 \leq k \leq 4,
\end{equation*}
and we write
\begin{equation*}
        \int_{E} g = \left(\int_{E} g_1 - \int_{E} g_2 \right) + i \left(
        \int_{E} g_3 - \int_{E} g_4 \right).
\end{equation*}
\end{enumerate}
\end{remark}
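As a small worked example of the complex case (mine, not from the lecture),
take $E = [0, 2]$ and $g = \chi_{[0, 1]} + i \chi_{[0, 2]}$. Then $g_1 =
\chi_{[0, 1]}$, $g_3 = \chi_{[0, 2]}$ and $g_2 = g_4 = 0$, each with finite
integral, so $g \in \mathcal{L}_1(E, \mathbb{C})$ and
\begin{equation*}
  \int_{E} g = \int_{E} g_1 + i \int_{E} g_3 = 1 + 2i.
\end{equation*}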
\begin{propo}[Linearity of Lebesgue Integral for Lebesgue Integrable Functions]\label{propo:linearity_of_lebesgue_integral_for_lebesgue_integrable_functions}
  Let $E \in \mathfrak{M}(\mathbb{R})$. Suppose that $f, g \in \mathcal{L}_1(E,
\mathbb{R})$ and $\kappa \in \mathbb{R}$.
\begin{enumerate}
\item $\kappa f \in \mathcal{L}_1(E, \mathbb{R})$ and $\int_{E} \kappa f =
\kappa \int_{E} f$.
\item $f + g \in \mathcal{L}_1(E, \mathbb{R})$ and $\int_{E} (f + g) =
\int_{E} f + \int_{E} g$.
\item Finally,
\begin{equation*}
\abs{ \int_{E} f } \leq \int_{E} \abs{f}.
\end{equation*}
\end{enumerate}
\end{propo}
\begin{proof}
Note that \cref{crly:linearity_of_the_lebesgue_integral_and_other_results}
covers for the cases where $f, g \in \mathcal{L}(E, [0, \infty])$ and $\kappa
  \geq 0$ for (1) and (2). This is, unfortunately, insufficient for the entire
  proposition.
\begin{enumerate}
\item Let $f = f^+ - f^-$.
\noindent
\hlbnoted{Case 1: $\kappa = 0$} We have that
\begin{equation*}
\int_{E} \kappa f = \int_{E} 0 = 0 = \kappa \int_{E} f.
\end{equation*}
\noindent
    \hlbnoted{Case 2: $\kappa > 0$} We have
\begin{equation*}
\kappa f = (\kappa f)^+ - (\kappa f)^-.
\end{equation*}
Note
\begin{equation*}
(\kappa f)^+ = \kappa f^+ \text{ and } (\kappa f)^- = \kappa f^-.
\end{equation*}
    So, since $f^+, f^- \in \mathcal{L}(E, [0, \infty])$, by
\cref{crly:linearity_of_the_lebesgue_integral_and_other_results},
\begin{align*}
\int_{E} \kappa f
&= \int_{E} \kappa f^+ - \int_{E} \kappa f^- \\
&= \kappa \int_{E} f^+ - \kappa \int_{E} f^- \\
      &= \kappa \left( \int_{E} f^+ - \int_{E} f^- \right) \\
&= \kappa \int_{E} f.
\end{align*}
\noindent
\hlbnoted{Case 3: $\kappa < 0$} Similar to the above, we first observe
that
\begin{equation*}
(\kappa f)^+ = -\kappa f^- \text{ and } (\kappa f)^- = -\kappa f^+.
\end{equation*}
Then by the same reason as in the last case, we have
\begin{align*}
\int_{E} \kappa f
&= \int_{E} -\kappa f^- - \int_{E} -\kappa f^+ \\
&= - \kappa \left( \int_{E} f^- - \int_{E} f^+ \right) \\
&= - \kappa \left( - \int_{E} f \right) \\
&= \kappa \int_{E} f.
\end{align*}
\item \hlbnoted{$f + g \in \mathcal{L}_1(E, \mathbb{R})$} For convenience,
let
\begin{equation*}
h = f + g = f^+ - f^- + g^+ - g^-.
\end{equation*}
Notice that
\begin{equation*}
h^+, h^- \leq \abs{h} = \abs{f + g}
\leq \abs{f} + \abs{g} = f^+ + f^- + g^+ + g^-.
\end{equation*}
Thus by \cref{crly:linearity_of_the_lebesgue_integral_and_other_results},
\begin{align*}
\int_{E} h^+ &\leq \int_{E} f^+ + f^- + g^+ + g^- \\
&= \int_{E} f^+ + \int_{E} f^- + \int_{E} g^+ + \int_{E}
g^- < \infty.
\end{align*}
Similarly, $\int_{E} h^- < \infty$.
\noindent
\hlbnoted{$\int_{E} (f + g) = \int_{E} f + \int_{E} g$} Notice that
\begin{equation*}
h^+ - h^- = h = f + g = f^+ - f^- + g^+ - g^-,
\end{equation*}
and so
\begin{equation*}
h^+ + f^- + g^- = h^- + f^+ + g^+.
\end{equation*}
Then by \cref{crly:linearity_of_the_lebesgue_integral_and_other_results},
\begin{equation*}
\int_{E} h^+ + \int_{E} f^- + \int_{E} g^-
= \int_{E} h^- + \int_{E} f^+ + \int_{E} g^+,
\end{equation*}
and so
\begin{align*}
\int_{E} (f + g) &= \int_{E} h = \int_{E} h^+ - \int_{E} h^- \\
&= \int_{E} f^+ - \int_{E} f^- + \int_{E} g^+ -
\int_{E} g^- \\
&= \int_{E} f + \int_{E} g.
\end{align*}
  \item First, notice that since $f \in \mathcal{L}_1(E, \mathbb{R})$, by our
    previous remark, $\abs{f} \in \mathcal{L}_1(E, \mathbb{R})$. Now since
$\abs{f} = f^+ + f^-$, and $\abs{\int_{E} f^+}, \abs{\int_{E} f^-} \geq
0$,
\begin{align*}
\abs{\int_{E} f}
&= \abs{ \int_{E} f^+ - \int_{E} f^- } \\
&\leq \abs{\int_{E} f^+} + \abs{\int_{E} f^-} \\
&= \int_{E} f^+ + \int_{E} f^- \\
&= \int_{E} \abs{f}.
\end{align*}
\end{enumerate}
\end{proof}
% section lebesgue_integration_continued (end)
% chapter lecture_8_may_30th_2019 (end)
\chapter{Lecture 9 Jun 04 2019}%
\label{chp:lecture_9_jun_04_2019}
% chapter lecture_9_jun_04_2019
\section{Lebesgue Integration (Continued 2)}%
\label{sec:lebesgue_integration_continued_2}
% section lebesgue_integration_continued_2
Thus far, we've only integrated simple functions, and never even did so for,
say, $f(x) = x$. Trying to do that will lead to intense swearing, rising
blood pressure, heavy nausea, and mental pain. Why? Well, just try doing
it.
\begin{ex}[How a slime became one heck of a monster to deal with]
Calculate $\int_{[0, 1]} x$.
\end{ex}
We hate pain, and now we want to crawl back to Riemann integration and ask for
forgiveness. Fortunately, the nice world of Riemann integration is kind enough
to give us a bridge. We shall now study this bridge. In particular, we shall see
that for \hlimpo{bounded} functions on \hlimpo{closed, bounded} intervals,
Riemann integrability implies Lebesgue integrability, and, in fact, they
coincide on these functions. In particular, this opens up the
\hlnotea{Fundamental Theorem of Calculus} (for Riemann integration) to us.
\begin{lemma}[Riemann Integrability and Lebesgue Integrability of Step Functions]\label{lemma:riemann_integrability_and_lebesgue_integrability_of_step_functions}
Let $a < b \in \mathbb{R}$ and $\phi : [a, b] \to \mathbb{R}$ be a step
function. Then $\phi$ is both Riemann integrable and Lebesgue integrable, and
\begin{equation*}
\int_{[a, b]} \phi = \int_{a}^{b} \phi.
\end{equation*}
\end{lemma}
\begin{proof}
  Let $P = \{ a = p_0 < p_1 < p_2 < \hdots < p_N = b \} \in \mathcal{P}([a,
  b])$, where the $p_n$'s are chosen such that the intervals $[p_{n-1}, p_n)$
  contain no `jump'. Since $\phi$ is a step function, let
\begin{equation*}
\phi = \sum_{n=1}^{N} \alpha_n \chi_{[p_{n-1}, p_n)},
\end{equation*}
where $\alpha_n = \phi(x)$ for all $x \in [p_{n-1}, p_n)$, $1 \leq n \leq N$.
Then
\begin{align*}
\int_{[a, b]} \phi
&= \sum_{n=1}^{N} \alpha_n m[p_{n-1}, p_n) \\
&= \sum_{n=1}^{N} \alpha_n (p_n - p_{n-1}) \\
&= \sum_{n=1}^{N} \int_{p_{n-1}}^{p_n} \alpha_n \\
    &= \int_{a}^{b} \sum_{n=1}^{N} \alpha_n \chi_{[p_{n-1}, p_n)} \\
&= \int_{a}^{b} \phi.
\end{align*}
\end{proof}
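As a quick sanity check (mine, not from the lecture), take $\phi = 3 \chi_{[0,
1)} + 5 \chi_{[1, 2]}$ on $[a, b] = [0, 2]$. Then
\begin{equation*}
  \int_{[0, 2]} \phi = 3 m[0, 1) + 5 m[1, 2] = 3 + 5 = 8 = \int_{0}^{2} \phi,
\end{equation*}
the common value of the Lebesgue and Riemann integrals.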
\begin{thm}[Bounded Riemann-Integrable Functions are Lebesgue Integrable]\label{thm:bounded_riemann_integrable_functions_are_lebesgue_integrable}
Let $a < b \in \mathbb{R}$ and $f : [a, b] \to \mathbb{R}$ be a bounded,
Riemann-integrable function. Then $f \in \mathcal{L}_1([a, b], \mathbb{R})$
and
\begin{equation*}
\int_{[a, b]} f = \int_{a}^{b} f,
\end{equation*}
i.e. the Lebesgue and Riemann integrals of $f$ over $[a, b]$ coincide.
\end{thm}
\begin{strategy}
Here is my understanding of the idea that motivates this proof.
\begin{enumerate}
\item It is important that the function is bounded both on its domain and
its range. A bound on the domain allows us to do finite sums, and a bound
on the range puts a cap on how high our rectangles can be.
\item We need to reduce the problem to deal only with step functions, using
step functions as close to $f$ as possible, and then use our earlier
results and intuition to forge forward.
\end{enumerate}
\end{strategy}
\begin{proof}
  First, since $f$ is bounded, wma $\abs{f} < M$ for some $M \in \mathbb{R}$.
  Let $g = M\chi_{[a, b]}$, which is a step function and is hence integrable by
\cref{lemma:riemann_integrability_and_lebesgue_integrability_of_step_functions}.
Then, notice that $f + g$ is Riemann integrable. Furthermore, observe that
\begin{equation*}
\int_{a}^{b} (f + g) = \int_{a}^{b} f + M(b - a).
\end{equation*}
So $f + g \in \mathcal{L}_1([a, b], \mathbb{R})$ iff $f \in \mathcal{L}_1([a,
b], \mathbb{R})$.
Now, by \cref{thm:cauchy_criterion_of_riemann_integrability}, for each $n \geq
1$, $\exists R_n \in \mathcal{P}[a, b]$ partition such that $\forall X, Y
\supseteq R_n$ refinements, $\forall X^*, Y^*$ test values of $X$ and $Y$
respectively, we have
\begin{equation*}
    \abs{S(f, X, X^*) - S(f, Y, Y^*)} < \frac{1}{n}.
\end{equation*}
\sidenote{Get finer and finer refinements.} Now, let $Q_N = \bigcup_{n=1}^{N}
R_n$, so that it is a \hlimpo{common refinement} of $R_1, R_2, \ldots, R_N$.
Write
\begin{equation*}
Q_N = \left\{ a = q_{0,N} < q_{1, N} < \hdots < q_{m_N,N} \right\}.
\end{equation*}
\sidenote{Look at each subinterval of each refinement.} Let
\begin{equation*}
    H_{k,N} = [q_{k-1,N}, q_{k, N}) \text{ for } 1 \leq k \leq m_N - 1,
\end{equation*}
and
\begin{equation*}
H_{m_N,N} = [q_{m_N-1, N}, q_{m_N,N}].
\end{equation*}
\sidenote{Get the sup and inf of each interval under $f$.} Define for each $1
\leq k \leq m_N$,
\begin{gather*}
    \alpha_{k, N} \coloneqq \inf \{ f(t) : t \in H_{k, N} \} \geq -M \\
\beta_{k,N} \coloneqq \sup \{ f(t) : t \in H_{k, N} \} \leq M.
\end{gather*}
\sidenote{Use the above $\alpha$'s and $\beta$'s to construct simple
functions, which are step-like functions.} For each $N \geq 1$, let
\begin{gather*}
\phi_{N} \coloneqq \sum_{k=1}^{m_N} \alpha_{k, N} \chi_{H_{k, N}} \\
\psi_{N} = \sum_{k=1}^{m_N} \beta_{k,N} \chi_{H_{k,N}}.
\end{gather*}
Since each $\phi_N, \psi_N$ is simple, they are all measurable and Lebesgue
integrable (cf.
\cref{lemma:riemann_integrability_and_lebesgue_integrability_of_step_functions}).
Now, notice that
\begin{equation*}
Q_1 \subseteq Q_2 \subseteq \hdots \subseteq Q_N \subseteq Q_{N+1} \subseteq
\hdots
\end{equation*}
since it is a sequence of finer and finer refinements, we have
\begin{equation}\label{eq:inc_dec_of_simp_fns_to_f_riem_intg}
\phi_1 \leq \phi_2 \leq \phi_3 \leq \hdots \leq f \leq \hdots \leq \psi_3
\leq \psi_2 \leq \psi_1.
\end{equation}
Thus, by
\cref{lemma:riemann_integrability_and_lebesgue_integrability_of_step_functions}
and \cref{lemma:monotonicity_of_the_lebesgue_integral_and_other_lemmas}, we have
\begin{equation*}
\int_{[a, b]} \phi_N = \int_{a}^{b} \phi_N \leq \int_{a}^{b} f \leq
\int_{a}^{b} \psi_N = \int_{[a, b]} \psi_N
\end{equation*}
for each $N$. Since $Q_N$ is a refinement of $R_N$, we have that
\begin{equation*}
\abs{S(f, Q_N, Q_N^*) - S(f, Q_N, Q_N^{**})} < \frac{1}{N},
\end{equation*}
which implies
\begin{equation*}
\abs{\int_{[a, b]} \phi_N - \int_{[a, b]} \psi_N} < \frac{1}{N},
\end{equation*}
for $N \geq 1$.
Due to \cref{eq:inc_dec_of_simp_fns_to_f_riem_intg}, let
\begin{equation*}
    \phi \coloneqq \lim_{N \to \infty} \phi_N \text{ and } \psi \coloneqq \lim_{N
    \to \infty} \psi_N.
\end{equation*}
Then by the \hyperref[thm:the_monotone_convergence_theorem]{MCT}, we have that
\begin{align*}
&\int_{[a, b]} \phi = \lim_{N \to \infty} \int_{[a, b]} \phi_N = \lim_{N \to
\infty} \int_{a}^{b} \phi_N \\
&= \int_{a}^{b} f \\
    &= \lim_{N \to \infty} \int_{a}^{b} \psi_N = \lim_{N \to \infty} \int_{[a, b]} \psi_N =
\int_{[a, b]} \psi.
\end{align*}
  Then $\int_{[a, b]} (\psi - \phi) = 0$. Since $\phi \leq \psi$, we must thus
have $\phi = \psi$ a.e. on $[a, b]$. Since $\phi \leq f \leq \psi$, we have
that $\phi = f = \psi$ a.e. on $[a, b]$. Since $\phi, \psi$ are measurable, so
is $f$, and thus
\begin{equation*}
\int_{[a, b]} f = \int_{[a, b]} \phi = \int_{a}^{b} f < \infty.
\end{equation*}
\end{proof}
\begin{crly}[Bounded Riemann-Integrable Functions are Lebesgue Integrable --
Complex Version]\label{crly:bounded_riemann_integrable_functions_are_lebesgue_integrable_complex_version}
Let $a < b \in \mathbb{R}$ and $f : [a, b] \to \mathbb{C}$ be a bounded,
Riemann-integrable function.
  Then $f \in \mathcal{L}_1([a, b], \mathbb{C})$ and
\begin{equation*}
\int_{[a, b]} f = \int_{a}^{b} f.
\end{equation*}
\end{crly}
Our earlier demon-level slime has been reduced back to being a, well,
slime-level monster.
\begin{eg}
Let $f(x) = x$ and $x \in [0, 1]$. Then by the \hlnotea{Fundamental Theorem of
Calculus},
\begin{equation*}
\int_{[0, 1]} f = \int_{0}^{1} f = \frac{x^2}{2} \at{0}{1} = \frac{1}{2} - 0
= \frac{1}{2}.
\end{equation*}
\end{eg}
\begin{eg}
Let $f(x) = \frac{1}{x^2}$ where $x \in E \coloneqq [1, \infty)$. We want to
calculate $\int_{[1, \infty)} f$. For each $n \geq 1$, set $f_n \coloneqq f
  \cdot \chi_{[1, n]}$. Then each $f_n$ is measurable, since it is
  continuous except at one point on $E$, and
\begin{equation*}
0 \leq f_1 \leq f_2 \leq \hdots,
\end{equation*}
with
\begin{equation*}
\lim_{n \to \infty} f_n(x) = f(x) \quad \forall x \geq 1.
\end{equation*}
By \cref{thm:bounded_riemann_integrable_functions_are_lebesgue_integrable},
for all $n \geq 1$,
\begin{equation*}
    \int_{[1, n]} f_n = \int_{1}^{n} f_n = \int_{1}^{n} \frac{1}{x^2} =
-\frac{1}{x} \at{1}{n} = 1 - \frac{1}{n}.
\end{equation*}
  By the \hyperref[thm:the_monotone_convergence_theorem]{MCT},
\begin{align*}
\int_{[1, \infty)} f
&= \lim_{n \to \infty} \int_{[1, n]} f_n \\
&= \lim_{n \to \infty} \left( 1 - \frac{1}{n} \right) = 1.
\end{align*}
\end{eg}
\begin{note}
  In the above example, the Lebesgue integral of $f$ returns the value of the
  improper Riemann integral of $f$ over $[1, \infty)$, unlike what happened
  with the function $\frac{\sin x}{x}$ that we looked at earlier. There are 2
  things to note here:
\begin{itemize}
\item it is possible for an improper Riemann integral of a measurable
function $f : [1, \infty) \to \mathbb{R}$ to exist, even though the
Lebesgue integral $\int_{[1, \infty)} f$ \hlimpo{does not exist}!
\item There is no notion of an ``improper'' Lebesgue integral. The domain of
$f$, $[1, \infty)$, is just another measurable set.
\end{itemize}
\end{note}
\newthought{In the} \hyperref[thm:the_monotone_convergence_theorem]{Monotone
Convergence Theorem}, if the ``increasing'' assumption is dropped, then the
result may not hold.
\begin{eg}[The MCT needs an increasing/decreasing sequence of functions]
Consider the sequence $(f_n)_n$ given by
\begin{equation*}
f_n : [1, \infty) \to \mathbb{R}
\end{equation*}
where
\begin{equation*}
x \mapsto \begin{cases}
\frac{1}{nx} & 1 \leq x \leq e^n \\
0 & x > e^n
\end{cases}.
\end{equation*}
Then $(f_n)_n$ converges \hlnotea{uniformly} to $f = 0$ on $[1, \infty)$. Note
that for all $n \geq 1$, $f_n$ is Riemann integrable, and bounded on $[1,
e^n]$, and so
\begin{align*}
\int_{[1, \infty)} f_n &= \int_{[1, e^n]} \frac{1}{nx} \\
&= \int_{1}^{e^n} \frac{1}{nx} \\
&= \frac{1}{n} \ln x \at{1}{e^n} \\
&= \frac{1}{n} (n-0) = 1,
\end{align*}
for each $n \geq 1$. However,
\begin{equation*}
    \int_{[1, \infty)} f = \int_{[1, \infty)} 0 = 0.
\end{equation*}
\end{eg}
% section lebesgue_integration_continued_2 (end)
% chapter lecture_9_jun_04_2019 (end)
\chapter{Lecture 10 Jun 06th 2019}%
\label{chp:lecture_10_jun_06th_2019}
% chapter lecture_10_jun_06th_2019
\section{Lebesgue Integration (Continued 3)}%
\label{sec:lebesgue_integration_continued_3}
% section lebesgue_integration_continued_3
\begin{thm}[Fatou's Lemma]\index{Fatou's Lemma}\label{thm:fatou_s_lemma}
Let $E \in \mathfrak{M}(\mathbb{R})$ and $f_n \in \mathcal{L}(E, [0,
\infty])$, for $n \geq 1$. Then
\begin{equation*}
\int_{E} \liminf_{n \geq 1} f_n \leq \liminf_{n \geq 1} \int_{E} f_n.
\end{equation*}
\end{thm}
\begin{proof}
For each $N \geq 1$, set $g_N = \inf \{ f_n : n \geq N \}$. Then by
\cref{propo:measurability_of_limits_and_extremas}, each $g_N$ is measurable,
and clearly
\begin{equation*}
g_1 \leq g_2 \leq g_3 \leq \hdots.
\end{equation*}
Then by the \hyperref[thm:the_monotone_convergence_theorem]{MCT}, we have
\begin{equation*}
\int_{E} \liminf_{n \geq 1} f_n = \int_{E} \lim_{N \to \infty} g_N = \lim_{N
\to \infty} \int_{E} g_N.
\end{equation*}
Since $g_N \leq f_n$ for all $n \geq N$ (by construction), we have
\begin{equation*}
\int_{E} g_N \leq \int_{E} f_n
\end{equation*}
for all $n \geq N$, whence
\begin{equation*}
\int_{E} g_N \leq \liminf_{n \geq 1} \int_{E} f_n.
\end{equation*}
Since this holds for all $N \geq 1$, we have that
\begin{equation*}
\int_{E} \liminf_{n \geq 1} f_n = \lim_{N \to \infty} \int_{E} g_N \leq
\liminf_{n \geq 1} \int_{E} f_n.
\end{equation*}
\end{proof}
An example where the inequality in Fatou's Lemma is strict is the following.
\begin{eg}
Consider a sequence of functions $f_n = n\chi_{\left(0, \frac{1}{n}\right]}$,
$n \geq 1$. It's clear that for any $x \in [0, 1]$, $\lim_{n \to \infty}
f_n(x) = 0$. Thus
\begin{equation*}
\int_{[0, 1]} \liminf_{n \geq 1} f_n = \int_{[0, 1]} 0 = 0.
\end{equation*}
On the other hand
\begin{equation*}
\int_{[0, 1]} f_n = n m \left( \left( 0, \frac{1}{n} \right] \right) = 1
\end{equation*}
for all $n \geq 1$, and so $\liminf_{n \geq 1} \int_{[0, 1]} f_n = 1$.
\end{eg}
\begin{eg}
Suppose $E \in \mathfrak{M}(\mathbb{R})$, $f \in \mathcal{L}(E,
\overline{\mathbb{R}})$. Recall that $f \in \mathcal{L}_1(E,
\overline{\mathbb{R}}) \iff \abs{f} \in \mathcal{L}_1(E,
\overline{\mathbb{R}})$.
Suppose $g \in \mathcal{L}_1(E, \overline{\mathbb{R}})$, $f \in
\mathcal{L}(E, \overline{\mathbb{R}})$ and suppose $0 \leq \abs{f} \leq g$
a.e. on $E$, and that $\int_{E} g < \infty$. Then $\int_{E} \abs{f} \leq
  \int_{E} g < \infty$, and thus $f \in \mathcal{L}_1(E,
\overline{\mathbb{R}})$.
\end{eg}
\begin{thm}[Lebesgue Dominated Convergence Theorem]\index{Lebesgue Dominated Convergence Theorem}\label{thm:lebesgue_dominated_convergence_theorem}
Let $E \in \mathfrak{M}(\mathbb{R})$ and $(f_n)_n$ in $\mathcal{L}_1(E,
  \overline{\mathbb{R}})$. Suppose that there exists $g \in \mathcal{L}_1(E,
\overline{\mathbb{R}})$ such that $\abs{f_n} \leq g$ a.e. on $E$, for $n \geq
1$. Suppose furthermore that $f : E \to \overline{\mathbb{R}}$ is a function,
and that
\begin{equation*}
f(x) = \lim_{n \to \infty} f_n(x), \text{ a.e. on } E.
\end{equation*}
Then $f \in \mathcal{L}_1(E, \overline{\mathbb{R}})$ and
\begin{equation*}
\int_{E} f = \lim_{n \to \infty} \int_{E} f_n.
\end{equation*}
\end{thm}
\begin{proof}
\hlbnoted{Isolating ``bad'' points}
Consider, for each $n \geq 1$, the set
\begin{equation*}
Y_n \coloneqq \{ x \in E : \abs{f_n(x)} > g(x) \}.
\end{equation*}
By assumption, $mY_n = 0$ for each $n \geq 1$. Letting
\begin{equation*}
    Y \coloneqq \bigcup_{n=1}^{\infty} Y_n = \{ x \in E : \abs{f_n(x)} > g(x)
    \text{ for some } n \geq 1 \},
\end{equation*}
we have that
\begin{equation*}
0 \leq mY \leq \sum_{n=1}^{\infty} mY_n = 0,
\end{equation*}
and so $mY = 0$.
Furthermore, consider
\begin{equation*}
Z \coloneqq \{ x \in E : f(x) \neq \lim_{n \to \infty} f_n(x) \}.
\end{equation*}
By assumption, $mZ = 0$.
Let
\begin{equation*}
B \coloneqq Y \cup Z.
\end{equation*}
  Then for each $x \in B$, we have
  \begin{equation*}
    f(x) \neq \lim_{n \to \infty} f_n(x) \text{ or }
    \abs{f_n(x)} > g(x) \text{ for some } n \geq 1.
\end{equation*}
Most importantly, we have that
\begin{equation*}
0 \leq mB \leq mY + mZ = 0,
\end{equation*}
and so $mB = 0$.
Let $H = E \setminus B$. Then $\forall x \in H$,
\begin{equation*}
f(x) = \lim_{n \to \infty} f_n(x) \text{ and }
\abs{f_n(x)} \leq g(x) \text{ for each } n \geq 1.
\end{equation*}
It follows that if we can prove the statement for
\begin{equation*}
f_n \restriction_{H} \text{ and } f \restriction_H,
\end{equation*}
then we obtain the result that we desire. Thus, wlog, we may replace $E$ with
$H$.
\noindent
\hlbnoted{Proving the statement}
Since $f(x) = \lim_{n \to \infty} f_n(x)$, by A2, we have that
\begin{equation*}
\limsup_{n \geq 1} f_n (x) = \liminf_{n \geq 1} f_n(x) = \lim_{n \to \infty}
f_n(x) = f(x),
\end{equation*}
and so in particular we have
\begin{equation*}
\int_{E} f = \int_{E} \liminf_{n \geq 1} f_n(x).
\end{equation*}
From \hyperref[thm:fatou_s_lemma]{Fatou's Lemma} and A2, we have that
\begin{equation*}
\int_{E} f \leq \liminf_{n \geq 1} \int_{E} f_n \leq \limsup_{n \geq 1}
\int_{E} f_n.
\end{equation*}
Now, notice that $g - f_n \geq 0$ \sidenote{This is required to invoke Fatou's
Lemma}, and we have
\begin{align*}
\int_E (g - f) &= \int_{E} (g - \limsup_{n \geq 1} f_n) \\
&= \int_E \liminf_{n \geq 1} (g - f_n) \\
&\leq \liminf_{n \geq 1} \int_{E} (g - f_n) \quad \because
\hyperref[thm:fatou_s_lemma]{Fatou's} \\
&= \int_{E} g - \limsup_{n \geq 1} \int_{E} f_n.
\end{align*}
Thus
\begin{equation*}
\int_{E} f \geq \limsup_{n \geq 1} \int_{E} f_n.
\end{equation*}
Therefore
\begin{equation*}
\limsup_{n \geq 1} \int_{E} f_n \leq \int_{E} f \leq \liminf_{n \geq 1}
\int_{E} f_n \leq \limsup_{n \geq 1} \int_{E} f_n.
\end{equation*}
By the \hlnotea{Squeeze Theorem}, we obtain
\begin{equation*}
\int_{E} f = \lim_{n \to \infty} \int_{E} f_n.
\end{equation*}
\end{proof}
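As a standard sanity check (mine, not from the lecture), take $E = [0, 1]$ and
$f_n(x) = x^n$. Each $f_n$ is dominated by $g = \chi_{[0, 1]} \in
\mathcal{L}_1(E, \overline{\mathbb{R}})$, and $\lim_{n \to \infty} f_n(x) = 0$
for all $x \in [0, 1)$, i.e. a.e. on $E$. The theorem then gives
\begin{equation*}
  \int_{[0, 1]} \lim_{n \to \infty} f_n = 0 = \lim_{n \to \infty} \frac{1}{n+1}
  = \lim_{n \to \infty} \int_{[0, 1]} x^n,
\end{equation*}
where $\int_{[0, 1]} x^n = \frac{1}{n+1}$ by
\cref{thm:bounded_riemann_integrable_functions_are_lebesgue_integrable} and the
Fundamental Theorem of Calculus.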
% section lebesgue_integration_continued_3 (end)
\section{\texorpdfstring{$L_p$}{Lp} Spaces}%
\label{sec:_l_p_spaces}
% section _l_p_spaces
Functional analysis is the study of normed linear spaces and the continuous
linear maps between them. Amongst the most important examples are the so-called
$L_p$-spaces, and we will now turn our attention towards them. \marginnote{You
may wish to refresh your memory on the definition of a
\hyperref[defn:semi_norm]{semi-norm}.}
\begin{eg}
  Let $E \in \mathfrak{M}(\mathbb{R})$ and $mE > 0$. Recall that
\begin{equation*}
\mathcal{L}_1(E, \mathbb{K}) = \left\{ f \in \mathcal{L}(E, \mathbb{K}) :
\int_{E} \abs{f} < \infty \right\}.
\end{equation*}
Define the map
\begin{align*}
    \nu_1 : \mathcal{L}_1(E, \mathbb{K}) &\to [0, \infty) \\
f & \mapsto \int_{E} \abs{f}.
\end{align*}
Observe that
\begin{itemize}
\item $\nu_1(f) \geq 0$ for all $f \in \mathcal{L}_1(E, \mathbb{K})$;
\item $\nu_1(0) = \int_{E} \abs{0} = 0$;
\item $\kappa \in \mathbb{K} \implies$
\begin{equation*}
\nu_1(\kappa f) = \int_{E} \abs{\kappa f} = \abs{\kappa} \int_{E}
\abs{f} = \abs{\kappa} \nu_1(f);
\end{equation*}
and
\item $\forall f, g \in \mathcal{L}_1(E, \mathbb{K})$
\begin{equation*}
\nu_1(f + g) = \int_{E} \abs{f + g} \leq \int_{E} \abs{f} + \int_{E}
\abs{g} = \nu_1(f) + \nu_1(g).
\end{equation*}
\end{itemize}
However, it is important to notice that for any $x_0 \in E$,
\begin{equation*}
\nu_1(\chi_{\{x_0\}}) = \int_{\{ x_0 \}} 1 = 0.
\end{equation*}
  Thus $\nu_1$ is \hlimpo{not a norm}, since $\chi_{\{x_0\}} \neq 0$.
\end{eg}
\begin{propo}[Kernel of a Seminorm is a Linear Manifold]\label{propo:kernel_of_a_vector_space_is_a_linear_manifold}
Let $\mathcal{W}$ be a vector space over the field $\mathbb{K}$, and suppose
that $\nu$ is a seminorm on $\mathcal{W}$. Let
\begin{equation*}
    \mathcal{N} \coloneqq \{ w \in \mathcal{W} : \nu(w) = 0 \}.
\end{equation*}
Then $\mathcal{N}$ is a \hldefn{linear manifold} \sidenote{A subspace $M$ of a
Hilbert space, which is a vector space with an inner product such that
its \hlnotec{induced} norm, which in turn induces a metric on the space,
makes the space a complete metric space, is called a
linear manifold if it is \hlnotec{closed under addition and scalar multiplication}.
(Source: \citet{mathworld_linearmanifold})
Here, we can safely talk about Hilbert spaces because $\mathbb{K}$ is endowed
with an inner product. Furthermore, the check is to simply show that $M$ is a
subspace of the original space.} in
$\mathcal{W}$ and so $\mathcal{W} / \mathcal{N}$ is a vector space over
$\mathbb{K}$, whose elements we denote by
\begin{equation*}
[x] \coloneqq x + \mathcal{N}.
\end{equation*}
Furthermore, the map
\begin{align*}
\norm{\cdot} : \mathcal{W} / \mathcal{N} &\to \mathbb{K} \\
[x] &\mapsto \nu(x)
\end{align*}
is well-defined, and defines a norm on $\mathcal{W} / \mathcal{N}$.
\end{propo}
\begin{proof}
\hlbnoted{$\mathcal{N}$ is a linear manifold} Firstly, note that $\nu(0) = 0
\implies 0 \in \mathcal{N}$. Thus $\mathcal{N} \neq \emptyset$. Let $x, y \in
\mathcal{N}$ and $\kappa \in \mathbb{K}$. Then
\begin{equation*}
0 \leq \nu(\kappa x + y) \leq \abs{\kappa} \nu(x) + \nu(y) = 0 ,
\end{equation*}
which implies
\begin{equation*}
\nu(\kappa x + y) = 0.
\end{equation*}
Thus $\kappa x + y \in \mathcal{N}$.
\noindent
\hlbnoted{$\mathcal{W} / \mathcal{N}$ is a vector space over $\mathbb{K}$}
This is a result from elementary linear algebra theory, but let's do it for
revision. It is clear that $\mathcal{N} \in \mathcal{W} / \mathcal{N}$, so
$\mathcal{W} / \mathcal{N} \neq \emptyset$. Notice that for any $[x], [y] \in
\mathcal{W} / \mathcal{N}$ and $\kappa \in \mathbb{K}$, we define the
operations
\begin{equation*}
[\kappa x + y] = \kappa [x] + [y].
\end{equation*}
By the commutativity of addition,
\begin{equation*}
[x + y] = x + y + \mathcal{N} = y + x + \mathcal{N} = [y + x].
\end{equation*}
The additive identity is $[0] = 0 + \mathcal{N}$, the additive inverse of
$[x]$ is $[-x]$, and scalar multiplication by $1 \in \mathbb{K}$ fixes every
class, since $1 \cdot [x] = [1 \cdot x] = [x]$.
We note that $\mathcal{W} / \mathcal{N}$ is normally referred to as the
\hldefn{quotient space} of $\mathcal{W}$ by $\mathcal{N}$.
\noindent
\hlbnoted{$\norm{\cdot}$ is well-defined} Let $[x_1] = [x_2] \in
\mathcal{W} / \mathcal{N}$. Then $[x_1 - x_2] = [0]$ and so $x_1 - x_2 \in
\mathcal{N}$. Then
\begin{equation*}
\nu(x_1 - x_2) = 0,
\end{equation*}
and so since
\begin{equation*}
0 \leq \abs{\nu(x_1) - \nu(x_2)} \leq \nu(x_1 - x_2) = 0,
\end{equation*}
we have that $\nu(x_1) = \nu(x_2)$. Hence
\begin{equation*}
\norm{[x_1]} = \norm{[x_2]},
\end{equation*}
and so $\norm{\cdot}$ is well-defined.
\noindent
\hlbnoted{$\norm{\cdot}$ is a norm} Let $[x], [y] \in \mathcal{W} /
\mathcal{N}$ and $\kappa \in \mathbb{K}$. Then
\begin{itemize}
\item $\norm{[x]} = \nu(x) \geq 0$;
\item $\norm{\kappa[x]} = \norm{[\kappa x]} = \nu(\kappa x) = \abs{\kappa}
\nu(x) = \abs{\kappa} \norm{[x]}$;
\item $\norm{[x] + [y]} = \norm{[x + y]} = \nu(x + y) \leq \nu(x) + \nu(y) =
\norm{[x]} + \norm{[y]}$; and
\item $\norm{[x]} = 0 \implies \nu(x) = 0 \implies x \in \mathcal{N} \iff
[x] = [0] \in \mathcal{W} / \mathcal{N}$.
\end{itemize}
Thus $\norm{\cdot}$ is indeed a norm.
Hence, $\mathcal{W} / \mathcal{N}$ is a normed linear space.
\end{proof}
\begin{eg}
In our last example, we determined that $\nu_1(\cdot)$ is a seminorm on
$\mathcal{L}_1(E, \mathbb{K})$. Suppose
\begin{equation*}
g \in \mathcal{N}_1(E, \mathbb{K}) \coloneqq \{ f \in \mathcal{L}_1(E,
\mathbb{K}) : \nu_1(f) = 0 \}.
\end{equation*}
Then $\int_{E} \abs{g} = 0$. Since $mE > 0$, this happens if and only if $g =
0$ a.e. on $E$.
Since $g = 0$ a.e. on $E$ iff $\int_{E} \abs{g} = 0$, we can also define
\begin{equation*}
\mathcal{N}_1(E, \mathbb{K}) = \{ g \in \mathcal{L}_1(E, \mathbb{K}) : g = 0
\text{ a.e. on } E \}.
\end{equation*}
Setting
\begin{equation*}
L_1(E, \mathbb{K}) = \mathcal{L}_1(E, \mathbb{K}) / \mathcal{N}_1(E,
\mathbb{K}),
\end{equation*}
we have that $[f] = [g]$ iff $f - g \in \mathcal{N}_1(E, \mathbb{K})$, i.e. $f
= g$ a.e. on $E$.
\end{eg}
\begin{defn}[$L_1$-space]\index{$L_1$-space}\label{defn:_l_1_space}
Let $E \in \mathfrak{M}(\mathbb{R})$ with $mE > 0$. We define the $L_1$-space
as
\begin{equation*}
L_1(E, \mathbb{K}) := \mathcal{L}_1(E, \mathbb{K}) / \mathcal{N}_1(E,
\mathbb{K}),
\end{equation*}
with the norm
\begin{align*}
\norm{\cdot} : L_1(E, \mathbb{K}) &\to \mathbb{R} \\
\norm{[f]} &\coloneqq \nu_1(f) = \int_{E} \abs{f}.
\end{align*}
\end{defn}
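As a quick arithmetic check of this definition: taking $E = [0, 1]$ and
$f(x) = x$, we get
\begin{equation*}
  \norm{[f]} = \int_{E} \abs{f} = \int_{0}^{1} x \, dx = \frac{1}{2}.
\end{equation*}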
\begin{defn}[$\mathcal{L}_p(E, \mathbb{K})$]\index{$\mathcal{L}_p(E, \mathbb{K})$}\label{defn:_l_p_e_k_}
Let $E \in \mathfrak{M}(\mathbb{R})$ with $mE > 0$. If $1 < p < \infty$ in
$\mathbb{R}$, we define
\begin{align*}
\mathcal{L}_p(E, \mathbb{K})
&\coloneqq \{ f \in \mathcal{L}(E, \mathbb{K}) :
\int_{E} \abs{f}^p < \infty \} \\
&= \{ f \in \mathcal{L}(E, \mathbb{K}) : \abs{f}^p \in \mathcal{L}_1(E,
\mathbb{K}) \}.
\end{align*}
\end{defn}
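To see that these spaces genuinely depend on $p$, here is a quick example
(not from the lecture): on $E = (0, 1]$, let $f(x) = x^{-\frac{1}{2}}$. Then
\begin{equation*}
  \int_{E} \abs{f}^p = \int_{0}^{1} x^{-\frac{p}{2}} \, dx < \infty
  \iff \frac{p}{2} < 1,
\end{equation*}
so $f \in \mathcal{L}_p((0, 1], \mathbb{R})$ precisely when $1 \leq p < 2$.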
We need to show that $\mathcal{L}_p(E, \mathbb{K})$ is a vector space for all $1
< p < \infty$, and that
\begin{equation*}
\nu_p(f) \coloneqq \left( \int_{E} \abs{f}^p \right)^{\frac{1}{p}}
\end{equation*}
defines a semi-norm on $\mathcal{L}_p(E, \mathbb{K})$. If we can establish these
results, we can then appeal to
\cref{propo:kernel_of_a_vector_space_is_a_linear_manifold} and take the quotient
space wrt a similar kernel.
However, the proof of the triangle inequality of $\nu_p$ is a non-trivial
exercise.
\begin{defn}[Lebesgue Conjugate]\index{Lebesgue Conjugate}\label{defn:lebesgue_conjugate}
Let $1 \leq p \leq \infty$. We associate to $p$ the number $1 \leq q \leq
\infty$ as follows:
\begin{itemize}
\item if $p = 1$, then $q = \infty$ ;
\item if $p = \infty$, then $q = 1$ ; and finally
\item $1 < p < \infty \implies$
\begin{equation*}
q = \left( 1 - \frac{1}{p} \right)^{-1}.
\end{equation*}
\end{itemize}
We say that $q$ is the \hlnoteb{Lebesgue conjugate} of $p$. With the
convention that $\frac{1}{\infty} \coloneqq 0$, we see that in all cases,
\begin{equation*}
\frac{1}{p} + \frac{1}{q} = 1.
\end{equation*}
\end{defn}
\begin{note}
When $1 < p < \infty$, we see that the above equation is equivalent to each of
the equations:
\begin{itemize}
\item $p(q - 1) = q$ and
\item $(p - 1)q = p$.
\end{itemize}
\end{note}
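As a quick check of the arithmetic: if $p = 2$, then
$q = \left( 1 - \frac{1}{2} \right)^{-1} = 2$, so $p = 2$ is its own
Lebesgue conjugate; if $p = 3$, then
$q = \left( 1 - \frac{1}{3} \right)^{-1} = \frac{3}{2}$, and indeed
$\frac{1}{3} + \frac{2}{3} = 1$,
$p(q - 1) = 3 \cdot \frac{1}{2} = \frac{3}{2} = q$, and
$(p - 1)q = 2 \cdot \frac{3}{2} = 3 = p$.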
\begin{lemma}[Young's Inequality]\index{Young's Inequality}\label{lemma:young_s_inequality}
If $1 < p < \infty$ and $q$ is the Lebesgue conjugate of $p$, then for $0 < a,
b \in \mathbb{R}$,
\begin{enumerate}
\item $ab \leq \frac{a^p}{p} + \frac{b^q}{q}$; and
\item equality in the above holds iff $a^p = b^q$.
\end{enumerate}
\end{lemma}
\marginnote{There's another proof that I prefer over this construction, which
feels like we just lucked out. See
\href{https://tex.japorized.ink/PMATH351F18/classnotes.pdf\#lemma.27}{PMATH351}.}
\begin{proof}
Let $g : (0, \infty) \to \mathbb{R}$ be such that
\begin{equation*}
x \mapsto \frac{1}{p} x^p + \frac{1}{q} - x.
\end{equation*}
Notice that $g$ is differentiable on $(0, \infty)$, and we have
\begin{equation*}
g'(x) = x^{p-1} - 1.
\end{equation*}
Furthermore,
\begin{itemize}
\item $g'(x) < 0$ for $x \in (0, 1)$;
\item $g'(1) = 0$; and
\item $g'(x) > 0$ for $x \in (1, \infty)$.
\end{itemize}
Also, note that $g(1) = \frac{1}{p} + \frac{1}{q} - 1 = 0$. Thus by the above
observation, we know that $g$ attains its minimum at $1$. Let $x_0 =
\frac{a}{b^{q-1}} > 0$. Then we have
\begin{align*}
0 \leq g(x_0)
&= \frac{1}{p} \left( \frac{a^p}{b^{(q-1)p}} \right) + \frac{1}{q} -
\frac{a}{b^{q-1}} \\
&= \frac{1}{p} \frac{a^p}{b^q} + \frac{1}{q} - \frac{a}{b^{q-1}}.
\end{align*}
Thus
\begin{equation*}
\frac{a}{b^{q-1}} \leq \frac{1}{p} \frac{a^p}{b^q} + \frac{1}{q}.
\end{equation*}
Multiplying both sides by $b^q$, we get
\begin{equation*}
ab \leq \frac{1}{p} a^p + \frac{1}{q} b^q.
\end{equation*}
Furthermore, we notice that
\begin{equation*}
g(x_0) = 0 \iff x_0 = 1 \iff a = b^{q-1} \iff a^p = b^{p(q-1)} = b^q.
\end{equation*}
\end{proof}
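A quick sanity check of the lemma: taking $p = q = 2$, Young's Inequality
reads
\begin{equation*}
  ab \leq \frac{a^2}{2} + \frac{b^2}{2},
\end{equation*}
i.e. $2ab \leq a^2 + b^2$, which is just $(a - b)^2 \geq 0$; equality holds
iff $a = b$, matching the condition $a^p = b^q$.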
% section _l_p_spaces (end)
% chapter lecture_10_jun_06th_2019 (end)
\chapter{Lecture 11 Jun 11th 2019}%
\label{chp:lecture_11_jun_11th_2019}
% chapter lecture_11_jun_11th_2019
\section{\texorpdfstring{$L_p$}{Lp} Spaces Continued}%
\label{sec:_l_p_spaces_continued}
% section _l_p_spaces_continued
\begin{thm}[H\"{o}lder's Inequality]\index{H\"{o}lder's Inequality}\label{thm:holders_inequality}
Let $E \in \mathfrak{M}(\mathbb{R})$, $1 < p < \infty$ in $\mathbb{R}$, and
let $q$ be the Lebesgue conjugate of $p$. Then
\begin{enumerate}
\item If $f \in \mathcal{L}_p(E, \mathbb{K})$ and $g \in \mathcal{L}_q(E,
\mathbb{K})$, then $fg \in \mathcal{L}_1(E, \mathbb{K})$ and
\begin{equation*}
\nu_1(fg) \leq \nu_p(f) \nu_q(g),
\end{equation*}
where
\begin{equation*}
\nu_p(f) = \left( \int_{E} \abs{f}^p \right)^{\frac{1}{p}} \text{ and }
\nu_q(g) = \left( \int_{E} \abs{g}^q \right)^{\frac{1}{q}}
\end{equation*}
\item Suppose that $H \coloneqq \{ x \in E : f(x) \neq 0 \}$ has positive
measure. If
\begin{equation*}
f^* \coloneqq \nu_p(f)^{1-p} \overline{\Theta} \abs{f}^{p-1},
\end{equation*}
which is called the \hldefn{Lebesgue conjugate function}, then $f^* \in
\mathcal{L}_q(E, \mathbb{K})$, $\nu_q(f^*) = 1$, and
\begin{equation*}
\nu_1(ff^*) = \int_{E} ff^* = \nu_p(f).
\end{equation*}
\end{enumerate}
\end{thm}
\begin{proof}
\begin{enumerate}
\item If $f = 0$ or $g = 0$ a.e. on $E$, then the inequality is trivially
true. So wma $f \neq 0 \neq g$ a.e. on $E$. Now, for any $\alpha, \beta
\in \mathbb{K}$, $\alpha f \in \mathcal{L}_p(E, \mathbb{K})$ and $\beta g
\in \mathcal{L}_q(E, \mathbb{K})$ since
\begin{equation*}
\int_{E} \abs{\alpha f}^p = \abs{\alpha}^p \int_{E} \abs{f}^p < \infty
\end{equation*}
and
\begin{equation*}
\int_{E} \abs{\beta g}^q = \abs{\beta}^q \int_{E} \abs{g}^q < \infty.
\end{equation*}
Supposing that we can find $\alpha_0 \neq 0 \neq \beta_0$ such that
\begin{equation*}
\int_{E} \abs{(\alpha_0 f)(\beta_0 g)} \leq \nu_p(\alpha_0 f)
\nu_q(\beta_0 g),
\end{equation*}
we see that we can factor out $\alpha_0$ and $\beta_0$ so that
\begin{equation*}
\abs{\alpha_0 \beta_0} \int_{E} \abs{fg} \leq \abs{\alpha_0 \beta_0}
\nu_p(f) \nu_q(g),
\end{equation*}
which then
\begin{equation*}
\int_{E} \abs{fg} \leq \nu_p(f) \nu_q(g).
\end{equation*}
Thus, choosing $\alpha_0 = \nu_p(f)^{-1}$ and $\beta_0 = \nu_q(g)^{-1}$,
wma wlog $\nu_p(f) = 1 = \nu_q(g)$.
Now, by \cref{lemma:young_s_inequality}, we obtain
\begin{equation*}
\abs{fg} \leq \frac{\abs{f}^p}{p} + \frac{\abs{g}^q}{q}.
\end{equation*}
Thus
\begin{align*}
\nu_1(fg) &= \int_{E} \abs{fg}
\leq \frac{1}{p} \int_{E} \abs{f}^p + \frac{1}{q} \int_{E}
\abs{g}^q \\
&= \frac{1}{p} \nu_p(f)^p + \frac{1}{q} \nu_q(g)^q \\
&= \frac{1}{p} \cdot 1 + \frac{1}{q} \cdot 1 \\
&= 1 = \nu_p(f) \nu_q(g).
\end{align*}
\item First, note that $f^*$ is measurable, since $f, \abs{f}$ and $\Theta$
are all measurable (cf.
\cref{propo:measurable_function_broken_down_into_an_absolute_part_and_a_scaling_part}
and \cref{propo:_l_e_k_is_a_unital_algebra}). Since $(p-1)q = p$, we have
\sidenote{$\Theta$ disappears in the next computation because
$\abs{\overline{\Theta}} = 1$, and so it is absorbed when we take absolute
values.}
\begin{align*}
\nu_q(f^*)^q &= \int_{E} \abs{f^*}^q = \int_{E} \left( \nu_p(f)^{1-p}
\abs{f}^{p-1} \right)^q \\
&= \nu_p(f)^{-(p-1)q} \int_{E} \abs{f}^{(p-1)q} \\
&= \nu_p(f)^{-p} \nu_p(f)^{p} = 1.
\end{align*}
Finally,
\begin{align*}
\nu_1(ff^*) &= \int_{E} \abs{ff^*}
= \int_{E} \nu_p(f)^{1-p} \abs{f}^{p-1} \abs{f} \\
&= \nu_p(f)^{1-p} \int_{E} \abs{f}^p \\
&= \nu_p(f)^{1-p} \nu_p(f)^{p} \\
&= \nu_p(f).
\end{align*}
\end{enumerate}
\end{proof}
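It may help to note the special case $p = q = 2$ here: H\"{o}lder's
Inequality then reads
\begin{equation*}
  \int_{E} \abs{fg} \leq \left( \int_{E} \abs{f}^2 \right)^{\frac{1}{2}}
  \left( \int_{E} \abs{g}^2 \right)^{\frac{1}{2}},
\end{equation*}
which is the Cauchy-Schwarz inequality for integrals.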
\begin{thm}[Minkowski's Inequality]\index{Minkowski's Inequality}\label{thm:minkowski_s_inequality}
Let $E \in \mathfrak{M}(\mathbb{R})$, $1 < p < \infty$. If $f, g \in
\mathcal{L}_p(E, \mathbb{K})$, then $f + g \in \mathcal{L}_p(E, \mathbb{K})$
and
\begin{equation*}
\nu_p(f + g) \leq \nu_p(f) + \nu_p(g).
\end{equation*}
\end{thm}
\begin{proof}
$f + g$ is measurable by \cref{propo:_l_e_k_is_a_unital_algebra}. Notice that
for $0 \leq a, b$, we have
\begin{equation*}
(a + b)^p \leq \left( 2 \max \{ a, b \} \right)^p \leq 2^p (a^p + b^p).
\end{equation*}
Thus
\begin{equation*}
\abs{f + g}^p \leq \left( \abs{f} + \abs{g} \right)^p \leq 2^p \left(
\abs{f}^p + \abs{g}^p \right).
\end{equation*}
It follows that
\begin{equation*}
\nu_p(f + g)^p = \int_{E} \abs{f + g}^p \leq 2^p \left( \nu_p(f)^p +
\nu_p(g)^p \right) < \infty.
\end{equation*}
Thus $f + g \in \mathcal{L}_p(E, \mathbb{K})$.
Now let $h = f + g$, and $h^*$ the Lebesgue conjugate function of $h$. Then
$h^* \in \mathcal{L}_q(E, \mathbb{K})$. By the last theorem, $\nu_q(h^*) = 1$
and $\nu_1(hh^*) = \nu_p(h)$. With this, and
\hyperref[thm:holders_inequality]{H\"{o}lder's Inequality}, we have
\begin{align*}
\nu_p(f + g) &= \nu_p(h) = \nu_1(hh^*) \\
&= \nu_1((f + g)h^*) \\
&\leq \nu_1(f h^*) + \nu_1(g h^*) \\
&\overset{(*)}{\leq} \nu_p(f) \nu_q(h^*) + \nu_p(g) \nu_q(h^*)
\\
&= \nu_p(f) + \nu_p(g),
\end{align*}
where $(*)$ is where we use H\"{o}lder's Inequality.
\end{proof}
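The restriction $p \geq 1$ is not cosmetic. As a quick example (not from the
lecture) of how the triangle inequality fails for $0 < p < 1$ (with $\nu_p$
defined by the same formula): on $[0, 1]$, take $f = \chi_{[0, \frac{1}{2}]}$
and $g = \chi_{(\frac{1}{2}, 1]}$. Then
\begin{equation*}
  \nu_p(f + g) = \left( \int_{[0, 1]} 1 \right)^{\frac{1}{p}} = 1, \quad
  \text{while} \quad
  \nu_p(f) + \nu_p(g) = 2 \left( \frac{1}{2} \right)^{\frac{1}{p}}
  = 2^{1 - \frac{1}{p}} < 1.
\end{equation*}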
We are finally ready to show that $\mathcal{L}_p (E, \mathbb{K})$ is a vector
space and $\nu_p$ is a semi-norm as claimed.
\begin{crly}[$\nu_p$ is a Semi-Norm]\label{crly:_nu_p_is_a_semi_norm}
Let $E \in \mathfrak{M}(\mathbb{R})$ and $1 < p < \infty$. Then
$\mathcal{L}_p(E, \mathbb{K})$ is a vector space over $\mathbb{K}$ and
$\nu_p$ defines a semi-norm on $\mathcal{L}_p(E, \mathbb{K})$.
\end{crly}
\begin{proof}
\hlbnoted{$\mathcal{L}_p(E, \mathbb{K})$ is a vector space} Since $\mathbb{K}$
is a vector space, we need only check that $\mathcal{L}_p(E, \mathbb{K})$ is
nonempty, and closed under addition and scalar multiplication.
\noindent
\hlbnotec{$\mathcal{L}_p(E, \mathbb{K}) \neq \emptyset$} It is clear that the
constant function, $f(x) = 0$ for all $x \in E$, is in $\mathcal{L}_p(E,
\mathbb{K})$ since
\begin{equation*}
\int_{E} \abs{f}^p = \int_{E} 0 = 0 < \infty.
\end{equation*}
\noindent
\hlbnotec{$\mathcal{L}_p(E, \mathbb{K})$ is closed under addition and scalar
multiplication} Let $f, g \in \mathcal{L}_p(E, \mathbb{K})$ and $\kappa \in
\mathbb{K}$. Then by Minkowski's Inequality,
\begin{equation*}
\nu_p(\kappa f + g) \leq \nu_p(\kappa f) + \nu_p(g) = \abs{\kappa} \nu_p(f)
+ \nu_p(g) < \infty.
\end{equation*}
\noindent
\hlbnoted{$\nu_p$ is a semi-norm} We have already shown the first two
conditions, and Minkowski's Inequality covers the triangle inequality.
\end{proof}
\begin{defn}[$L_p$-Space and $L_p$-Norm]\index{$L_p$-Space}\index{$L_p$-Norm}\label{defn:_l_p_space}\label{defn:_l_p_norm}
Let $E \in \mathfrak{M}(\mathbb{R})$ and $1 < p < \infty$. We define the
\hlnoteb{$L_p$-space}
\begin{equation*}
L_p(E, \mathbb{K}) \coloneqq \mathcal{L}_p(E, \mathbb{K}) / \mathcal{N}_p(E,
\mathbb{K}),
\end{equation*}
where \sidenote{Note that $\mathcal{N}_p(E, \mathbb{K})$ is where functions
are $0$ a.e.}
\begin{equation*}
\mathcal{N}_p(E, \mathbb{K}) = \{ f \in \mathcal{L}_p(E, \mathbb{K}) :
\nu_p(f) = 0 \}.
\end{equation*}
The \hlnoteb{$L_p$-norm} on $L_p(E, \mathbb{K})$ is the norm defined by
\begin{align*}
\norm{\cdot}_p : L_p(E, \mathbb{K}) &\to \mathbb{R} \\
[f] &\mapsto \nu_p(f).
\end{align*}
\end{defn}
For the sake of completeness, we shall restate H\"{o}lder's and Minkowski's
Inequalities for $L_p(E, \mathbb{K})$.
\begin{thm}[H\"{o}lder's Inequality]\index{H\"{o}lder's Inequality}\label{thm:holders_inequality_lp_spaces}
Let $E \in \mathfrak{M}(\mathbb{R})$ and $1 < p < \infty$. Let $q$ denote the
Lebesgue conjugate of $p$.
\begin{enumerate}
\item If $[f] \in L_p(E, \mathbb{K})$ and $[g] \in L_q(E, \mathbb{K})$, then
$[f][g] \coloneqq [fg] \in L_1(E, \mathbb{K})$ is well-defined and
\begin{equation*}
\norm{[fg]}_1 \leq \norm{[f]}_p \norm{[g]}_q.
\end{equation*}
\item If $0 \neq [f] \in L_p(E, \mathbb{K})$ and $f^*$ is the conjugate
function of $f$, then $[f^*] \in L_q(E, \mathbb{K})$, $\norm{[f^*]}_q =
1$, and
\begin{equation*}
\norm{[f][f^*]}_1 = \norm{[f]}_p.
\end{equation*}
\end{enumerate}
\end{thm}
\begin{proof}
The only part that does not follow immediately from
\cref{thm:holders_inequality} is the well-definedness of $[f][g] = [fg]$:
if $f' = f$ a.e. on $E$ and $g' = g$ a.e. on $E$, then $f'g' = fg$ a.e. on
$E$, and so $[f'g'] = [fg]$.
\end{proof}
\begin{thm}[Minkowski's Inequality]\index{Minkowski's Inequality}\label{thm:minkowski_s_inequality_lp_spaces}
Let $E \in \mathfrak{M}(\mathbb{R})$ and $1 < p < \infty$. If $[f], [g] \in
L_p(E, \mathbb{K})$, then $[f + g] \in L_p(E, \mathbb{K})$ and
\begin{equation*}
\norm{[f + g]}_p = \norm{[f] + [g]}_p \leq \norm{[f]}_p + \norm{[g]}_p.
\end{equation*}
\end{thm}
We can now show that $L_p(E, \mathbb{K})$ is a Banach space for all $1 \leq p <
\infty$; the proof shall be left for next lecture.
% section _l_p_spaces_continued (end)
% chapter lecture_11_jun_11th_2019 (end)
\chapter{Lecture 12 Jun 18th 2019}%
\label{chp:lecture_12_jun_18th_2019}
% chapter lecture_12_jun_18th_2019
\section{\texorpdfstring{$L_p$}{Lp} Spaces (Continued 2)}%
\label{sec:_l_p_spaces_continued_2}
% section _l_p_spaces_continued_2
\begin{thm}[$(L_p(E, \mathbb{K}), \norm{\cdot}_p)$ is Banach Space]\label{thm:lp_spaces_are_banach}
Let $E \in \mathfrak{M}(\mathbb{R})$ and $1 \leq p < \infty$. Then $L_p(E,
\mathbb{K})$ is complete and hence Banach.
\end{thm}
\begin{strategy}
By \cref{propo:kernel_of_a_vector_space_is_a_linear_manifold}, $(L_p(E,
\mathbb{K}), \norm{\cdot}_p)$ is a normed linear space. It thus suffices for
us to show that it is complete.
The professor's preferred approach is to prove completeness via the
equivalent criterion that a normed linear space is complete iff every
absolutely summable series in the space is summable. We prove this
equivalence in A4.
So given an absolutely summable sequence $\{ [f_n] \}_{n=1}^{\infty}$, since
we want
\begin{equation*}
\sum_{n=1}^{\infty} [f_n] < \infty \text{ a.e. on } E,
\end{equation*}
in particular this should be reflected by any of its representatives, i.e. if
we take, wlog, $f_n$ as the representative of $[f_n]$, then we want
\begin{equation*}
h = \sum_{n=1}^{\infty} f_n < \infty \text{ a.e. on } E.
\end{equation*}
To show that the sum is finite a.e. on $E$, we will first make use of the fact
that this would be equivalent to
\begin{equation*}
\abs{h} = \abs{ \sum_{n=1}^{\infty} f_n } < \infty \text{ a.e. on } E.
\end{equation*}
To that end, the partial sums should always be finite. By the triangle
inequality, we see that
\begin{equation*}
\abs{ \sum_{n=1}^{N} f_n } \leq \sum_{n=1}^{N} \abs{f_n}.
\end{equation*}
This is where our `clean' proof begins.
\end{strategy}
\begin{proof}
Suppose $\{ [f_n] \}_{n=1}^{\infty}$ is a sequence of equivalence classes in
$L_p(E, \mathbb{K})$ that is absolutely summable. We note that the following
value will be useful, and so we give it a variable.
\begin{equation*}
\gamma \coloneqq \sum_{n=1}^{\infty} \norm{[f_n]}_p.
\end{equation*}
\noindent
\hlbnoted{Showing that $\sum_{n=1}^{\infty} f_n(x)$ converges a.e. on $E$} For
each $N \geq 1$, let $g_N = \sum_{n=1}^{N} \abs{f_n}$. Note that since $f_n
\in \mathcal{L}_p(E, \mathbb{K})$, by \cref{crly:_nu_p_is_a_semi_norm}, we
have that $g_N \in \mathcal{L}_p(E, [0, \infty])$. Furthermore, since $g_N$ is
a sum of absolute values, we have that
\begin{equation*}
0 \leq g_1 \leq g_2 \leq g_3 \leq \hdots.
\end{equation*}
Let $g_\infty \coloneqq \lim_{N \to \infty} g_N = \sup_{N \geq 1} g_N$. By
\cref{propo:measurability_of_limits_and_extremas}, $g_\infty \in
\mathcal{L}(E, [0, \infty])$. \sidenote{Now, we want to show that even
$g_\infty < \infty$ a.e. on $E$. Following this is a non-trivial step
forward.} Note that $g_\infty^p = \sup_{N \geq 1} g_N^p$. By the
\hyperref[thm:the_monotone_convergence_theorem]{Monotone Convergence Theorem},
we observe that
\begin{align*}
\int_{E} g_\infty^p &= \lim_{N \to \infty} \int_{E} g_N^p \\
&= \lim_{N \to \infty} \int_{E} \left(\sum_{n=1}^{N}
\abs{f_n}\right)^p \\
&= \lim_{N \to \infty} \nu_p \left( \sum_{n=1}^{N}
\abs{f_n} \right)^p \\
&\leq \lim_{N \to \infty} \left(\sum_{n=1}^{N}
\nu_p(\abs{f_n})\right)^p \\
&\leq \left( \sum_{n=1}^{\infty} \norm{[f_n]}_p
\right)^p = \gamma^p < \infty
\end{align*}
by assumption. Thus $g_\infty \in \mathcal{L}_p(E, \mathbb{K})$, which means
that $g_\infty < \infty$ a.e. on $E$. From here, we observe that
\begin{equation*}
\abs{ \sum_{n=1}^{\infty} f_n } \leq \sum_{n=1}^{\infty} \abs{f_n} =
g_\infty < \infty \text{ a.e. on } E.
\end{equation*}
Then since $\mathbb{K}$ is complete, $\sum_{n=1}^{\infty} f_n(x)$ converges
absolutely, and hence converges, to some value in $\mathbb{K}$ for a.e.
$x \in E$.
\noindent
\hlbnoted{Constructing $h = \sum_{n=1}^{\infty} f_n$ a.e. on $E$} In
particular, we want the above sum to converge to some function $h =
\sum_{n=1}^{\infty} f_n$ a.e. on $E$. We want to explicitly isolate the points
where the sum goes bad. Letting
\begin{equation*}
B \coloneqq \{ x \in E : g_\infty(x) = \infty \} \subseteq E,
\end{equation*}
we have that $mB = 0$. Consider $H = E \setminus B \in \mathfrak{M}(E)$.
\sidenote{We will build $h$ on this nicer set.} Here, let $g = \chi_H \cdot
g_\infty$. Note that since $H \in \mathfrak{M}(E)$, $\chi_H$ is measurable,
and so by \cref{propo:_l_e_k_is_a_unital_algebra}, $g \in \mathcal{L}(E,
[0, \infty))$, and $g = g_\infty$ a.e. on $E$. Furthermore,
\begin{equation*}
\int_{E} g^p = \int_{E} g_\infty^p \leq \gamma^p,
\end{equation*}
and so $g \in \mathcal{L}_p(E, [0, \infty)) \subseteq \mathcal{L}_p(E, \mathbb{K})$,
i.e. $[g] \in L_p(E, \mathbb{K})$ and $\norm{[g]}_p \leq \gamma$.
For each $N \geq 1$, let $h_N \coloneqq \chi_H \cdot
\left( \sum_{n=1}^{N} f_n \right)$. By the same reasoning as for $g$, we have
that $h_N \in \mathcal{L}_p(E, \mathbb{K}) \subseteq \mathcal{L}(E,
\mathbb{K})$. Moreover, it is clear from construction that $[h_N] =
\sum_{n=1}^{N} [f_n]$, since $h_N = \sum_{n=1}^{N} f_n$ a.e. on $E$, in
particular, they agree on $H$. It is also important to note that for $x \in
H$,
\begin{equation*}
\abs{h_N(x)} \leq \sum_{n=1}^{N} \abs{f_n(x)} \leq g(x),
\end{equation*}
and for $x \in B$, $\abs{h_N(x)} = 0 = g(x)$. Thus $\abs{h_N} \leq g$, and so
$\abs{h_N}^p \leq g^p$. So for each $N \geq 1$, we have
\begin{equation*}
\int_{E} \abs{h_N}^p \leq \int_{E} g^p \leq \gamma^p.
\end{equation*}
Since the partials are all well-defined, we can define
\begin{equation*}
h(x) \coloneqq \lim_{N \to \infty} h_N(x) \in \mathbb{K} \text{ for } x \in
E.
\end{equation*}
Again, by \cref{propo:measurability_of_limits_and_extremas}, $h \in
\mathcal{L}(E, \mathbb{K})$. Furthermore, since each $\abs{h_N} \leq g$, we
have that $\abs{h} \leq g$ and $\abs{h}^p \leq g^p$, which then
\begin{equation*}
\int_{E} \abs{h}^p \leq \int_{E} g^p \leq \gamma^p < \infty.
\end{equation*}
It follows that $h \in \mathcal{L}_p(E, \mathbb{K})$ and $[h] \in L_p(E,
\mathbb{K})$.
\noindent
\hlbnoted{$[h] = \lim_{N \to \infty} [h_N]$} It remains for us to show that
this equation is true. In other words, we want to show that
\begin{equation*}
\lim_{N \to \infty} \norm{[h] - [h_N]}_p = \lim_{N \to \infty} \norm{[h] -
\sum_{n=1}^{N} [f_n]}_p = 0.
\end{equation*}
Note that $\abs{h_M - h_N}^p \leq \left( \abs{h_M} + \abs{h_N} \right)^p \leq
(g + g)^p$ for any $M, N$, and $\int_{E} (2 \abs{g})^p < \infty$. Then,
satisfying the condition for the
\hyperref[thm:lebesgue_dominated_convergence_theorem]{Lebesgue Dominated
Convergence Theorem}, we have
\begin{align*}
\norm{[h] - [h_N]}_p
&= \nu_p(h - h_N) \\
&= \left( \int_{E} \abs{h - h_N}^p \right)^{\frac{1}{p}} \\
&= \left( \int_{E} \lim_{M \to \infty} \abs{h_M - h_N}^p
\right)^{\frac{1}{p}} \\
&= \left( \lim_{M \to \infty} \int_{E} \abs{h_M - h_N}^p
\right)^{\frac{1}{p}} \\
&= \lim_{M \to \infty} \left( \int_{E} \abs{h_M - h_N}^p
\right)^{\frac{1}{p}} \\
&= \lim_{M \to \infty} \norm{[h_M] - [h_N]}_p \\
&= \lim_{M \to \infty} \norm{\sum_{n = N + 1}^{M} [f_n]}_p \\
&\leq \lim_{M \to \infty} \sum_{n=N+1}^{M} \norm{[f_n]}_p \\
&= \sum_{n=N+1}^{\infty} \norm{[f_n]}_p
\end{align*}
Since $\sum_{n=1}^{\infty} \norm{[f_n]}_p = \gamma < \infty$ by assumption,
we have that
\begin{equation*}
\lim_{N \to \infty} \norm{[h] - [h_N]}_p = \lim_{N \to \infty}
\sum_{n=N+1}^{\infty} \norm{[f_n]}_p = 0.
\end{equation*}
This completes the proof.
\end{proof}
Notice that in \cref{thm:lp_spaces_are_banach} we talked about $1 \leq p <
\infty$ but not $p = \infty$ itself. We shall explore this in the following
subsection.
\subsection{Completeness of \texorpdfstring{$L_\infty(E, \mathbb{K})$}{Linfty(E, K)}}%
\label{sub:completeness_of_l_infty_e_k_}
% subsection completeness_of_l_infty_e_k_
We need to first clarify what the norm in $L_\infty(E, \mathbb{K})$ is. It would
be sensible to simply let the norm be the supremum of the function, but we
want to ignore the values that $f$ attains only on a set of measure zero.
\begin{defn}[Essential Supremum]\index{Essential Supremum}\label{defn:essential_supremum}
Let $E \in \mathfrak{M}(\mathbb{R})$ and $f \in \mathcal{L}(E, \mathbb{K})$.
We define the \hlnoteb{essential supremum} of $f$ on $E$ as
\begin{equation*}
\nu_\infty(f) \coloneqq \inf \left\{ \gamma > 0 : m \left( \{ x \in E :
\abs{f(x)} > \gamma \} \right) = 0 \right\}.
\end{equation*}
\end{defn}
\begin{note}
\begin{enumerate}
\item Let us try to describe the essential supremum in words: we pick out
the smallest $\gamma$ (specifically, we take the inf) such that the set of
places on $E$ where $\abs{f} > \gamma$ has measure zero. Graphically, we
set lower and lower $\gamma$ until we finally hit some value where the set
of places at which $\abs{f} > \gamma$ no longer has measure zero.
\item Simply by definition, we have that $\nu_\infty(f) \geq 0$ for any $f
\in \mathcal{L}(E, \mathbb{K})$.
\end{enumerate}
\end{note}
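As a quick illustration of the difference between the supremum and the
essential supremum: on $E = [0, 1]$, let
$f = \chi_{[0, 1]} + 4 \chi_{\{\frac{1}{2}\}}$, so that $f(x) = 1$ for
$x \neq \frac{1}{2}$ and $f(\frac{1}{2}) = 5$. Then
$\sup_{x \in E} \abs{f(x)} = 5$, but for any $\gamma \geq 1$ the set
$\{ x \in E : \abs{f(x)} > \gamma \} \subseteq \{ \frac{1}{2} \}$ has
measure zero, whereas for $\gamma < 1$ that set is all of $[0, 1]$. Thus
$\nu_\infty(f) = 1$.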
\begin{defn}[$\mathcal{L}_\infty(E, \mathbb{K})$]\index{$\mathcal{L}_\infty(E, \mathbb{K})$}\label{defn:_call_infty_e_k_}
With the essential supremum, we can define
\begin{equation*}
\mathcal{L}_\infty(E, \mathbb{K}) = \{ f \in \mathcal{L}(E, \mathbb{K}) :
\nu_\infty(f) < \infty \}.
\end{equation*}
\end{defn}
\begin{eg}\label{eg:continuous_functions_are_in_L_infty}
\begin{enumerate}
\item Let $E = \mathbb{R}$ and $f = \chi_{\mathbb{Q}}$. Observe that for any
$\gamma > 0$, since
\begin{equation*}
\{ x \in \mathbb{R} : \abs{ \chi_{\mathbb{Q}} } > \gamma \} \subseteq
\mathbb{Q},
\end{equation*}
we have
\begin{equation*}
0 \leq m \{ x \in \mathbb{R} : \abs{ \chi_{\mathbb{Q}} } > \gamma \}
\leq m \mathbb{Q} = 0.
\end{equation*}
Thus $\nu_\infty(\chi_\mathbb{Q}) = 0$.
Note that there was nothing special about the choice of $\mathbb{Q}$
except that it is a set of measure zero.
\item Suppose $a < b \in \mathbb{R}$ and $f \in \mathcal{C}([a, b] ,
\mathbb{K})$.
\hlbnotec{Claim: $f \in \mathcal{L}_\infty([a, b], \mathbb{K})$ and
$\nu_\infty(f) = \norm{f}_{\sup} \coloneqq \sup_{x \in [a, b]}
\abs{f(x)}$}
We know that every continuous function on a measurable set is
measurable
\sidenote{cf.
\cref{propo:continuous_functions_on_a_measurable_set_is_measurable}},
so $f \in \mathcal{L}([a, b], \mathbb{K})$.
Note that for $\gamma > \norm{f}_{\sup}$, we have that
\begin{equation*}
m \left( \left\{ x \in [a, b] : \abs{f(x)} > \gamma \right\} \right) =
m(\emptyset) = 0.
\end{equation*}
So $\nu_\infty(f) \leq \gamma$. Since this holds for all such $\gamma$, it
follows that $\nu_\infty(f) \leq \norm{f}_{\sup}$.
On the other hand, let $\gamma < \norm{f}_{\sup} = \abs{f(x_0)}$ for some
$x_0 \in [a, b]$. By continuity of $f$ on $[a, b]$, and in particular at
$x_0$, $\exists \delta > 0$ such that $x \in (x_0 - \delta, x_0 +
\delta) \cap [a, b]$ implies that $\abs{f(x)} > \gamma$. Notice that
\begin{equation*}
m \left( (x_0 - \delta, x_0 + \delta) \cap [a, b] \right) > 0,
\end{equation*}
which means that
\begin{equation*}
\nu_\infty(f) \geq \gamma.
\end{equation*}
This holds for all such $\gamma$, and so
\begin{equation*}
\nu_\infty(f) \geq \norm{f}_{\sup}.
\end{equation*}
Thus
\begin{equation*}
\nu_\infty(f) = \norm{f}_{\sup},
\end{equation*}
which also gives us that
\begin{equation*}
f \in \mathcal{L}_\infty([a, b], \mathbb{K}).
\end{equation*}
\end{enumerate}
\end{eg}
\begin{propo}[$\mathcal{L}_\infty(E, \mathbb{K})$ is a vector space and $\nu_\infty(\cdot)$ a semi-norm]\label{propo:_l_infty_e_k_is_a_vector_space_and_nu_infty_cdot_a_semi_norm}
Let $E \in \mathfrak{M}(\mathbb{R})$. Then $\mathcal{L}_\infty(E, \mathbb{K})$
is a vector space over $\mathbb{K}$ and $\nu_\infty(\cdot)$ is a semi-norm on
$\mathcal{L}_\infty(E, \mathbb{K})$.
\end{propo}
\begin{proof}
Since $\mathcal{L}_\infty(E, \mathbb{K}) \subseteq \mathcal{L}(E,
\mathbb{K})$, and that the latter is a vector space, it suffices to perform
the subspace test on $\mathcal{L}_\infty(E, \mathbb{K})$ to show that
$\mathcal{L}_\infty(E, \mathbb{K})$ is a vector space.
First, note that if $\zeta = 0$ is the zero function, then $\nu_\infty(\zeta)
= 0 < \infty$, and so $\zeta \in \mathcal{L}_\infty(E, \mathbb{K})$, i.e.
$\mathcal{L}_\infty(E, \mathbb{K}) \neq \emptyset$. Further, as noted before,
$\nu_\infty(f) \geq 0$ for any $f \in \mathcal{L}_\infty(E, \mathbb{K})$.
Next, suppose that $f \in \mathcal{L}_\infty(E, \mathbb{K})$ and $0 \neq
\kappa \in \mathbb{K}$.
It is clear that $\kappa f \in \mathcal{L}(E, \mathbb{K})$,
and we quickly notice that
\begin{align*}
\nu_\infty(\kappa f)
&= \inf \{ \gamma > 0 : m \{ x \in E : \abs{\kappa f(x)} > \gamma \} = 0 \}
\\
&= \inf \{ \abs{\kappa} \delta : \delta > 0,\, m \{ x \in E : \abs{\kappa}\abs{f(x)} >
\abs{\kappa} \delta \} = 0 \} \\
&= \abs{\kappa} \inf \{ \delta > 0 : m \{ x \in E : \abs{f(x)} > \delta \} =
0 \} \\
&= \abs{\kappa} \nu_\infty(f) < \infty.
\end{align*}
So $\kappa f \in \mathcal{L}_\infty(E, \mathbb{K})$ for all $0 \neq \kappa \in
\mathbb{K}$.
As noted before, if $\kappa = 0$,
then $\kappa f = 0 \in \mathcal{L}_\infty(E, \mathbb{K})$.
Now suppose $f, g \in \mathcal{L}_\infty(E, \mathbb{K})$.
WTS
\begin{equation*}
\nu_\infty(f + g) \leq \nu_\infty(f) + \nu_\infty(g).
\end{equation*}
Let $\alpha > \nu_\infty(f)$ and $\beta > \nu_\infty(g)$. Let
\begin{equation*}
E_f = \{ x \in E : \abs{f(x)} > \alpha \} \text{ and }
E_g = \{ x \in E : \abs{g(x)} > \beta \}.
\end{equation*}
Then $mE_f = 0 = mE_g$.
Let $H = E \setminus (E_f \cup E_g)$.
For $x \in H$, we have
\begin{equation*}
\abs{(f + g)(x)} \leq \abs{f(x)} + \abs{g(x)} \leq \alpha + \beta,
\end{equation*}
so
\begin{equation*}
\{ x \in E : \abs{(f + g)(x)} > \alpha + \beta \} \subseteq E_f \cup E_g.
\end{equation*}
Thus
\begin{equation*}
m \left\{ x \in E : \abs{(f + g)(x)} > \alpha + \beta \right\} \leq mE_f +
mE_g = 0,
\end{equation*}
and so $\nu_\infty(f + g) \leq \alpha + \beta$.
Since $\alpha$ and $\beta$ were arbitrary,
it follows that
\begin{equation*}
\nu_\infty(f + g) \leq \nu_\infty(f) + \nu_\infty(g) < \infty.
\end{equation*}
Thus $\mathcal{L}_\infty(E, \mathbb{K})$ is indeed a vector space and
$\nu_\infty(\cdot)$ is indeed a semi-norm.
\end{proof}
\begin{defn}[$L_\infty(E, \mathbb{K})$]\index{$L_\infty(E, \mathbb{K})$}\label{defn:_l_infty_e_k_}
Let
\begin{equation*}
\mathcal{N}_\infty(E, \mathbb{K}) \coloneqq \{ f \in \mathcal{L}_\infty(E,
\mathbb{K}) : \nu_\infty(f) = 0 \}.
\end{equation*}
Then we define
\begin{equation*}
L_\infty(E, \mathbb{K}) = \mathcal{L}_\infty(E, \mathbb{K}) /
\mathcal{N}_\infty(E, \mathbb{K}),
\end{equation*}
and we denote by $[f]$ the coset of $f \in \mathcal{L}_\infty(E, \mathbb{K})$
in $L_\infty(E, \mathbb{K})$.
\end{defn}
\begin{thm}[$L_\infty(E, \mathbb{K})$ is a normed-linear space]\label{thm:_l_infty_e_k_is_a_normed_linear_space}
Let $E \in \mathfrak{M}(\mathbb{R})$.
Then $L_\infty(E, \mathbb{K})$ is a normed-linear space,
where for $[f] \in L_\infty(E, \mathbb{K})$ we set
\begin{equation*}
\norm{[f]}_\infty \coloneqq \nu_\infty(f).
\end{equation*}
\end{thm}
\begin{proof}
See \cref{propo:kernel_of_a_vector_space_is_a_linear_manifold}.
\end{proof}
\begin{remark}\label{remark:cursivel_infty_supremeness_of_the_essential_sup}
Let $f \in \mathcal{L}_\infty(E, \mathbb{K})$.
Let us look at the places where the undesirable happens.
For each $n \geq 1$, let
\begin{equation*}
B_n \coloneqq \left\{ x \in E :
\abs{f(x)} > \nu_{\infty} (f) + \frac{1}{n} \right\}.
\end{equation*}
Then by \hyperref[defn:essential_supremum]{definition} of $\nu_\infty(\cdot)$,
we have that $mB_n = 0$ for each $n \geq 1$,
and letting
\begin{equation*}
B \coloneqq \bigcup_{n=1}^{\infty} B_n = \left\{ x \in E : \abs{f(x)} >
\nu_\infty(f) \right\},
\end{equation*}
we have that
\begin{equation*}
mB \leq \sum_{n=1}^{\infty} mB_n = \sum_{n=1}^{\infty} 0 = 0.
\end{equation*}
In other words, for any $f \in \mathcal{L}_\infty(E, \mathbb{K})$, the set
\begin{equation*}
B = \left\{ x \in E : \abs{f(x)} > \nu_\infty(f) \right\}
\end{equation*}
has measure zero. So for any $[f] \in L_\infty(E, \mathbb{K})$,
we can always pick a representative $g \in [f]$ such that
\begin{equation*}
\abs{g(x)} \leq \norm{[f]}_\infty
\end{equation*}
for all $x \in E$.
In particular, the function $g \coloneqq \chi_{E \setminus B} \cdot f$
is measurable, and differs from $f$ only on $B$, whence $[g] = [f]$,
and we indeed have
\begin{equation*}
\abs{g(x)} \leq \nu_{\infty}(f) = \nu_{\infty}(g) = \norm{[g]}_{\infty}
\end{equation*}
for all $x \in E$.
Moreover, we see that $\nu_{\infty}(f) = 0$ iff $f = 0$ a.e. on $E$,
and so
\begin{equation*}
\mathcal{N}_{\infty}(E, \mathbb{K}) =
\{ f \in \mathcal{L}_{\infty}(E, \mathbb{K}) : f = 0 \text{ a.e. on } E \}.
\end{equation*}
\end{remark}
\begin{thm}[Completeness of $L_\infty(E, \mathbb{K})$]\label{thm:completeness_of_l_infty_e_k_}
Let $E \in \mathfrak{M}(\mathbb{R})$.
Then $L_\infty(E, \mathbb{K})$ is a Banach space.
\end{thm}
\begin{proof}
\hlwarn{To be added}
\end{proof}
Recall that if $E \in \mathfrak{M}(\mathbb{R})$ and $1 < p < \infty$,
$f \in \mathcal{L}_p(E, \mathbb{K})$ and $g \in \mathcal{L}_q(E, \mathbb{K})$,
where $q$ is the Lebesgue conjugate of $p$,
then \hyperref[thm:holders_inequality]{H\"{o}lder's Inequality}
gives that $fg \in \mathcal{L}_1(E, \mathbb{K})$ and
\begin{equation*}
\nu_1(fg) \leq \nu_p(f) \nu_q(g).
\end{equation*}
Let's look at $p = 1$.
\begin{thm}[H\"{o}lder's Inequality for $\mathcal{L}_1(E, \mathbb{K})$]\index{H\"{o}lder's Inequality for $\mathcal{L}_1(E, \mathbb{K})$}\label{thm:holder_s_inequality_for_l_1_e_k_}
Let $E \in \mathfrak{M}(\mathbb{R})$ with $mE > 0$.
\begin{enumerate}
\item If $f \in \mathcal{L}_1(E, \mathbb{K})$ and
$g \in \mathcal{L}_\infty(E, \mathbb{K})$, then $fg \in \mathcal{L}_1(E,
\mathbb{K})$ and
\begin{equation*}
\nu_1(fg) \leq \nu_1(f) \nu_\infty(g).
\end{equation*}
\item For $f \in \mathcal{L}_1(E, \mathbb{K})$,
there exists a function $f^* \in \mathcal{L}_\infty(E, \mathbb{K})$
such that $\nu_\infty(f^*) = 1$ and
\begin{equation*}
\nu_1(ff^*) = \int_{E} f \cdot f^* = \nu_1(f).
\end{equation*}
\end{enumerate}
\end{thm}
\begin{proof}
\begin{enumerate}
\item By \cref{remark:cursivel_infty_supremeness_of_the_essential_sup},
for $[g] \in L_\infty(E, \mathbb{K})$, we can find, wlog, $g_0 \in [g]$
so that $g_0 = g$ a.e. on $E$, and for all $x \in E$, we have
$\abs{g_0(x)} \leq \nu_{\infty}(g) = \nu_{\infty}(g_0)$.
In particular, we have that for any $f \in \mathcal{L}_1(E, \mathbb{K})$,
$\abs{fg} = \abs{fg_0}$ a.e. on $E$, and we find that
\begin{equation*}
\int_{E} \abs{fg} = \int_{E} \abs{fg_0}.
\end{equation*}
Thus wlog wma $\abs{g(x)} \leq \nu_\infty(g)$ for all $x \in E$.
Then
\begin{equation*}
\nu_1(fg) = \int_{E} \abs{fg} \leq \int_{E} \abs{f} \nu_\infty(g)
= \nu_\infty(g) \int_{E} \abs{f} = \nu_1(f) \nu_\infty(g).
\end{equation*}
\item Set $\Theta : E \to \mathbb{T}$ such that
\begin{equation*}
f = \Theta \cdot \abs{f},
\end{equation*}
where
\begin{equation*}
\Theta(x) = \begin{cases}
\frac{f(x)}{\abs{f(x)}} & \text{ when } f(x) \neq 0 \\
1 & \text{ when } f(x) = 0.
\end{cases}
\end{equation*}
Then with $f^* \coloneqq \overline{\Theta}$,
we have $\nu_\infty(f^*) = 1$, $\abs{f} = ff^*$, and so
\begin{align*}
\nu_1(ff^*) = \int_{E} \abs{ff^*} = \int_{E} \abs{f} = \nu_1(f).
\end{align*}
\end{enumerate}
\end{proof}
\begin{crly}[H\"{o}lder's Inequality for $L_1(E, \mathbb{K})$]\index{H\"{o}lder's Inequality}\label{crly:holder_s_inequality_for_L_1_e_k_}
Let $E \in \mathfrak{M}(\mathbb{R})$.
If $[f] \in L_1(E, \mathbb{K})$ and
$[g] \in L_\infty(E, \mathbb{K})$,
then $[f][g] \coloneqq [fg] \in L_1(E, \mathbb{K})$ is well-defined
and
\begin{equation*}
\norm{[fg]}_1 \leq \norm{[f]}_1 \norm{[g]}_{\infty}.
\end{equation*}
\end{crly}
\begin{crly}[H\"{o}lder's Inequality for Continuous Functions]\index{H\"{o}lder's Inequality}\label{crly:holder_s_inequality_for_continuous_functions}
Suppose that $a < b \in \mathbb{R}$.
Consider $h \in \mathcal{C}([a, b], \mathbb{K})$
and $f \in \mathcal{L}_1([a, b], \mathbb{K})$.
Then $h \cdot f \in \mathcal{L}_1([a, b], \mathbb{K})$ and
\begin{equation*}
\nu_1(h \cdot f) \leq \nu_1(f) \nu_\infty(h) = \nu_1(f) \norm{h}_{\sup}.
\end{equation*}
\end{crly}
\begin{proof}
Continuous functions are measurable, so $h$ is measurable, and by
\cref{eg:continuous_functions_are_in_L_infty},
$h \in \mathcal{L}_\infty([a, b], \mathbb{K})$
with $\nu_\infty(h) = \norm{h}_{\sup}$.
Then it is simply \cref{thm:holder_s_inequality_for_l_1_e_k_}.
\end{proof}
% subsection completeness_of_l_infty_e_k_ (end)
% section _l_p_spaces_continued_2 (end)
% chapter lecture_12_jun_18th_2019 (end)
\chapter{Lecture 13 Jun 20th 2019}%
\label{chp:lecture_13_jun_20th_2019}
% chapter lecture_13_jun_20th_2019
\section{\texorpdfstring{$L_p$}{Lp} Spaces (Continued 3)}%
\label{sec:_l_p_spaces_continued_3}
% section _l_p_spaces_continued_3
\begin{remark}[Containment of $L_p$ Spaces]\label{remark:containment_of_lp_spaces}
Let $E \in \mathfrak{M}(\mathbb{R})$ with $mE < \infty$.
Suppose that $1 \leq p < \infty$,
and that $[f] \in L_\infty(E, \mathbb{K})$,
which then wlog $f \in \mathcal{L}_\infty(E, \mathbb{K})$.
As commented before, $\abs{f(x)} \leq \norm{[f]}_{\infty}$ a.e. on $E$.
Then
\begin{equation*}
\norm{[f]}_p^p = \int_{E} \abs{f}^p
\leq \int_{E} \norm{[f]}_{\infty}^{p}
= \norm{[f]}_{\infty}^p mE < \infty,
\end{equation*}
which means $[f] \in L_p(E, \mathbb{K})$, with
\begin{equation*}
\norm{[f]}_p \leq \norm{[f]}_{\infty} \left( mE \right)^{\frac{1}{p}}.
\end{equation*}
Thus $L_\infty(E, \mathbb{K}) \subseteq L_p(E, \mathbb{K})$,
$1 \leq p < \infty$ when $mE < \infty$.
Next, consider $1 \leq p < r < \infty$.
Suppose $[g] \in L_r(E, \mathbb{K})$.
Again, wlog $g \in \mathcal{L}_r(E, \mathbb{K})$ and
\begin{align*}
\norm{[g]}_p^p &= \int_{E} \abs{g}^p = \int_{E} (\abs{g}^r)^{\frac{p}{r}}
\leq \int_{E} \max \{ 1, \abs{g}^r \} \\
&\leq \int_{E} 1 + \abs{g}^r = mE + \norm{[g]}_r^r < \infty.
\end{align*}
So $[g] \in L_p(E, \mathbb{K})$. Thus we see that
\begin{equation*}
L_\infty(E, \mathbb{K}) \subseteq L_r (E, \mathbb{K})
\subseteq L_p(E, \mathbb{K}) \subseteq L_1(E, \mathbb{K}).
\end{equation*}
\end{remark}
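The hypothesis $mE < \infty$ is essential here. As a quick example (not
from the lecture): take $E = [1, \infty)$ and $f(x) = \frac{1}{x}$. Then
\begin{equation*}
  \int_{E} \abs{f}^2 = \int_{1}^{\infty} \frac{1}{x^2} \, dx = 1 < \infty
  \quad \text{while} \quad
  \int_{E} \abs{f} = \int_{1}^{\infty} \frac{1}{x} \, dx = \infty,
\end{equation*}
so $f \in \mathcal{L}_2(E, \mathbb{R})$ but $f \notin \mathcal{L}_1(E,
\mathbb{R})$, and the containment $L_2(E, \mathbb{K}) \subseteq L_1(E,
\mathbb{K})$ fails when $mE = \infty$.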
\begin{remark}
Suppose $a < b \in \mathbb{R}$. Then from
\cref{eg:continuous_functions_are_in_L_infty}, we have that
\begin{equation*}
[\mathcal{C}([a, b], \mathbb{K})]
\coloneqq \{ [f] : f \in \mathcal{C}([a, b], \mathbb{K}) \}
\subseteq L_\infty([a, b], \mathbb{K}).
\end{equation*}
Recall that
\begin{equation*}
\mathcal{R}_{\infty}([a, b], \mathbb{K})
= \{ f : [a, b] \to \mathbb{K} : f \text{ is Riemann-integrable and bdd }
\}.
\end{equation*}
By
\cref{crly:bounded_riemann_integrable_functions_are_lebesgue_integrable_complex_version},
every $f \in \mathcal{R}_{\infty}([a, b], \mathbb{K})$ is in
$\mathcal{L}([a, b], \mathbb{K})$, and so $[f] \in L_\infty([a, b],
\mathbb{K})$ by virtue of $f$ being bounded.
\end{remark}
\newthought{Our next goal} is to establish that the space
$[\mathcal{C}([a, b], \mathbb{K})]$ is dense in $L_p([a, b], \mathbb{K})$,
for $1 \leq p < \infty$.
\begin{defn}[Closed Span]\index{Closed Span}\label{defn:closed_span}
We define the \hlnoteb{closed span} of a subset
$\mathcal{B} \subseteq (\mathcal{H}, \norm{\cdot})$ as
\begin{equation*}
\overline{\Span} \mathcal{B}
\coloneqq \{ y \in \mathcal{H} :
\forall \epsilon > 0 \quad \exists x \in \Span \mathcal{B} \quad
\norm{x - y} < \epsilon
\},
\end{equation*}
i.e. the norm-closure of $\Span \mathcal{B}$.
\end{defn}
\marginnote{Imma use the name from the notes of Prof. \citealp{marcoux2019} for
\cref{lemma:lemma_6_31}, since there's no good expressive name for it.}
\begin{lemma}[Lemma 6.31]\label{lemma:lemma_6_31}
Let $(\mathcal{X}, \norm{\cdot})$ be a normed linear space,
and suppose that $\mathcal{Y}$ and $\mathcal{Z}$ are \hldefn{linear manifolds}
\sidenote{i.e. a vector subspace, but not necessarily closed.}
in $\mathcal{X}$. Suppose $\mathcal{B} \subseteq \mathcal{Y}$ satisfies
\begin{equation*}
\overline{\Span} \mathcal{B} = \mathcal{X}.
\end{equation*}
If $\mathcal{B} \subseteq \overline{\mathcal{Z}}$,
then $\overline{\mathcal{Z}} = \mathcal{X}$.
\end{lemma}
\begin{proof}
Let $x \in \mathcal{X} = \overline{\Span} \mathcal{B}$ and $\epsilon > 0$.
Then there exists $\{ b_i \}_{i=1}^{N} \subseteq \mathcal{B}$ and
$\{ k_i \}_{i=1}^{N} \subseteq \mathbb{K}$ such that
\begin{equation*}
\norm{ x - \sum_{n=1}^{N} k_n b_n } < \frac{\epsilon}{2}.
\end{equation*}
Since $b_i \in \mathcal{B} \subseteq \overline{\mathcal{Z}}$,
there exists $z_i \in \mathcal{Z}$ such that
\begin{equation*}
\norm{z_i - b_i} < \frac{\epsilon}{2N(\abs{k_i} + 1)}.
\end{equation*}
Let $z \coloneqq \sum_{n=1}^{N} k_n z_n \in \mathcal{Z}$,
and this would give
\begin{align*}
\norm{x - z}
&\leq \norm{ x - \sum_{n=1}^{N} k_n b_n } + \norm{ \sum_{n=1}^{N} k_n b_n -
z} \\
&< \frac{\epsilon}{2} + \norm{ \sum_{n=1}^{N} k_n (b_n - z_n) } \\
&\leq \frac{\epsilon}{2} + \sum_{n=1}^{N} \abs{k_n} \norm{b_n - z_n} \\
&\leq \frac{\epsilon}{2} + \sum_{n=1}^{N} \frac{\epsilon}{2N} \\
&= \frac{\epsilon}{2} + \frac{\epsilon}{2} = \epsilon.
\end{align*}
Thus $\mathcal{Z}$ is dense in $\mathcal{X}$.
\end{proof}
\begin{notation}
Let $E \in \mathfrak{M}(\mathbb{R})$ and $1 \leq p \leq \infty$.
We set
\begin{equation*}
\SIMP_p(E, \mathbb{K})
= \SIMP(E, \mathbb{K}) \cap \mathcal{L}_p(E, \mathbb{K}).
\end{equation*}
\end{notation}
\begin{ex}
Prove that if $mE < \infty$ or if $p = \infty$, then
\begin{equation*}
\SIMP_p(E, \mathbb{K}) = \SIMP(E, \mathbb{K}).
\end{equation*}
\end{ex}
\begin{solution}
\hlbnoted{Case $p = \infty$}
By definition, a simple function $f$ has finite range, and so
$\nu_\infty(f) < \infty$.
Thus $\SIMP(E, \mathbb{K}) \subseteq \mathcal{L}_\infty(E, \mathbb{K})$
and so our result holds.
\noindent
\hlbnoted{Case $mE < \infty$}
This is quite similar: since the range of a simple function $f$ is finite,
$f$ is bounded, say $\abs{f} \leq M$, and so
\begin{equation*}
\int_{E} \abs{f}^p \leq M^p \, mE < \infty.
\end{equation*}
Thus, again $\SIMP(E, \mathbb{K}) \subseteq \mathcal{L}_p(E, \mathbb{K})$.
\end{solution}
\begin{propo}[Density of Equivalence Classes of $\SIMP_p(E, \mathbb{K})$ in $(L_p(E, \mathbb{K}), \norm{\cdot}_p)$]\label{propo:density_of_equiv_classes_of_simple_functions_in_L_p}
Let $E \in \mathfrak{M}(\mathbb{R})$ be a Lebesgue measurable set
and $1 \leq p \leq \infty$. Then
\begin{equation*}
[\SIMP_p(E, \mathbb{K})]
\coloneqq \{ [ \phi ] : \phi \in \SIMP_p(E, \mathbb{K}) \}
\end{equation*}
is dense in
\begin{equation*}
(L_p(E, \mathbb{K}), \norm{\cdot}_p).
\end{equation*}
\end{propo}
\begin{strategy}
Recall
\cref{propo:increasing_sequence_of_simple_functions_that_converges_to_a_measurable_function}.
This is the proposition that is key to showing that
simple functions are dense, simply because we may
get as close to any $f \in \mathcal{L}(E, [0, \infty])$
as we want.
\begin{enumerate}
\item Reduce the problem to only real-valued functions.
\item Reduce the problem to only positive real-valued functions.
\item It then remains to reconstruct a simple function in
$\mathcal{L}_p(E, \mathbb{R})$ that is as close to the
original real-valued function as we would like.
\end{enumerate}
\end{strategy}
\begin{proof}
\hlbnoted{Case $\mathbb{K} = \mathbb{C}$}
If we had proved the above for the case where $\mathbb{K} = \mathbb{R}$,
then for $[g] \in L_p(E, \mathbb{K})$ and $\epsilon > 0$,
we may write
\begin{equation*}
g = \Re g + i \Im g.
\end{equation*}
In particular, $g \in \mathcal{L}(E, \mathbb{C})$,
so it is necessary that $\Re g,\, \Im g \in \mathcal{L}(E, \mathbb{R})$ by
\cref{propo:component_wise_measurability}.
Then, by assumption that this works for $\mathbb{R}$,
we can pick $\phi_1,\, \phi_2 \in \SIMP_p(E, \mathbb{R})$ such that
\begin{gather*}
\norm{[\Re g] - [\phi_1]}_p < \frac{\epsilon}{2} \\
\norm{[\Im g] - [\phi_2]}_p < \frac{\epsilon}{2}.
\end{gather*}
Then, let
\begin{equation*}
\phi = \phi_1 + i \phi_2 \in \SIMP_p(E, \mathbb{C}),
\end{equation*}
which then
\begin{equation*}
\norm{[g] - [\phi]}_p \leq \norm{[\Re g] - [\phi_1]}_p + \abs{i} \norm{[\Im g] -
[\phi_2]}_p < \frac{\epsilon}{2} + \frac{\epsilon}{2} = \epsilon.
\end{equation*}
Then $[\SIMP_p(E, \mathbb{C})]$ is dense in $(L_p(E, \mathbb{C}), \norm{\cdot}_p)$.
\noindent
\hlbnoted{Case $\mathbb{K} = \mathbb{R}$}
We shall further break this into 2 cases,
of which we have seen in our last exercise.
\noindent
\hlbnotec{Case 1: $1 \leq p < \infty$}
$\forall \epsilon > 0$, let $[f] \in L_p(E, \mathbb{R})$.
Then $f \in \mathcal{L}_p(E, \mathbb{R})$ and we may write
\begin{equation*}
f = f^+ - f^-,
\end{equation*}
where $f^+,\, f^- \in \mathcal{L}_p(E, \mathbb{R})$.
By
\cref{propo:increasing_sequence_of_simple_functions_that_converges_to_a_measurable_function},
we can find simple functions
\begin{equation*}
0 \leq \phi_1 \leq \phi_2 \leq \phi_3 \leq \hdots \leq f^+
\end{equation*}
such that
\begin{equation*}
f^+(x) = \lim_{n \to \infty} \phi_n(x),\quad x \in E.
\end{equation*}
Note that
\begin{equation*}
\int_{E} \abs{\phi_n}^p \leq \int_{E} \abs{f^+}^p
\leq \int_{E} \abs{f}^p < \infty,
\end{equation*}
and so $\phi_n \in \SIMP_p(E, \mathbb{R})$, for $n \geq 1$.
Thus, by the
\hyperref[thm:lebesgue_dominated_convergence_theorem]{Lebesgue Dominated
Convergence Theorem},
\begin{equation*}
\lim_{n \to \infty} \int_{E} \abs{f^+ - \phi_n}^p = \int_{E} \lim_{n \to
\infty} \abs{f^+ - \phi_n}^p = 0.
\end{equation*}
Thus we can find some $N_1 > 0$, such that for $n > N_1$, we have
\begin{equation*}
\norm{f^+ - \phi_n}_p < \frac{\epsilon}{2}.
\end{equation*}
Similarly, we can find simple functions
$\psi_1, \psi_2, \ldots \in \SIMP_p(E, \mathbb{R})$, such that
\begin{equation*}
0 \leq \psi_1 \leq \psi_2 \leq \psi_3 \leq \hdots \leq f^-,
\end{equation*}
such that
\begin{equation*}
f^-(x) = \lim_{n \to \infty} \psi_n(x),\quad x \in E,
\end{equation*}
and so that we can find $N_2 > 0$ where $\forall n > N_2$, we have
\begin{equation*}
\norm{f^- - \psi_n}_p < \frac{\epsilon}{2}.
\end{equation*}
Then, for $n > \max \{ N_1, N_2 \}$,
\begin{align*}
\norm{f - (\phi_n - \psi_n)}_p
&= \norm{(f^+ - \phi_n) - (f^- - \psi_n)}_p \\
&\leq \norm{f^+ - \phi_n}_p + \norm{f^- - \psi_n}_p \\
&< \frac{\epsilon}{2} + \frac{\epsilon}{2} = \epsilon,
\end{align*}
and $\phi_n - \psi_n \in \SIMP_p(E, \mathbb{R})$.
\noindent
\hlbnotec{Case 2: $p = \infty$}
Let $\epsilon > 0,\, [f] \in L_\infty(E, \mathbb{R})$,
and $M = \norm{f}_\infty$.
Then $\Range f \subseteq [-M, M] \eqqcolon I$.
Now choose $N > 0$ such that $\frac{1}{N} < \epsilon$; enlarging $M$ if
necessary, wma $2MN \in \mathbb{N}$.
\sidenote{Let us break $I$ into intervals of length $\frac{1}{N}$.
Doing this will allow
$\abs{f(x) - (-M + \frac{k}{N}) \chi_{f^{-1}(I_k)}} \leq \frac{1}{N}$.}
Let
\begin{equation*}
I_k = \left[ -M + \frac{k}{N},\, -M + \frac{k+1}{N} \right)
\end{equation*}
for $k \in \{ 0, \ldots, 2MN - 2 \}$, and $I_{2MN - 1} = \left[ M - \frac{1}{N}, M \right]$.
Let $H_k \coloneqq f^{-1}(I_k)$, for $k \in \{ 0, \ldots, 2MN - 1 \}$.
Then $H_k$ is measurable by the measurability of $f$.
Let
\begin{equation*}
\phi \coloneqq \sum_{k=0}^{2MN-1} \left( -M + \frac{k}{N} \right) \chi_{H_k}.
\end{equation*}
It is clear that
$\phi \in \SIMP(E, \mathbb{R}) = \SIMP_\infty(E, \mathbb{R})$.
Furthermore,
\begin{equation*}
\abs{f(x) - \phi(x)} \leq \frac{1}{N} < \epsilon \quad \forall x \in E.
\end{equation*}
It follows that
\begin{equation*}
\norm{[f] - [\phi]}_\infty < \epsilon.
\end{equation*}
This completes the proof.
\end{proof}
\begin{propo}[Density of Equivalence Classes of Step Functions in $L_p$ Spaces]\label{propo:density_of_equivalence_classes_of_step_functions_in_l_p_spaces}
Let $a < b \in \mathbb{R}$.
If $1 \leq p < \infty$, then
\begin{equation*}
[\STEP([a, b], \mathbb{K})]
\end{equation*}
is dense in
\begin{equation*}
(L_p([a, b], \mathbb{K}), \norm{\cdot}_p).
\end{equation*}
\end{propo}
\begin{proof}
By a similar argument to what we provided for the case of $\mathbb{K} = \mathbb{C}$,
it suffices for us to show that the statement is true for the case when
$\mathbb{K} = \mathbb{R}$.
Notice that $[a, b] \in \mathfrak{M}(\mathbb{R})$,
and $m[a, b] = b - a < \infty$.
Let us see for ourselves that $\mathcal{Y} \coloneqq [\SIMP([a, b], \mathbb{R})]$
and $\mathcal{Z} \coloneqq [\STEP([a, b], \mathbb{R})]$
are linear manifolds in $L_p([a, b], \mathbb{R})$.
It is rather clear that
$\mathcal{Y}, \mathcal{Z} \subseteq L_p([a, b], \mathbb{R})$.
To show that $\mathcal{Y}$ is a linear manifold, we see that
for $\phi, \psi \in \SIMP([a, b], \mathbb{R})$ and $c \in \mathbb{R}$,
the function $c \phi + \psi$ is measurable and has finite range (every
value it takes is of the form $c \alpha + \beta$, where $\alpha$ is a value
of $\phi$ and $\beta$ is a value of $\psi$), and so
\begin{equation*}
c \phi + \psi \in \SIMP([a, b], \mathbb{R}).
\end{equation*}
To show that $\mathcal{Z}$ is a linear manifold, we see that
for \\ \noindent
$\phi, \psi \in \STEP([a, b], \mathbb{R})$ and $c \in \mathbb{R}$,
we may take a common refinement of the two partitions of $[a, b]$
underlying $\phi$ and $\psi$; on each subinterval of this refinement,
both $\phi$ and $\psi$ are constant, and hence so is $c \phi + \psi$.
Thus
\begin{equation*}
c \phi + \psi \in \STEP([a, b], \mathbb{R}).
\end{equation*}
From here, notice that by
\hyperref[warning:step_functions_are_simple_but_the_converse_is_false]{our
warning on page
\pageref{warning:step_functions_are_simple_but_the_converse_is_false}},
$\mathcal{Z} \subseteq \mathcal{Y}$.
Furthermore, if we define
\begin{equation*}
\mathcal{B} \coloneqq \{ \chi_H : H \in \mathfrak{M}([a, b]) \},
\end{equation*}
then
\begin{equation*}
\mathcal{Y} = \Span \{ [\phi] : \phi \in \mathcal{B} \},
\end{equation*}
and so by
\cref{propo:density_of_equiv_classes_of_simple_functions_in_L_p},
$\Span \{ [\phi] : \phi \in \mathcal{B} \}$ is dense in \\ \noindent
$(L_p([a, b], \mathbb{R}), \norm{\cdot}_p)$.
From \cref{lemma:lemma_6_31},
it suffices for us to show that
$\{ [\chi_H] : \chi_H \in \mathcal{B} \} \subseteq \overline{\mathcal{Z}}$.
\sidenote[][-70pt]{We want to approximate any element $[\chi_H]$,
$\chi_H \in \mathcal{B}$, using intervals.
Since we are working in $\mathbb{R}$,
we know that any open set $G \subseteq \mathbb{R}$ can be written
as a disjoint union of open intervals. Furthermore,
if we pick an open set $G$ that closely encloses $H$,
then we obtain disjoint open intervals that closely approximate $H$.}
To that end, let $\epsilon > 0$ and $H \in \mathfrak{M}([a, b])$.
By \cref{thm:carath'_e_odory_s_and_lebesgue_s_definition_of_measurability},
we can find an open set $G \subseteq \mathbb{R}$ with $H \subseteq G$ such that
\begin{equation*}
m ( G \setminus H ) < \frac{\epsilon}{2}.
\end{equation*}
We may write
\begin{equation*}
G = \bigcup_{n=1}^{\infty} (a_n, b_n).
\end{equation*}
It is important to note that each of the intervals is finite,
since $mH \leq m[a, b] < \infty$ and $m(G \setminus H) < \infty$,
and thus $m(G) = m(H) + m(G \setminus H) < \infty$.
Furthermore, some of the $(a_n, b_n)$'s may be empty sets.
Now let
\begin{equation*}
G_k = \bigcup_{n=1}^{k} (a_n, b_n).
\end{equation*}
Clearly, $\lim_{k \to \infty} G_k = G$.
Then we may choose $N > 0$ such that
\begin{equation*}
m(G \setminus G_N) \leq \sum_{n=N+1}^{\infty} m((a_n, b_n)) <
\frac{\epsilon}{2}.
\end{equation*}
Let $\phi = \chi_{G_N \cap [a, b]}$.
It is clear that $\phi \in \STEP([a, b], \mathbb{R})$.
It remains to show that
\begin{equation*}
\nu_p(\chi_H - \phi)^p = \int_{[a, b]} \abs{\chi_H - \phi}^p < \epsilon.
\end{equation*}
Notice that
\begin{align*}
\abs{\chi_H(x) - \phi(x)} = \begin{cases}
\abs{1 - 0} = 1 & x \in H \setminus G_N \\
\abs{0 - 1} = 1 & x \in (G_N \cap [a, b]) \setminus H \\
\abs{1 - 1} = 0 & x \in H \cap G_N \\
\abs{0 - 0} = 0 & x \notin H \cup G_N
\end{cases}.
\end{align*}
It thus follows that
\begin{align*}
\nu_p(\chi_H - \phi)^p
&= \int_{[a, b]} \abs{\chi_H - \phi}^p \\
&= \int_{E} \abs{\chi_H - \phi} \\
&= m(H \setminus G_N) + m((G_N \cap [a, b]) \setminus H) \\
&\leq m(G \setminus G_N) + m(G \setminus H) \\
&< \frac{\epsilon}{2} + \frac{\epsilon}{2} = \epsilon,
\end{align*}
where $E = (H \setminus G_N) \cup ((G_N \cap [a, b]) \setminus H)$.
It thus follows that $[\chi_H] \in \overline{\mathcal{Z}}$,
and so $\mathcal{Z} = [\STEP([a, b], \mathbb{R})]$ is dense in \\ \noindent
$(L_p([a, b], \mathbb{R}), \norm{\cdot}_p)$.
\end{proof}
\begin{note}
\cref{lemma:lemma_6_31} greatly simplified our proof above.
We completely circumvented the need to pick an arbitrary element
from $L_p([a, b], \mathbb{K})$ and try to approximate it
using step functions.
Instead, we need only approximate characteristic functions of measurable sets.
We shall use the same approach as we did in the proof above
to show that the equivalence classes of continuous functions
on a closed interval $[a, b]$, over $\mathbb{K}$, are dense
in $(L_p([a, b], \mathbb{K}), \norm{\cdot}_p)$.
\end{note}
\begin{thm}[Density of Equivalence Classes of Continuous Functions in $L_p$ Spaces]\label{thm:density_of_equivalence_classes_of_continuous_functions_in_l_p_spaces}
Let $a < b \in \mathbb{R}$. If $1 \leq p < \infty$, then $[\mathcal{C}([a, b],
\mathbb{K})]$ is dense in $(L_p([a, b], \mathbb{K}), \norm{\cdot}_p)$.
\end{thm}
\begin{proof}
We may once again assume that $\mathbb{K} = \mathbb{R}$,
as we did in the last 2 proofs.
Let
\begin{equation*}
\mathcal{B} \coloneqq \{ [\chi_{[r, s]}] : a \leq r < s \leq b \}.
\end{equation*}
By
\cref{propo:density_of_equivalence_classes_of_step_functions_in_l_p_spaces},
$\overline{\Span} \mathcal{B} = L_p([a, b], \mathbb{R})$. Let
\begin{equation*}
\mathcal{Z} \coloneqq [\mathcal{C}([a, b], \mathbb{R})].
\end{equation*}
By \cref{lemma:lemma_6_31},
it suffices to show that $\mathcal{B} \subseteq \overline{\mathcal{Z}}$.
Let $\epsilon > 0$ and $\chi_{[r, s]} \in [\chi_{[r, s]}] \in \mathcal{B}$.
Let $0 < \delta < \frac{s-r}{2}$ and consider the function
\begin{equation*}
f_{\delta}(x) = \begin{cases}
0 & x \leq r \text{ or } x \geq s \\
\frac{1}{\delta} (x - r) & r < x \leq r + \delta \\
1 & r + \delta < x < s - \delta \\
-\frac{1}{\delta}(x - s) & s - \delta \leq x < s
\end{cases}.
\end{equation*}
\begin{marginfigure}
\centering
\begin{tikzpicture}
\draw[->] (-2, 0) -- (2, 0);
\draw[dashed] (-2, 1) node[left] {$1$} -- (2, 1);
\node[label={270:$r$}] at (-1.5, 0) {|};
\node[label={270:$s$}] at (1.5, 0) {|};
\node[label={270:$r + \delta$}] at (-1, 0) {|};
\node[label={270:$s - \delta$}] at (1, 0) {|};
\draw[thick,color=red]
(-2, 0) -- (-1.5, 0) -- (-1, 1) -- (1, 1) -- (1.5, 0) -- (2, 0);
\end{tikzpicture}
\caption{Shape of the continuous function $f_\delta$ for approximating
$\chi_{[r, s]}$}\label{fig:shape_of_the_continuous_function_f_delta_for_approximating_chi_r_s}
\end{marginfigure}
Then
\begin{align*}
\norm{[\chi_{[r, s]}] - [f_\delta]}_p^p
&= \int_{[a, b]} \abs{\chi_{[r, s]} - f_{\delta}}^p \\
&\leq \int_{[r, r + \delta] \cup [s - \delta, s]} 1^p \\
&= m([r, r + \delta]) + m([s - \delta, s]) \\
&= 2\delta.
\end{align*}
Then picking $\delta < \frac{\epsilon^p}{2}$ in the first place gives
$\norm{[\chi_{[r, s]}] - [f_\delta]}_p < \epsilon$, and our work is done.
\end{proof}
\newthought{Recall} that a topological space is said to be \hldefn{separable}
if it admits a countable dense subset.
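For example, $\mathbb{R}$ with its usual metric is separable, since
$\mathbb{Q}$ is a countable dense subset of $\mathbb{R}$.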
\begin{ex}[A way of finding a countable subset in a separable metric space]\label{ex:a_way_of_finding_a_countable_subset_in_a_separable_metric_space}
Suppose $(X, d)$ is a separable metric space, $\delta > 0$, and
\begin{equation*}
Y \coloneqq \{ x_\lambda : \lambda \in \Lambda \} \subseteq X \text{ satisfies }
d(x_\alpha, x_\beta) \geq \delta
\text{ for all } \alpha \neq \beta \in \Lambda.
\end{equation*}
Then $\Lambda$ is countable.
\sidenote[][-80pt]{We may intuitively think of the flow of the proof as follows.
The open balls $B(x_\alpha, \delta/2)$, $\alpha \in \Lambda$, are pairwise
disjoint, and a countable dense subset of $X$ must intersect each of them.
Picking, for each $\alpha$, a point of the dense subset inside
$B(x_\alpha, \delta/2)$ gives an injection from $\Lambda$ into a countable
set, and so $\Lambda$ is countable.}
\end{ex}
\begin{crly}[Separability of $L_p$ Spaces]\label{crly:separability_of_l_p_spaces}
Let $a < b \in \mathbb{R}$.
\begin{enumerate}
\item If $1 \leq p < \infty$, then
$(L_p([a, b], \mathbb{R}), \norm{\cdot}_p)$ is separable.
\item If $p = \infty$, then
$(L_\infty([a, b], \mathbb{K}), \norm{\cdot}_\infty)$ is not separable.
\end{enumerate}
\end{crly}
\begin{proof}
\begin{enumerate}
\item Fix $1 \leq p < \infty$.
Recall from \cref{remark:containment_of_lp_spaces} that for
$[f], [g] \in L_{\infty}([a, b], \mathbb{K}) \subseteq L_p([a, b], \mathbb{K})$,
we have
\begin{equation*}
\norm{[f] - [g]}_p
\leq \norm{[f] - [g]}_{\infty} \cdot m([a, b])^{\frac{1}{p}}
= \norm{[f] - [g]}_{\infty} (b - a)^{\frac{1}{p}}.
\end{equation*}
Let $\epsilon > 0$ and $[h] \in L_p([a, b], \mathbb{K})$.
By the
\hyperref[thm:density_of_equivalence_classes_of_continuous_functions_in_l_p_spaces]{density
of $[\mathcal{C}([a, b], \mathbb{K})]$ in $L_p([a, b], \mathbb{K})$},
we can find $[g] \in [\mathcal{C}([a, b], \mathbb{K})]$ such that
\begin{equation*}
\norm{[h] - [g]}_p < \frac{\epsilon}{3}.
\end{equation*}
By the \hlnotea{Weierstrass Approximation Theorem},
we can find a polynomial
$p(x) = p_0 + p_1 x + \hdots + p_m x^m \in \mathbb{C}[x]$
such that
\begin{equation*}
\norm{[g] - [p]}_\infty = \norm{g - p}_{\sup}
< \frac{\epsilon}{3(b - a)^{\frac{1}{p}}}.
\end{equation*}
By the density of $\mathbb{Q}$ in $\mathbb{R}$, we can find a polynomial
$q(x) = q_0 + q_1 x + \hdots + q_n x^n \in (\mathbb{Q} + i \mathbb{Q})[x]$
such that
\begin{equation*}
\norm{[p] - [q]}_\infty = \norm{p - q}_{\sup}
< \frac{\epsilon}{3 (b - a)^{\frac{1}{p}}}.
\end{equation*}
Observe that
\begin{align*}
&\norm{[h] - [q]}_p \\
&\leq \norm{[h] - [g]}_p + \norm{[g] - [p]}_p + \norm{[p] - [q]}_p \\
&\leq \norm{[h] - [g]}_p + \norm{[g] - [p]}_\infty (b - a)^{\frac{1}{p}}
+ \norm{[p] - [q]}_{\infty} (b - a)^{\frac{1}{p}} \\
&< \frac{\epsilon}{3}
+ \frac{\epsilon}{3(b-a)^{\frac{1}{p}}} (b-a)^{\frac{1}{p}}
+ \frac{\epsilon}{3(b-a)^{\frac{1}{p}}} (b-a)^{\frac{1}{p}} \\
&= \epsilon.
\end{align*}
Thus $[q]$ approximates $[h]$ to within $\epsilon$,
and $q$ is drawn from the countable set $(\mathbb{Q} + i \mathbb{Q})[x]$.
Therefore, $[(\mathbb{Q} + i \mathbb{Q})[x]]$ is a countable dense subset of
$(L_p([a, b], \mathbb{K}), \norm{\cdot}_p)$, and so the latter is separable.
\item Consider $a \leq r_1 < s_1 \leq b$ and $a \leq r_2 < s_2 \leq b$,
with $(r_1, s_1) \neq (r_2, s_2)$.
Then the \hldefn{symmetric difference}
\begin{equation*}
[r_1, s_1] \Delta [r_2, s_2] \coloneqq
([r_1, s_1] \cup [r_2, s_2]) \setminus ([r_1, s_1] \cap [r_2, s_2])
\end{equation*}
contains an interval, say, $[u, v] \subseteq [a, b]$.
Notice that for any $x \in [u, v]$,
\begin{equation*}
\abs{ \chi_{[r_1, s_1]}(x) - \chi_{[r_2, s_2]}(x) } = 1,
\end{equation*}
and so
\begin{equation*}
\norm{[\chi_{[r_1, s_1]}] - [\chi_{[r_2, s_2]}]}_\infty =
\norm{[\chi_{[r_1, s_1] \Delta [r_2, s_2]}]}_\infty = 1.
\end{equation*}
Consider
$\Lambda \coloneqq \{ (r, s) \in \mathbb{R}^2 : a \leq r < s \leq b \}$.
It is clear that $\Lambda$ is uncountable.
For any $(r_1, s_1) \neq (r_2, s_2) \in \Lambda$, by our above argument,
we have
\begin{equation*}
\norm{[\chi_{[r_1, s_1]}] - [\chi_{[r_2, s_2]}]}_\infty = 1.
\end{equation*}
By
\cref{ex:a_way_of_finding_a_countable_subset_in_a_separable_metric_space},
we have that $L_\infty([a, b], \mathbb{K})$ cannot be separable.
\sidenote{All the elements $\chi_{[r, s]}$ are $1$-away from one another,
and so the contrapositive of the exercise gives us this counterexample.}
\end{enumerate}
\end{proof}
% section _l_p_spaces_continued_3 (end)
% chapter lecture_13_jun_20th_2019 (end)
\chapter{Lecture 14 Jun 25th 2019}%
\label{chp:lecture_14_jun_25th_2019}
% chapter lecture_14_jun_25th_2019
\section{Hilbert Spaces}%
\label{sec:hilbert_spaces}
% section hilbert_spaces
Given $E \in \mathfrak{M}(\mathbb{R})$,
we've seen that for $1 \leq p \leq \infty$,
$(L_p(E, \mathbb{R}), \norm{\cdot}_p)$ is a Banach space, i.e.
a complete normed linear space.
The case where $p = 2$ is a special space that merits our attention.
\begin{defn}[Inner Product]\index{Inner Product}\label{defn:inner_product}
An \hlnoteb{inner product} on a $\mathbb{K}$-vector space
$\mathcal{H}$ is a function
\begin{equation*}
\langle \cdot,\, \cdot \rangle : \mathcal{H} \times \mathcal{H} \to
\mathbb{K}
\end{equation*}
that satisfies
\begin{enumerate}
\item (\hlnoteb{positive definiteness}) $\langle x, x \rangle \geq 0$
for all $x \in \mathcal{H}$, and $\langle x, x \rangle = 0$ iff $x = 0$;
\item (\hlnoteb{conjugate bilinear}) for all $w, x, y, z \in \mathcal{H}$
and $\alpha, \beta \in \mathbb{K}$,
\begin{equation*}
\langle \alpha w + x, y + \beta z \rangle
= \alpha \langle w, y \rangle
+ \langle x, y \rangle
+ \alpha \overline{\beta} \langle w, z \rangle
+ \overline{\beta} \langle x, z \rangle;
\end{equation*}
\item (\hlnoteb{conjugate symmetry}) for all $x, y \in \mathcal{H}$,
$\langle x, y \rangle = \overline{\langle y, x \rangle}$.
\end{enumerate}
\end{defn}
\begin{defn}[Inner Product Space]\index{Inner Product Space}\label{defn:inner_product_space}
An \hlnoteb{inner product space (IPS)} is a vector space $\mathcal{H}$
endowed with an inner product.
\end{defn}
\begin{defn}[Orthogonality]\index{Orthogonality}\label{defn:orthogonality}
We say that $x, y$ in an IPS $\mathcal{H}$ are \hlnoteb{orthogonal}
if
\begin{equation*}
\langle x, y \rangle = 0.
\end{equation*}
\end{defn}
\begin{thm}[Cauchy-Schwarz Inequality]\index{Cauchy-Schwarz Inequality}\label{thm:cauchy_schwarz_inequality}
Suppose $(\mathcal{H}, \langle \cdot, \cdot \rangle)$
is an IPS over $\mathbb{K}$.
Then for all $x, y \in \mathcal{H}$,
\begin{equation*}
\abs{\langle x, y \rangle}
\leq \langle x, x \rangle^{\frac{1}{2}} \langle y, y \rangle^{\frac{1}{2}}.
\end{equation*}
\end{thm}
\begin{proof}
Note that if $\langle x, y \rangle = 0$, then there is nothing to show,
since inner products are positive definite.
So suppose $\langle x, y \rangle \neq 0$; in particular, $y \neq 0$,
so that $\langle y, y \rangle > 0$ and we may divide by it below.
\sidenote{This proof is said to be typical of any kind of Cauchy-Schwarz-like
inequality. I am making this note because this is the rare time that I
have actually seen one in pure mathematics (still a greenhorn with
questionable basics).}
Let $\kappa \in \mathbb{K}$. Notice that
\begin{align*}
0 &\leq \langle x - \kappa y, x - \kappa y \rangle \\
&= \langle x, x \rangle - \kappa \langle y, x \rangle
- \overline{\kappa} \langle x, y \rangle
+ \abs{\kappa}^2 \langle y, y \rangle.
\end{align*}
So pick
\begin{equation*}
\kappa = \frac{\langle x, y \rangle}{\langle y, y \rangle}.
\end{equation*}
Then we have
\begin{align*}
0 &\leq \langle x, x \rangle
- \frac{\langle x, y \rangle}{\langle y, y \rangle} \langle y, x \rangle
- \frac{\langle y, x \rangle}{\langle y, y \rangle} \langle x, y \rangle
+ \frac{\abs{\langle x, y \rangle}^2}{\langle y, y \rangle^2} \langle y, y \rangle \\
&= \langle x, x \rangle
- \frac{\abs{\langle x, y \rangle}^2}{\langle y, y \rangle}
- \frac{\abs{\langle x, y \rangle}^2}{\langle y, y \rangle}
+ \frac{\abs{\langle x, y \rangle}^2}{\langle y, y \rangle} \\
&= \langle x, x \rangle
- \frac{\abs{\langle x, y \rangle}^2}{\langle y, y \rangle}.
\end{align*}
Thus
\begin{equation*}
\abs{\langle x, y \rangle}^2 \leq \langle x, x \rangle \langle y, y \rangle.
\end{equation*}
Hence
\begin{equation*}
\abs{\langle x, y \rangle}
\leq \langle x, x \rangle^{\frac{1}{2}} \langle y, y \rangle^{\frac{1}{2}}.
\end{equation*}
\end{proof}
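For a concrete instance: in $\mathbb{C}^N$ with the inner product
$\langle x, y \rangle = \sum_{n=1}^{N} x_n \overline{y_n}$
(the standard inner product, which we shall meet again below),
the theorem reads
\begin{equation*}
\abs{ \sum_{n=1}^{N} x_n \overline{y_n} }
\leq \left( \sum_{n=1}^{N} \abs{x_n}^2 \right)^{\frac{1}{2}}
\left( \sum_{n=1}^{N} \abs{y_n}^2 \right)^{\frac{1}{2}},
\end{equation*}
which is the classical Cauchy-Schwarz Inequality for finite sums.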
\begin{propo}[Norm Induced by The Inner Product]\index{Norm Induced by The Inner Product}\label{propo:norm_induced_by_the_inner_product}
Let $(\mathcal{H}, \langle \cdot, \cdot \rangle)$ be an IPS.
Then the map
\begin{equation*}
\norm{x} \coloneqq \langle x, x \rangle^{\frac{1}{2}},\quad
x \in \mathcal{H}
\end{equation*}
defines a norm on $\mathcal{H}$,
called the \hlnoteb{norm induced by the inner product}.
\end{propo}
\begin{proof}
\hlbnoted{Positive Definiteness}
This follows immediately from the definition of an inner product.
\noindent
\hlbnoted{Scalar Multiplication}
Let $\kappa \in \mathbb{K}$ and $x \in \mathcal{H}$. Then
\begin{equation*}
\norm{\kappa x}^2 = \langle \kappa x, \kappa x \rangle
= \abs{\kappa}^2 \langle x, x \rangle
= \abs{\kappa}^2 \norm{x}^2.
\end{equation*}
Thus
\begin{equation*}
\norm{\kappa x} = \abs{\kappa} \norm{x}.
\end{equation*}
\noindent
\hlbnoted{Triangle Inequality}
By the \hyperref[thm:cauchy_schwarz_inequality]{Cauchy-Schwarz Inequality},
we have
\begin{align*}
\norm{x + y}^2
&= \langle x + y, x + y \rangle \\
&= \langle x, x \rangle + \langle x, y \rangle + \langle y, x \rangle + \langle y, y \rangle \\
&\leq \norm{x}^2 + \abs{\langle x, y \rangle} + \abs{\langle y, x \rangle} + \norm{y}^2 \\
&\leq \norm{x}^2
+ 2 \langle x, x \rangle^{\frac{1}{2}} \langle y, y \rangle^{\frac{1}{2}}
+ \norm{y}^2 \\
&= \norm{x}^2 + 2 \norm{x} \norm{y} + \norm{y}^2 \\
&= \left( \norm{x} + \norm{y} \right)^2.
\end{align*}
Hence
\begin{equation*}
\norm{x + y} \leq \norm{x} + \norm{y}.
\end{equation*}
\end{proof}
It follows that every IPS $(\mathcal{H}, \langle \cdot, \cdot \rangle)$
is also a normed linear space (NLS).
Furthermore, norms induce metrics, and so
every IPS is also a metric space.
\cref{fig:hierarchy_of_spaces_from_metric_space_to_normed_linear_space_then_down_to_inner_product_space}
is a highly abstract illustration of the idea.
\begin{marginfigure}
\centering
\includesvg[width=\marginparwidth]{images/metricsp-nls-ips}
\caption{Hierarchy of Spaces, from Metric Space to Normed Linear Space, then
down to Inner Product Space}
\label{fig:hierarchy_of_spaces_from_metric_space_to_normed_linear_space_then_down_to_inner_product_space}
\end{marginfigure}
\begin{defn}[Hilbert Space]\index{Hilbert Space}\label{defn:hilbert_space}
A \hlnoteb{Hilbert space} is a complete IPS.
\end{defn}
\begin{eg}
\begin{enumerate}
\item Let $N \geq 1$ be an integer, and $\mathcal{H} = \mathbb{C}^N$.
For
\begin{equation*}
x = (x_n)_{n=1}^{N},\, y = (y_n)_{n=1}^{N} \in \mathbb{C}^N,
\end{equation*}
we define
\begin{equation*}
\langle x, y \rangle = \sum_{n=1}^{N} x_n \overline{y_n}
\end{equation*}
as the inner product on $\mathbb{C}^N$.
This is, in fact,
called the \hldefn{standard inner product} on $\mathbb{C}^N$.
Furthermore, $\mathbb{C}^N$ is complete
wrt the norm induced by this inner product.
Thus
\begin{equation*}
(\mathbb{C}^N, \langle \cdot, \cdot \rangle)
\end{equation*}
is a Hilbert space.
\item We can make the above slightly more general.
Fix $1 \leq N \in \mathbb{N}$, and choose some
\begin{equation*}
\rho_1, \rho_2, \ldots, \rho_N \in \mathbb{R}^+.
\end{equation*}
\begin{ex}
Check that the function
\begin{equation*}
\langle x, y \rangle_{\rho} \coloneqq \sum_{n=1}^{N} \rho_n x_n \overline{y_n}
\end{equation*}
is an inner product on $\mathbb{C}^N$,
and that $\mathbb{C}^N$ with this inner product is a Hilbert space.
\end{ex}
\item The following is a space that will be very important for us.
The set
\begin{equation*}
\ell_2(\mathbb{K}) \coloneqq \{ (x_n)_{n \in \mathbb{N}}
: x_n \in \mathbb{K}, \sum \abs{x_n}^2 < \infty \},
\end{equation*}
with the inner product
\begin{equation*}
\langle (x_n)_n, (y_n)_n \rangle \coloneqq \sum_{n=1}^{\infty} x_n
\overline{y_n},
\end{equation*}
is called the \hldefn{sequence space} \hldefn{$\ell_2$} with its
\hldefn{standard inner product}.
The space
\begin{equation*}
(\ell_2(\mathbb{K}), \langle \cdot, \cdot \rangle)
\end{equation*}
is a Hilbert space.
\sidenote{We saw, in PMATH 351, that with the norm
induced by this inner product, $\ell_2(\mathbb{K})$
is a complete metric space.}
\end{enumerate}
\end{eg}
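One point worth verifying in the last example is that the series defining
the inner product on $\ell_2(\mathbb{K})$ converges at all. Indeed, applying
the Cauchy-Schwarz Inequality to the partial sums, for every $N$,
\begin{equation*}
\sum_{n=1}^{N} \abs{x_n \overline{y_n}}
\leq \left( \sum_{n=1}^{N} \abs{x_n}^2 \right)^{\frac{1}{2}}
\left( \sum_{n=1}^{N} \abs{y_n}^2 \right)^{\frac{1}{2}}
\leq \left( \sum_{n=1}^{\infty} \abs{x_n}^2 \right)^{\frac{1}{2}}
\left( \sum_{n=1}^{\infty} \abs{y_n}^2 \right)^{\frac{1}{2}} < \infty,
\end{equation*}
so the series $\sum_n x_n \overline{y_n}$ converges absolutely whenever
$(x_n)_n, (y_n)_n \in \ell_2(\mathbb{K})$.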
Let us now look at an inner product that we shall define on $L_2$.
\begin{thm}[The Standard Inner Product for $L_2(E, \mathbb{K})$]\label{thm:the_standard_inner_product_for_l_2_e_mathbb_k_}
Let $E \in \mathfrak{M}(\mathbb{R})$. The map
\begin{align*}
\langle \cdot, \cdot \rangle
: L_2(E, \mathbb{K}) \times L_2(E, \mathbb{K}) &\to \mathbb{K} \\
([f], [g]) &\mapsto \int_{E} f \overline{g}
\end{align*}
is an inner product on $L_2(E, \mathbb{K})$.
Furthermore, the norm induced by this inner product is the $L_2$-norm
$\norm{\cdot}_2$ on $L_2(E, \mathbb{K})$.
Since $(L_2(E, \mathbb{K}), \norm{\cdot}_2)$ is complete,
$(L_2(E, \mathbb{K}), \langle \cdot, \cdot \rangle)$ is a Hilbert space.
\end{thm}
\begin{proof}
Before anything else, we need to show that $\langle \cdot, \cdot \rangle$
is well-defined.
Notice that if $[f_1] = [f_2]$ and $[g_1] = [g_2]$
in $L_2(E, \mathbb{K})$, then $f_1 \overline{g_1} = f_2 \overline{g_2}$ a.e.
on $E$, and so
\begin{equation*}
\langle [f_1], [g_1] \rangle = \int_{E} f_1 \overline{g_1}
= \int_{E} f_2 \overline{g_2}
= \langle [f_2], [g_2] \rangle.
\end{equation*}
Furthermore, by \cref{thm:holders_inequality_lp_spaces},
we have that
\begin{equation*}
\int_{E} \abs{f \overline{g}} = \norm{[f \overline{g}]}_1 \leq \norm{[f]}_2
\norm{[\overline{g}]}_2 < \infty.
\end{equation*}
Thus $\langle \cdot, \cdot \rangle$ is indeed well-defined.
\noindent
\hlbnoted{Showing that $\langle \cdot, \cdot \rangle$ is an inner product}
\hlnoteg{(Positive Definiteness)} Let $[f] \in L_2(E, \mathbb{K})$.
Notice that
\begin{equation*}
f \overline{f} = \abs{f}^2 \geq 0.
\end{equation*}
Thus
\begin{equation*}
\langle [f], [f] \rangle = \int_{E} f \overline{f} = \int_{E} \abs{f}^2 \geq 0.
\end{equation*}
Now if $[f] = [0] \in L_2(E, \mathbb{K})$, then
\begin{equation*}
\langle [f], [f] \rangle = \int_{E} \abs{f}^2 = \int_{E} 0^2 = 0.
\end{equation*}
Conversely, if $\langle [f], [f] \rangle = \int_{E} \abs{f}^2 = 0$,
then $\abs{f}^2 = 0$ a.e. on $E$, so $f = 0$ a.e. on $E$,
i.e. $[f] = [0]$.
\noindent
\hlnoteg{(Conjugate Bilinearity)} Let $[f], [g], [h] \in L_2(E, \mathbb{K})$,
and $\alpha, \beta \in \mathbb{K}$. Then
\begin{align*}
\langle \alpha [f] + \beta [g], [h] \rangle
&= \int_{E} (\alpha f + \beta g) \overline{h} \\
&= \alpha \int_{E} f \overline{h} + \beta \int_{E} g \overline{h} \\
&= \alpha \langle [f], [h] \rangle + \beta \langle [g], [h] \rangle,
\end{align*}
and
\begin{align*}
\langle [f], \alpha [g] + \beta [h] \rangle
&= \int_{E} f (\overline{\alpha g + \beta h}) \\
&= \overline{\alpha} \int_{E} f \overline{g}
+ \overline{\beta} \int_{E} f \overline{h} \\
&= \overline{\alpha} \langle [f], [g] \rangle
+ \overline{\beta} \langle [f], [h] \rangle.
\end{align*}
\noindent
\hlnoteg{(Conjugate Symmetry)} Let $[f], [g] \in L_2(E, \mathbb{K})$.
Then
\begin{equation*}
\langle [f], [g] \rangle
= \int_{E} f \overline{g}
= \overline{\int_{E} \overline{f \overline{g}} }
= \overline{\int_{E} \overline{f} g}
= \overline{\langle [g], [f] \rangle}.
\end{equation*}
\noindent
\hlbnoted{Showing that the norm induced by $\langle \cdot, \cdot \rangle$ is
the $L_2$-norm} \\ \noindent
By \cref{propo:norm_induced_by_the_inner_product}, we have
for any $[f] \in L_2(E, \mathbb{K})$,
\begin{equation*}
\norm{[f]} \coloneqq \langle [f], [f] \rangle^{\frac{1}{2}}
= \left( \int_{E} \abs{f}^2 \right)^{\frac{1}{2}} = \norm{[f]}_2.
\end{equation*}
\end{proof}
\begin{defn}[Orthonormality]\index{Orthonormality}\label{defn:orthonormality}
Let $\mathcal{E}$ be a subset of an IPS
$(\mathcal{H}, \langle \cdot, \cdot \rangle)$.
We say that $x \in \mathcal{E}$ has \hldefn{norm 1} if
\begin{equation*}
\norm{x} = \langle x, x \rangle^{\frac{1}{2}} = 1.
\end{equation*}
We say that $x, y \in \mathcal{E}$ are \hlnoteb{orthonormal}
if $x, y$ each has norm 1, and they are orthogonal to one another,
i.e. $\langle x, y \rangle = 0$.
We say that $\mathcal{E}$ is an \hldefn{orthonormal set}
if every $x \in \mathcal{E}$ has norm 1 and
any two distinct $x, y \in \mathcal{E}$ are orthogonal.
\end{defn}
\begin{defn}[Orthonormal Basis]\index{Orthonormal Basis}\label{defn:orthonormal_basis}
Let $\mathcal{H}$ be a Hilbert space.
An \hlnoteb{orthonormal basis (ONB)} (or \hldefn{Hilbert space basis}) for $\mathcal{H}$
is a \hlnotec{maximal (wrt inclusion) orthonormal set} in $\mathcal{H}$.
\end{defn}
\begin{remark}
\begin{enumerate}
\item By \hlnotea{Zorn's Lemma},
we can extend every orthonormal set in $\mathcal{H}$ to an ONB for $\mathcal{H}$.
\item If $\mathcal{H}$ is infinite-dimensional, then an ONB for $\mathcal{H}$
is \hlnotec{not} a \hlnotea{Hamel basis}
\sidenote{A \hldefn{Hamel basis} is the notion of basis
familiar from linear algebra:
every vector is a \emph{finite} linear combination of basis elements.
In finite dimensions, the span of an ONB is the entire space,
so the two notions coincide there.}
for $\mathcal{H}$.
\end{enumerate}
\end{remark}
\begin{eg}\label{eg:examples_of_onb}
\begin{enumerate}
\item Let $N \geq 1$ be an integer,
and consider $\mathcal{H} = \mathbb{C}^N$ endowed with
the standard inner product $\langle \cdot, \cdot \rangle$.
For $1 \leq n \leq N$, define
\begin{equation*}
e_n \coloneqq (\delta_{n, k})_{k=1}^{N},
\end{equation*}
where $\delta_{a, b}$ denotes the \hlnotea{Kronecker delta function}.
Then $\left\{ e_n \right\}_{n=1}^{N}$ is an ONB for $\mathbb{C}^N$.
\item Let $1 \leq N \in \mathbb{N}$ and $\rho_k = k$,
for $1 \leq k \leq N$.
Set
\begin{equation*}
e_n \coloneqq \left( \frac{1}{\sqrt{k}} \delta_{n,k} \right)_{k=1}^{N}.
\end{equation*}
Then $\left\{ e_n \right\}_{n=1}^{N}$ is \hlnotee{also}
an ONB for $\mathbb{C}^N$, with the rather awkward inner product
from our last example:
\begin{equation*}
\langle x, y \rangle_{\rho} = \sum_{n=1}^{N} \rho_n x_n \overline{y_n}.
\end{equation*}
\item \label{eg:onb_of_lill_2}
Generalizing the first example here to infinite dimensions,
let $\mathcal{H} = \ell_2(\mathbb{K})$, with its standard inner product.
For $n \geq 1$, let
\begin{equation*}
e_n = \left( \delta_{n, k} \right)_{k=1}^{\infty}.
\end{equation*}
\begin{ex}
Show that $\left\{ e_n \right\}_{n=1}^{\infty}$ is an ONB for
$\ell_2(\mathbb{K})$.
\end{ex}
\item \label{eg:onb_of_bigl_2}
Now for an orthonormal basis that is highly relevant to us.
Consider $\mathcal{H} = L_2([0, 2 \pi], \mathbb{C})$,
which we have shown is a Hilbert space, with its standard inner product
\begin{equation*}
\langle [f], [g] \rangle \coloneqq \int_{[0, 2 \pi]} f \overline{g}.
\end{equation*}
\end{equation*}
For $n \in \mathbb{Z}$, define the continuous function
\begin{align*}
\xi_n : [0, 2 \pi] &\to \mathbb{C} \\
\theta &\mapsto \frac{1}{\sqrt{2 \pi}} e^{in \theta}.
\end{align*}
Then $[\xi_n] \in L_2([0, 2 \pi], \mathbb{C})$ for all $n \in \mathbb{Z}$.
In A4, we shall see that $\left\{ [\xi_n] \right\}_{n \in \mathbb{Z}}$
is an ONB for $L_2([0, 2 \pi], \mathbb{C})$.
\end{enumerate}
\end{eg}
We recall the following result from linear algebra:
\begin{thm}[Gram-Schmidt Orthogonalisation Process]\index{Gram-Schmidt Orthogonalisation Process}\label{thm:gram_schmidt_orthogonalisation_process}
If $(\mathcal{H}, \langle \cdot, \cdot \rangle)$
is a Hilbert space over $\mathbb{K}$,
and $\left\{ x_n \right\}_{n=1}^{\infty}$ is a \hlnotea{linearly independent
set} in $\mathcal{H}$,
then we can find an orthonormal set
$\left\{ y_n \right\}_{n=1}^{\infty}$ in $\mathcal{H}$ so that
\begin{equation*}
\Span \left\{ x_1, \ldots, x_N \right\} = \Span \left\{ y_1, \ldots, y_N \right\}
\end{equation*}
for all $N \geq 1$.
\end{thm}
\begin{proof}
First, set
\begin{equation*}
y_1 = \frac{x_1}{\norm{x_1}}.
\end{equation*}
Notice that
\begin{align*}
\langle x_2 - \langle x_2, y_1 \rangle y_1, y_1 \rangle
&= \langle x_2, y_1 \rangle - \langle x_2, y_1 \rangle \langle y_1, y_1 \rangle \\
&= \langle x_2, y_1 \rangle - \langle x_2, y_1 \rangle \cdot 1 = 0.
\end{align*}
To get norm 1, we can then set
\begin{equation*}
y_2 = \frac{x_2 - \langle x_2, y_1 \rangle y_1}{\norm{x_2 - \langle x_2, y_1
\rangle y_1}}.
\end{equation*}
By induction, one can show that
\begin{equation*}
y_N = \frac{x_N - \sum_{n=1}^{N-1} \langle x_N, y_n \rangle y_n}{\norm{x_N -
\sum_{n=1}^{N-1} \langle x_N, y_n \rangle y_n}},
\end{equation*}
for $N \geq 1$, works.
\end{proof}
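As a small worked example, take $\mathcal{H} = L_2([0, 1], \mathbb{R})$
and apply the process to the linearly independent set $\{ [1], [x] \}$.
We get
\begin{equation*}
y_1 = \frac{[1]}{\norm{[1]}_2} = [1], \qquad
y_2 = \frac{[x] - \langle [x], [1] \rangle [1]}
{\norm{[x] - \langle [x], [1] \rangle [1]}_2}
= \frac{[x - \frac{1}{2}]}{\sqrt{\frac{1}{12}}}
= [\sqrt{3}\,(2x - 1)],
\end{equation*}
since $\langle [x], [1] \rangle = \int_0^1 x = \frac{1}{2}$ and
$\int_0^1 (x - \frac{1}{2})^2 = \frac{1}{12}$;
these are, up to normalization,
the first two Legendre polynomials shifted to $[0, 1]$.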
\begin{thm}[The Pythagorean Theorem and Parallelogram Law]\label{thm:the_pythagorean_theorem_and_parallelogram_law}
Let $\mathcal{H}$ be a Hilbert space
and suppose that $x_1, x_2, \ldots, x_N \in \mathcal{H}$.
\begin{enumerate}
\item (\hldefn{The Pythagorean Theorem})
If $\left\{ x_n \right\}_{n=1}^N$ is pairwise orthogonal, then
\begin{equation*}
\norm{ \sum_{n=1}^{N} x_n }^2 = \sum_{n=1}^{N} \norm{x_n}^2.
\end{equation*}
\item (\hldefn{The Parallelogram Law})
We have
\begin{equation*}
\norm{x_1 + x_2}^2 + \norm{x_1 - x_2}^2 = 2 \left( \norm{x_1}^2 +
\norm{x_2}^2 \right).
\end{equation*}
\end{enumerate}
\end{thm}
\begin{proof}
\begin{enumerate}
\item Since $\langle x_n, x_m \rangle = 0$ for all $n \neq m$,
we have
\begin{equation*}
\norm{ \sum_{n=1}^{N} x_n }^2
= \left\langle \sum_{n=1}^{N} x_n, \sum_{n=1}^{N} x_n \right\rangle
= \sum_{n=1}^{N} \langle x_n, x_n \rangle
= \sum_{n=1}^{N} \norm{x_n}^2.
\end{equation*}
\item We see that
\begin{align*}
\norm{x_1 + x_2}^2 + \norm{x_1 - x_2}^2
&= \langle x_1 + x_2, x_1 + x_2 \rangle + \langle x_1 - x_2, x_1 - x_2 \rangle \\
&= 2 \langle x_1, x_1 \rangle + 2 \langle x_2, x_2 \rangle \\
&= 2 \left( \norm{x_1}^2 + \norm{x_2}^2 \right).
\end{align*}
\end{enumerate}
\end{proof}
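The Parallelogram Law is special to norms induced by inner products,
and can be used to detect norms that are \hlnotec{not} so induced.
For instance, in $(\mathbb{R}^2, \norm{\cdot}_\infty)$,
taking $x_1 = (1, 0)$ and $x_2 = (0, 1)$ gives
\begin{equation*}
\norm{x_1 + x_2}_\infty^2 + \norm{x_1 - x_2}_\infty^2 = 1 + 1 = 2
\neq 4 = 2 \left( \norm{x_1}_\infty^2 + \norm{x_2}_\infty^2 \right),
\end{equation*}
so $\norm{\cdot}_\infty$ on $\mathbb{R}^2$ is not induced by any inner product.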
% section hilbert_spaces (end)
% chapter lecture_14_jun_25th_2019 (end)
\chapter{Lecture 15 Jun 27th 2019}%
\label{chp:lecture_15_jun_27th_2019}
% chapter lecture_15_jun_27th_2019
\section{Hilbert Spaces (Continued)}%
\label{sec:hilbert_spaces_continued}
% section hilbert_spaces_continued
\begin{thm}[Closest Point from a Convex Set in a Hilbert Space]\label{thm:closest_point_from_a_convex_set_in_a_hilbert_space}
Let $\mathcal{H}$ be a Hilbert space,
and $K \subseteq \mathcal{H}$ be a closed,
non-empty convex subset of $\mathcal{H}$.
\marginnote{The proof of
\cref{thm:closest_point_from_a_convex_set_in_a_hilbert_space}
is left to the assignments.}
Given $x \in \mathcal{H}$,
there exists a unique point $y \in K$ that is closest to $x$,
i.e.
\begin{equation*}
\norm{x - y} = \dist(x, K) \coloneqq \min \{ \norm{x - z} : z \in K \}.
\end{equation*}
\end{thm}
\begin{thm}[A Way to Orthogonality]\label{thm:a_way_to_orthogonality}
Let $\mathcal{H}$ be a Hilbert space.
Let $\mathcal{M} \subseteq \mathcal{H}$ be a closed subspace.
Let $x \in \mathcal{H}$, and $m \in \mathcal{M}$. TFAE:
\begin{enumerate}
\item $\norm{x - m} = \dist(x, \mathcal{M})$ ;
\item The vector $x - m$ is orthogonal to $\mathcal{M}$, i.e.
\begin{equation*}
\langle x - m, y \rangle = 0 \text{ for all } y \in \mathcal{M}.
\end{equation*}
\end{enumerate}
\end{thm}
\begin{proof}
\hlbnoted{$(1) \implies (2)$}
Suppose to the contrary that $\exists y \in \mathcal{M}$
such that
\begin{equation*}
\kappa \coloneqq \langle x - m, y \rangle \neq 0.
\end{equation*}
Wlog, suppose $\norm{y} = 1$.
\sidenote{We may assume so since if $\norm{y} \neq 1$, then we may replace
$y$ with $y / \norm{y}$, which has norm 1 and still pairs
non-trivially with $x - m$.}
Consider $z \coloneqq m + \kappa y \in \mathcal{M}$.
Then
\begin{align*}
\norm{x - z}^2
&= \langle x - z, x - z \rangle
= \langle x - m - \kappa y, x - m - \kappa y \rangle \\
&= \norm{x - m}^2 - \kappa \langle y, x - m \rangle
- \overline{\kappa} \langle x - m, y \rangle
+ \kappa \overline{\kappa} \langle y, y \rangle \\
&= \norm{x - m}^2 - \kappa \overline{\kappa}
- \overline{\kappa} \kappa + \kappa \overline{\kappa} \\
&= \norm{x - m}^2 - \abs{\kappa}^2 < \norm{x - m}^2,
\end{align*}
a contradiction.
Thus, such a $y$ cannot exist, and so the result holds.
\noindent
\hlbnoted{$(2) \implies (1)$} Suppose $\forall y \in \mathcal{M}$,
$\langle x - m, y \rangle = 0$.
Write
\begin{equation*}
\mathcal{M} \ni y = m + (y - m).
\end{equation*}
Since $y - m \in \mathcal{M}$, we have $x - m \perp y - m$,
and so by the Pythagorean theorem,
\begin{align*}
\norm{x - y}^2 &= \norm{(x - m) - (y - m)}^2
= \norm{x - m}^2 + \norm{y - m}^2 \\
&\geq \norm{x - m}^2
\end{align*}
since $\norm{y - m}^2 \geq 0$.
Thus $\norm{x - m} = \dist(x, \mathcal{M})$.
\end{proof}
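As a concrete picture: in $\mathbb{R}^2$ with the standard inner product,
let $\mathcal{M}$ be the $x$-axis and $x = (a, b)$.
The closest point of $\mathcal{M}$ to $x$ is $m = (a, 0)$, and indeed
\begin{equation*}
\langle x - m, (t, 0) \rangle = \langle (0, b), (t, 0) \rangle = 0
\quad \text{for all } t \in \mathbb{R},
\end{equation*}
so $x - m$ is orthogonal to $\mathcal{M}$, exactly as
\cref{thm:a_way_to_orthogonality} predicts.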
\newthought{Let's} have a little talk about \hlnotea{complements}.
\begin{defn}[Perpendicular Space]\index{Perpendicular Space}\label{defn:perpendicular_space}
Given any non-empty subset $\mathcal{S}$ of a Hilbert space $\mathcal{H}$,
we define the \hlnoteb{perpendicular space} of $\mathcal{S}$ as
\begin{equation*}
\mathcal{S}^{\perp}
\coloneqq \{ y \in \mathcal{H} : \langle x, y \rangle = 0
\text{ for all } x \in \mathcal{S} \}.
\end{equation*}
\end{equation*}
\end{defn}
\begin{ex}\label{ex:perp_space_is_closed}
Show that $\mathcal{S}^{\perp}$ is a norm-closed subspace
\sidenote{A \hldefn{norm-closed subspace} is a subspace that is closed
with respect to the norm topology of the ambient space.}
of $\mathcal{H}$.
\end{ex}
\begin{remark}
\begin{enumerate}
\item Observe that $0 \in \mathcal{S}^{\perp}$ always,
and $(\mathcal{S}^{\perp})^{\perp} \supseteq \mathcal{S}$.
It thus follows that
\begin{equation*}
\left( \mathcal{S}^{\perp} \right)^{\perp} \supseteq \overline{\Span} \mathcal{S},
\end{equation*}
the \hlnoted{norm closure of the linear span} of $\mathcal{S}$.
\item Let $\mathcal{V}$ be a vector space
and $\mathcal{W}$ be a (vector) subspace of $\mathcal{V}$.
Let
\begin{equation*}
\{ w_{\lambda} : \lambda \in \Lambda \}
\end{equation*}
be a (Hamel) basis for $\mathcal{W}$.
We may then extend $\{ w_\lambda : \lambda \in \Lambda \}$
to be a basis of $\mathcal{V}$, such as
\begin{equation*}
\{ w_\lambda : \lambda \in \Lambda \} \cup \{ x_\gamma : \gamma \in
\Gamma \}.
\end{equation*}
Let
\begin{equation*}
\mathcal{X} \coloneqq \Span \{ x_\gamma : \gamma \in \Gamma \}.
\end{equation*}
Then $\mathcal{X} \subseteq \mathcal{V}$ is a subspace,
and
\begin{enumerate}
\item $\mathcal{W} \cap \mathcal{X} = \{ 0 \}$; and
\item $\mathcal{V} = \mathcal{W} + \mathcal{X} \coloneqq \{ w + x : w
\in \mathcal{W},\, x \in \mathcal{X} \}$.
\end{enumerate}
We say that $\mathcal{W}$ is
\hldefn{algebraically complemented} by $\mathcal{X}$.
The existence of $\mathcal{X}$ says that \hlnoteg{every subspace
is algebraically complemented}.
Note that $\mathcal{X}$ is \hlnotec{not unique}.
Indeed, if $\mathcal{W} \neq \{ 0 \}$, then replacing any basis vector
$x_\gamma$ by $x_\gamma + w$ for some $0 \neq w \in \mathcal{W}$
yields a different algebraic complement.
We can do something similar with normed linear spaces (NLSs).
If $\mathfrak{X}$ is a Banach space
and $\mathfrak{Y}$ is a closed subspace of $\mathfrak{X}$,
we say that $\mathfrak{Y}$ is \hldefn{topologically complemented}
if there exists a \hlnotee{closed} subspace $\mathfrak{Z} \subseteq \mathfrak{X}$
such that $\mathfrak{Z}$ is an algebraic complement to $\mathfrak{Y}$,
i.e. that
\begin{enumerate}
\item $\mathfrak{Y} \cap \mathfrak{Z} = \{ 0 \}$; and
\item $\mathfrak{X} = \mathfrak{Y} + \mathfrak{Z}$.
\end{enumerate}
However, \hlnotec{not every} closed subspace of a Banach space
is topologically complemented.
\marginnote{
\begin{culture}[Phillips' Theorem]
$c_0 = \{ (x_n)_n \in \mathbb{K}^{\mathbb{N}}
: \lim\limits_{n \to \infty} x_n = 0 \} \subseteq \ell_\infty$
is not topologically complemented.
\end{culture}
\noindent
Cited from \citealp{whitley1996}.
}
We shall write $\mathfrak{X} = \mathfrak{Y} \oplus \mathfrak{Z}$
if $\mathfrak{Z}$ is a topological complement to $\mathfrak{Y}$.
Now let $\mathcal{H}$ be a Hilbert space
and $\mathcal{M} \subseteq \mathcal{H}$ be a closed subspace.
\noindent
\hlbnotec{Claim: $\mathcal{H} = \mathcal{M} \oplus \mathcal{M}^{\perp}$}
From \cref{ex:perp_space_is_closed}, $\mathcal{M}^{\perp}$ is closed.
Notice that if $z \in \mathcal{M} \cap \mathcal{M}^{\perp}$,
then
\begin{equation*}
\langle z, z \rangle = 0,
\end{equation*}
and so $z = 0$. Thus $\mathcal{M} \cap \mathcal{M}^{\perp} = \{ 0 \}$.
Let $x \in \mathcal{H}$. By
\cref{thm:closest_point_from_a_convex_set_in_a_hilbert_space},
$\exists m_1 \in \mathcal{M}$ such that
\begin{equation*}
\norm{x - m_1} = \dist(x, \mathcal{M}).
\end{equation*}
Furthermore, by \cref{thm:a_way_to_orthogonality},
$m_2 \coloneqq x - m_1 \in \mathcal{M}^{\perp}$. Thus we see that
\begin{equation*}
x = m_1 + m_2 \in \mathcal{M} + \mathcal{M}^{\perp}.
\end{equation*}
Since $\mathcal{M}$ and $\mathcal{M}^{\perp}$ are both closed subspaces,
we have $\mathcal{H} = \mathcal{M} \oplus \mathcal{M}^{\perp}$. $\dashv$
In fact, the above claim is stronger than
it may at first appear.
Given a Banach space $\mathfrak{X}$ and a
topologically complemented closed subspace $\mathfrak{Y}$,
there is generally no expectation of
a \hlnotec{unique} topological complement for $\mathfrak{Y}$.
For instance, take $\mathfrak{X} = \mathbb{R}^2$ with, say,
$\norm{\cdot}_\infty$:
if we let $\mathfrak{Y}$ be the $x$-axis,
then any line that passes through the origin
and is not equal to the $x$-axis is a closed subspace
and a topological complement to $\mathfrak{Y}$.
However, in the above claim,
the space $\mathcal{M}^{\perp}$ is \hlnotec{unique},
and we call $\mathcal{M}^{\perp}$ the \hldefn{orthogonal complement}
of $\mathcal{M}$.
\item \hlbnoted{The orthogonal projection}\index{Orthogonal Projection}
With $\mathcal{H}$ and $\mathcal{M}$ as in the last remark,
we have that $\mathcal{H} = \mathcal{M} \oplus \mathcal{M}^{\perp}$.
Now for any $x \in \mathcal{H}$, if we suppose that we can write
\begin{equation*}
m_1 + n_1 = x = m_2 + n_2,
\end{equation*}
where $m_1, m_2 \in \mathcal{M}$ and $n_1, n_2 \in \mathcal{M}^{\perp}$,
then
\begin{equation*}
0 = x - x = m_1 - m_2 + n_1 - n_2 \implies
m_1 - m_2 = n_1 - n_2.
\end{equation*}
But $m_1 - m_2 \in \mathcal{M}$ and $n_1 - n_2 \in \mathcal{M}^{\perp}$,
and so $m_1 - m_2 = 0 = n_1 - n_2$,
i.e. $m_1 = m_2$ and $n_1 = n_2$.
Thus, we may uniquely represent each $x \in \mathcal{H}$ as
\begin{equation*}
x = m + n \quad
\text{ where } m \in \mathcal{M},\, n \in \mathcal{M}^{\perp}.
\end{equation*}
Now consider the map
\begin{align*}
P : \mathcal{H} &\to \mathcal{M} \\
x &\mapsto m.
\end{align*}
This map $P$ is called an \hlnoteb{orthogonal projection}.
\hlbnoted{Continuity of the orthogonal projection}
Observe that given $x_1 = m_1 + n_1, x_2 = m_2 + n_2 \in \mathcal{H}$
and $\kappa \in \mathbb{K}$,
we have
\begin{align*}
P(\kappa x_1 + x_2) &= P(\kappa (m_1 + n_1) + m_2 + n_2) \\
&= \kappa m_1 + m_2 = \kappa P(x_1) + P(x_2).
\end{align*}
Thus $P$ is linear.
Furthermore,
\begin{equation*}
P ( P ( x_1 ) ) = P( m_1 ) = m_1.
\end{equation*}
Thus $P^2 = P$, and we say that $P$ is an \hldefn{idempotent}.
\marginnote{On a related note, the `projection in the other
direction' is also an orthogonal projection. That is, $Q = I - P$,
where $I$ is the identity map, which gives
$Q(x_1) = (I - P)(x_1) = m_1 + n_1 - m_1 = n_1$,
is the orthogonal projection onto $\mathcal{M}^{\perp}$.}
In fact, for $x \in \mathcal{H}$, we have that
\begin{equation*}
\norm{Px}^2 = \norm{m}^2 \leq \norm{m}^2 + \norm{n}^2
= \norm{m + n}^2 = \norm{x}^2.
\end{equation*}
Thus the operator norm of $P$ satisfies
\begin{equation*}
\norm{P} = \sup \{ \norm{Px} : \norm{x} \leq 1 \} \leq 1.
\end{equation*}
It follows that $P$ is bounded, and since bounded linear maps
are continuous, $P$ is also continuous.
Finally, notice that if $\mathcal{M} \neq \{ 0 \}$ and $m \in \mathcal{M}$
with $\norm{m} = 1$, then
\begin{equation*}
\norm{Pm} = \norm{m} = 1,
\end{equation*}
so that in fact $\norm{P} = 1$.
\item Let $\emptyset \neq \mathcal{S} \subseteq \mathcal{H}$.
By the first remark, if we let
$\mathcal{M} = \overline{\Span} \mathcal{S}$,
then $\mathcal{M}$ is a closed subspace of $\mathcal{H}$.
By the second remark, we have
\begin{equation*}
\mathcal{H} = \mathcal{M} \oplus \mathcal{M}^{\perp}.
\end{equation*}
\begin{ex}
Show that $\mathcal{S}^{\perp} = \mathcal{M}^{\perp}$.
\end{ex}
Suppose $\exists 0 \neq x \in \left( \mathcal{S}^\perp \right)^\perp$
such that $x \notin \mathcal{M}$.
Notice that since $x \in \mathcal{H}$, we can write
\begin{equation*}
x = m_1 + m_2,
\end{equation*}
where $m_1 \in \mathcal{M}$ and $m_2 \in \mathcal{M}^\perp$.
Notice that $m_2 \neq 0$, since otherwise $x = m_1 \in \mathcal{M}$;
moreover, $m_2 \in \mathcal{M}^\perp = \mathcal{S}^\perp$.
But then
\begin{equation*}
\langle x, m_2 \rangle = \langle m_1 + m_2, m_2 \rangle
= 0 + \norm{m_2}^2 \neq 0.
\end{equation*}
Thus $x \notin \left( \mathcal{S}^\perp \right)^\perp$,
a contradiction.
It follows that $\left( \mathcal{S}^\perp \right)^\perp \subseteq
\overline{\Span} \mathcal{S}$, and so by the first remark,
\begin{equation*}
\left( \mathcal{S}^\perp \right)^\perp = \overline{\Span} \mathcal{S}.
\end{equation*}
\end{enumerate}
\end{remark}
\begin{lemma}[Finite Dimensional Linear Manifolds are Norm-closed Subspaces]\label{lemma:finite_dimensional_linear_manifolds_are_norm_closed_subspaces}
Let $\mathcal{H}$ be a Hilbert space over $\mathbb{K}$,
and suppose that $\mathcal{M} \subseteq \mathcal{H}$ is a finite-dimensional
linear manifold in $\mathcal{H}$.
Then $\mathcal{M}$ is norm-closed, and hence a subspace of $\mathcal{H}$.
\end{lemma}
\begin{proof}
The proof of
\cref{lemma:finite_dimensional_linear_manifolds_are_norm_closed_subspaces}
is left to the assignments.
\end{proof}
\begin{propo}[Formulae for Orthogonal Projections in Hilbert Spaces onto a Finite-Dimensional Subspace]\label{propo:formulae_for_orthogonal_projections_in_hilbert_spaces_onto_a_finite_dimensional_subspace}
Suppose $\mathcal{M}$ is a finite-dimensional subspace
of a Hilbert space $\mathcal{H}$ over $\mathbb{K}$.
Suppose that $\exists N \in \mathbb{N} \setminus \{ 0 \}$,
such that $\{ e_1, \ldots, e_N \}$ is an ONB for $\mathcal{M}$.
If $P$ is the orthogonal projection of $\mathcal{H}$ onto $\mathcal{M}$,
then
\begin{equation*}
Px = \sum_{n=1}^{N} \langle x, e_n \rangle e_n, \quad x \in \mathcal{H}.
\end{equation*}
\end{propo}
\begin{proof}
Let $x \in \mathcal{H} = \mathcal{M} \oplus \mathcal{M}^\perp$,
and write $x = m_1 + m_2$, with $m_1 \in \mathcal{M}$ and $m_2 \in \mathcal{M}^\perp$.
By the discussion of the orthogonal projection in the last remark,
$Px = m_1$ is the unique element of $\mathcal{M}$
such that $x - Px \in \mathcal{M}^\perp$.
Consider $w = \sum_{n=1}^{N} \langle x, e_n \rangle e_n$.
For $m \in \{ 1, \ldots, N \}$, we observe that
\begin{align*}
\langle x - w, e_m \rangle
&= \langle x, e_m \rangle
- \sum_{n=1}^{N} \langle x, e_n \rangle \langle e_n, e_m \rangle \\
&= \langle x, e_m \rangle - \langle x, e_m \rangle \cancelto{1}{\langle e_m, e_m \rangle} \\
&= 0.
\end{align*}
Thus $x - w \in \mathcal{M}^\perp$, and so
\begin{equation*}
Px = w = \sum_{n=1}^{N} \langle x, e_n \rangle e_n.
\end{equation*}
\end{proof}
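For instance, in $\mathbb{C}^3$ with the standard inner product and
$\mathcal{M} = \Span \{ e_1, e_2 \}$,
where $e_1, e_2$ are the first two standard basis vectors,
the formula gives
\begin{equation*}
Px = \langle x, e_1 \rangle e_1 + \langle x, e_2 \rangle e_2
= (x_1, x_2, 0), \quad x = (x_1, x_2, x_3) \in \mathbb{C}^3,
\end{equation*}
and $x - Px = (0, 0, x_3)$ is visibly orthogonal to $\mathcal{M}$.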
\begin{thm}[Bessel's Inequality]\index{Bessel's Inequality}\label{thm:bessel_s_inequality}
If $\{ e_n \}_{n=1}^{\infty}$ is an orthonormal set
in a Hilbert space $\mathcal{H}$, then for each $x \in \mathcal{H}$,
\begin{equation*}
\sum_{n=1}^{\infty} \abs{ \langle x, e_n \rangle }^2 \leq \norm{x}^2.
\end{equation*}
\end{thm}
\begin{proof}
For each $N \in \mathbb{N} \setminus \{ 0 \}$, set
\begin{equation*}
\mathcal{M}_N \coloneqq \Span \{ e_1, \ldots, e_N \}.
\end{equation*}
Then each $\mathcal{M}_N$ is a finite-dimensional subspace of $\mathcal{H}$
with ONB $\{ e_1, \ldots, e_N \}$.
For each $N$, let $P_N$ be the orthogonal projection from $\mathcal{H}$ to $\mathcal{M}_N$.
From the discussion of the orthogonal projection in the last remark,
since $\norm{e_n} = 1$, we have that $\norm{P_N} = 1$ for all $N$.
By
\cref{propo:formulae_for_orthogonal_projections_in_hilbert_spaces_onto_a_finite_dimensional_subspace},
we observe that
\begin{align*}
\norm{x}^2 &\geq \norm{P_N x}^2
= \norm{ \sum_{n=1}^{N} \langle x, e_n \rangle e_n }^2 \\
&= \sum_{n=1}^{N} \norm{\langle x, e_n \rangle e_n}^2 \\
&= \sum_{n=1}^{N} \abs{\langle x, e_n \rangle}^2
\end{align*}
by the \hyperref[thm:the_pythagorean_theorem_and_parallelogram_law]{Pythagorean Theorem}.
\end{proof}
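Note that the inequality may be strict when the orthonormal set is not
maximal. For instance, in $\ell_2(\mathbb{K})$ with the standard ONB
$\{ e_n \}_{n=1}^{\infty}$ from \cref{eg:examples_of_onb},
the orthonormal set $\{ e_n \}_{n=2}^{\infty}$ and the vector $x = e_1$ give
\begin{equation*}
\sum_{n=2}^{\infty} \abs{\langle e_1, e_n \rangle}^2 = 0 < 1 = \norm{e_1}^2.
\end{equation*}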
% section hilbert_spaces_continued (end)
% chapter lecture_15_jun_27th_2019 (end)
\chapter{Lecture 16 Jul 04th 2019}%
\label{chp:lecture_16_jul_04th_2019}
% chapter lecture_16_jul_04th_2019
\section{Hilbert Spaces (Continued 2)}%
\label{sec:hilbert_spaces_continued_2}
% section hilbert_spaces_continued_2
\begin{thm}[Countability of an Orthonormal Set in a Separable Hilbert Space]\label{thm:countability_of_an_orthonormal_set_in_a_separable_hilbert_space}
Let $\mathcal{H}$ be an (infinite-dimensional) separable Hilbert space,
and suppose that $\mathcal{E} \subseteq \mathcal{H}$ is an orthonormal set.
Then $\mathcal{E}$ is countable, say as $\mathcal{E} = \{ e_n \}_{n=1}^{\infty}$,
and if $x \in \mathcal{H}$, then
\begin{equation*}
\sum_{n=1}^{\infty} \langle x, e_n \rangle e_n
\end{equation*}
converges in $\mathcal{H}$.
\end{thm}
\begin{proof}
First, notice that for $x \neq y \in \mathcal{E}$, we have
\begin{equation*}
\norm{x - y}
= \langle x - y, x - y \rangle^{\frac{1}{2}}
= \left( \norm{x}^2 + \norm{y}^2 \right)^{\frac{1}{2}} = \sqrt{2}.
\end{equation*}
By \cref{ex:a_way_of_finding_a_countable_subset_in_a_separable_metric_space},
we have that $\mathcal{E}$ is indeed countable.
Let $x \in \mathcal{H}$ and $\epsilon > 0$. For each $N \geq 1$, set
\begin{equation*}
y_N = \sum_{n=1}^{N} \langle x, e_n \rangle e_n. \footnotemark
\end{equation*}
\footnotetext{The keen reader should notice that
we are simply taking $y_N = P_N x$ from the proof
of \hyperref[thm:bessel_s_inequality]{Bessel's Inequality}.}
Since $\mathcal{H}$ is complete (for it is a Hilbert space),
it suffices for us to show that $\{ y_N \}_{N=1}^{\infty}$ is Cauchy.
From \hyperref[thm:bessel_s_inequality]{Bessel's Inequality}, we know that
\begin{equation*}
\sum_{n=1}^{\infty} \abs{\langle x, e_n \rangle}^2 \leq \norm{x}^2 < \infty.
\end{equation*}
Since this series of non-negative terms converges, its tail tends to zero,
and so we can find some $N_0 > 0$ such that
\begin{equation*}
\sum_{n = N_0 + 1}^{\infty} \abs{\langle x, e_n \rangle}^2 < \epsilon.
\end{equation*}
Then for $M > N > N_0$, we see that
\begin{align*}
\norm{y_M - y_N}^2
&= \norm{ \sum_{n=1}^{M} \langle x, e_n \rangle e_n
- \sum_{n=1}^{N} \langle x, e_n \rangle e_n }^2 \\
&= \norm{ \sum_{n = N + 1}^{M} \langle x, e_n \rangle e_n }^2 \\
&= \sum_{n=N + 1}^{M} \abs{\langle x, e_n \rangle}^2 \quad \because
\text{\hyperref[thm:the_pythagorean_theorem_and_parallelogram_law]{Pythagorean
Theorem}} \\
&\leq \sum_{n = N_0 + 1}^{\infty} \abs{\langle x, e_n \rangle}^2 < \epsilon.
\end{align*}
Hence $\{ y_N \}_{N=1}^{\infty}$ is Cauchy,
and so it converges in $\mathcal{H}$.
\end{proof}
\begin{thm}[Characterization of an ONB]\label{thm:characterization_of_an_onb}
Let $\mathcal{E} = \{ e_n \}_{n=1}^{\infty}$ be an orthonormal set
in an infinite-dimensional, separable Hilbert space $\mathcal{H}$. TFAE:
\begin{enumerate}
\item $\mathcal{E}$ is an ONB, i.e. $\mathcal{E}$ is a maximal
orthonormal set in $\mathcal{H}$.
\item $\overline{\Span} \mathcal{E} = \mathcal{H}$.
\item $\forall x \in \mathcal{H}$, $x = \sum_{n=1}^{\infty} \langle x, e_n
\rangle e_n$.
\item (\hldefn{Parseval's Identity}) $\forall x \in \mathcal{H}$,
$\norm{x}^2 = \sum_{n=1}^{\infty} \abs{\langle x, e_n \rangle}^2$.
\end{enumerate}
\end{thm}
\begin{proof}
\hlbnoted{(1) $\implies$ (2)} Firstly, it is clear that
$\mathcal{E} \subseteq \mathcal{M} \coloneqq \overline{\Span} \mathcal{E} \subseteq \mathcal{H}$.
Suppose to the contrary that $\mathcal{M} \neq \mathcal{H}$.
Since $\mathcal{H} = \mathcal{M} \oplus \mathcal{M}^\perp$,
this forces $\mathcal{M}^\perp \neq \{ 0 \}$,
so we may pick $x \in \mathcal{M}^\perp$ with $\norm{x} = 1$.
Then $\mathcal{E} \cup \{ x \}$ is an orthonormal set
properly containing $\mathcal{E}$,
contradicting the maximality of $\mathcal{E}$.
\noindent
\hlbnoted{(2) $\implies$ (3)} Let $\mathcal{M} = \overline{\Span} \mathcal{E} = \mathcal{H}$,
and let $x \in \mathcal{H}$.
Set $y = \sum_{n=1}^{\infty} \langle x, e_n \rangle e_n$,
which converges by
\cref{thm:countability_of_an_orthonormal_set_in_a_separable_hilbert_space}.
By a computation similar to the one in the proof of
\cref{propo:formulae_for_orthogonal_projections_in_hilbert_spaces_onto_a_finite_dimensional_subspace},
we have that
\begin{align*}
\langle x - y, e_m \rangle = 0, \quad \text{ for each } m \in \mathbb{N} \setminus \{ 0 \}.
\end{align*}
It follows that $x - y \in \mathcal{M}^\perp = \{ 0 \}$, and so $x = y$.
\noindent
\hlbnoted{(3) $\implies$ (4)}
We see that
\begin{align*}
\norm{x}^2
&= \norm{ \sum_{n=1}^{\infty} \langle x, e_n \rangle e_n }^2 \\
&= \langle \sum_{n=1}^{\infty} \langle x, e_n \rangle e_n,
\sum_{m=1}^{\infty} \langle x, e_m \rangle e_m \rangle \\
&= \sum_{n=1}^{\infty} \sum_{m=1}^{\infty} \langle x, e_n \rangle
\overline{\langle x, e_m \rangle} \langle e_n, e_m \rangle \\
&= \sum_{n=1}^{\infty} \langle x, e_n \rangle
\overline{\langle x, e_n \rangle} \cancelto{1}{\langle e_n, e_n \rangle} \\
&= \sum_{n=1}^{\infty} \abs{\langle x, e_n \rangle}^2.
\end{align*}
\noindent
\hlbnoted{(4) $\implies$ (1)}
Suppose $x \in \mathcal{E}^\perp$.
Then $\langle x, e_n \rangle = 0$ for all $n \in \mathbb{N} \setminus \{ 0 \}$, so
\begin{equation*}
\norm{x}^2 = \sum_{n=1}^{\infty} \abs{\langle x, e_n \rangle}^2 = 0,
\end{equation*}
and so $x = 0$, i.e. $\mathcal{E}^\perp = \{ 0 \}$.
In particular, no unit vector can be adjoined to $\mathcal{E}$
while keeping the set orthonormal,
and hence $\mathcal{E}$ is indeed maximal.
\end{proof}
\begin{defn}[Unitary Operator]\index{Unitary Operator}\label{defn:unitary_operator}
Let $\mathcal{H}_1, \mathcal{H}_2$ be Hilbert spaces over $\mathbb{K}$.
A map $U : \mathcal{H}_1 \to \mathcal{H}_2$
is called a \hlnoteb{unitary operator}
if it is a linear bijection such that
\begin{equation*}
\langle Ux, Uy \rangle = \langle x, y \rangle
\end{equation*}
for all $x, y \in \mathcal{H}_1$.
\end{defn}
\begin{defn}[Isomorphism of Hilbert Spaces]\index{Isomorphism}\label{defn:isomorphism_of_hilbert_spaces}
Let $\mathcal{H}_1, \mathcal{H}_2$ be Hilbert spaces over $\mathbb{K}$.
We say that $\mathcal{H}_1$ and $\mathcal{H}_2$ are \hlnoteb{isomorphic}
if there exists a unitary operator
$U : \mathcal{H}_1 \to \mathcal{H}_2$.
We denote this relationship as $\mathcal{H}_1 \simeq \mathcal{H}_2$.
\end{defn}
\begin{note}
Note that $\forall x \in \mathcal{H}_1$, we have that
\begin{equation*}
\norm{Ux}^2 = \langle Ux, Ux \rangle = \langle x, x \rangle = \norm{x}^2.
\end{equation*}
In particular, unitary operators are \hldefn{isometries}.
Furthermore, observe that
\begin{equation*}
\norm{U} = \sup \{ \norm{Ux} : \norm{x} \leq 1 \} = 1
\end{equation*}
(provided $\mathcal{H}_1 \neq \{ 0 \}$),
and so unitary operators are bounded and continuous.
Moreover, the inverse map $U^{-1} : \mathcal{H}_2 \to \mathcal{H}_1$
defined by $U^{-1}(Ux) \coloneqq x$ is also linear,
and
\begin{equation*}
\langle U^{-1}(Ux), U^{-1}(Uy) \rangle = \langle x, y \rangle = \langle Ux, Uy \rangle,
\end{equation*}
and so the inverse of a unitary operator is also a unitary operator.
\end{note}
\begin{remark}
Note that if $\mathcal{L} \subseteq \mathcal{H}_1$ is a closed subspace,
then $\mathcal{L}$ is complete,
whence $U \mathcal{L}$ is also complete,
and hence closed in $\mathcal{H}_2$.
\end{remark}
The proof of the following theorem is left to the assignments.
\begin{thm}[Isomorphism of Infinite-dimensional Separable Hilbert Spaces]\label{thm:isomorphism_of_infinite_dimensional_separable_hilbert_spaces}
Any 2 infinite-dimensional separable Hilbert spaces over $\mathbb{K}$
are isomorphic.
\end{thm}
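While the proof is left as an exercise, the natural candidate map is worth
recording: fixing ONBs $\{ e_n \}_{n=1}^{\infty}$ and
$\{ f_n \}_{n=1}^{\infty}$ for $\mathcal{H}_1$ and $\mathcal{H}_2$
respectively (countable by
\cref{thm:countability_of_an_orthonormal_set_in_a_separable_hilbert_space}),
one considers
\begin{align*}
U : \mathcal{H}_1 &\to \mathcal{H}_2 \\
x &\mapsto \sum_{n=1}^{\infty} \langle x, e_n \rangle f_n,
\end{align*}
where the series converges since
$\left( \langle x, e_n \rangle \right)_n \in \ell_2$ by
\hyperref[thm:bessel_s_inequality]{Bessel's Inequality};
one then checks that $U$ is a linear bijection that preserves inner products.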
% section hilbert_spaces_continued_2 (end)
\section{Introduction to Fourier Analysis}%
\label{sec:introduction_to_fourier_analysis}
% section introduction_to_fourier_analysis
\begin{remark}
As a result of
\cref{thm:isomorphism_of_infinite_dimensional_separable_hilbert_spaces},
it follows that
if $\mathcal{H}$ is a complex, separable, infinite-dimensional Hilbert space,
then $\mathcal{H} \simeq \ell_2$.
\marginnote{One might wonder why we focus on $E = [-\pi, \pi]$.
For a relatively good motivation for the things that are to come,
please read \cref{chp:interest_in_2_pi_periodic_functions}.}
Now,
\begin{itemize}
\item from \cref{crly:separability_of_l_p_spaces},
$L_2([-\pi, \pi], \mathbb{K})$ is separable;
\item from \cref{eg:onb_of_bigl_2} of \cref{eg:examples_of_onb},
$L_2([-\pi, \pi], \mathbb{K})$ is infinite-dimensional,
with the ONB $\{ [\xi_n] \}_{n \in \mathbb{Z}}$; and
\item by \cref{thm:the_standard_inner_product_for_l_2_e_mathbb_k_},
$L_2([-\pi, \pi], \mathbb{K})$ is a Hilbert space,
with the inner product
\begin{equation*}
\langle [f], [g] \rangle = \int_{E} f \overline{g}.
\end{equation*}
\end{itemize}
Let us define
\begin{align*}
\mathcal{L}_2(\mathbb{T}, \mathbb{C})
&\coloneqq \left\{ f : \mathbb{R} \to \mathbb{C} :
f \text{ is measurable, $2 \pi$-periodic, } \right. \\
&\qquad\quad\left. \text{and }
\int_{[-\pi, \pi)} \abs{f}^2 < \infty
\right\}.
\end{align*}
\begin{ex}
Show that $\mathcal{L}_2(\mathbb{T}, \mathbb{C})$ is
a vector space, and that the function
\begin{align*}
\nu_2 : \mathcal{L}_2(\mathbb{T}, \mathbb{C}) &\to \mathbb{R} \\
f & \mapsto \left( \frac{1}{2 \pi} \int_{[-\pi, \pi)} \abs{f}^2 \right)^{1 / 2}
\end{align*}
is a seminorm on $\mathcal{L}_2(\mathbb{T}, \mathbb{C})$.
\end{ex}
Now let
\begin{equation*}
\mathcal{N}_2(\mathbb{T}, \mathbb{C}) \coloneqq
\{ f \in \mathcal{L}_2(\mathbb{T}, \mathbb{C}) : \nu_2(f) = 0 \}.
\end{equation*}
It follows that if
$L_2(\mathbb{T}, \mathbb{C}) = \mathcal{L}_2(\mathbb{T}, \mathbb{C})
/ \mathcal{N}_2(\mathbb{T}, \mathbb{C})$,
then $[f] = [g] \in L_2(\mathbb{T}, \mathbb{C})$ iff
$f = g$ a.e. on $\mathbb{R}$,
or equivalently $f = g$ a.e. on $[-\pi, \pi)$,
since they are $2 \pi$-periodic functions on $\mathbb{R}$.
We can then obtain a norm on $L_2(\mathbb{T}, \mathbb{C})$
by setting $\norm{[f]}_2 \coloneqq \nu_2(f)$.
Furthermore, the function
\begin{align*}
\langle \cdot, \cdot \rangle_{\mathbb{T}}
: L_2(\mathbb{T}, \mathbb{C}) \times L_2(\mathbb{T}, \mathbb{C}) &\to \mathbb{C} \\
([f], [g]) &\mapsto \frac{1}{2 \pi} \int_{[-\pi, \pi)} f \overline{g}
\end{align*}
is an inner product on $L_2(\mathbb{T}, \mathbb{C})$,
and $\norm{\cdot}_2$ is precisely the norm induced by the inner product.
By what we've seen in the last section, $L_2(\mathbb{T}, \mathbb{C})$
is complete wrt the norm $\norm{\cdot}_2$, and is therefore a Hilbert space.
One can finally verify that $\{[\xi_n]\}_{n \in \mathbb{Z}}$,
where $\xi_n(\theta) = e^{in \theta}$,
is indeed an ONB for $L_2(\mathbb{T}, \mathbb{C})$.
\end{remark}
\begin{eg}[Fourier Series for $L_2(\mathbb{T}, \mathbb{C})$, and the isomorphism between $L_2(\mathbb{T}, \mathbb{C})$ and $\ell_2(\mathbb{Z}, \mathbb{C})$]\label{eg:fourier_for_bigl2_in_complex_unit_circle}
Let $[f] \in L_2([-\pi, \pi], \mathbb{C})$.
From A5Q4, we can show that $\{ [ \xi_n ] \}_{n \in \mathbb{Z}}$,
where
\begin{align*}
\xi_n : [-\pi, \pi] &\to \mathbb{C} \\
\theta &\mapsto \frac{1}{\sqrt{2 \pi}} e^{in \theta},
\end{align*}
is an ONB for $L_2([-\pi, \pi], \mathbb{C})$.
For any $n \in \mathbb{Z}$, let
\begin{equation*}
\alpha_n^{[f]} \coloneqq \langle [f], [\xi_n] \rangle.
\end{equation*}
We shall refer to $\alpha_n^{[f]}$ as the
\hldefn{$n$\textsuperscript{th}-Fourier coefficient} of $[f]$
relative to the ONB $\{ [\xi_n] \}_{n \in \mathbb{Z}}$.
By A7Q2, we have that the map
\begin{align*}
U : L_2(\mathbb{T}, \mathbb{C}) &\to \ell_2(\mathbb{Z}, \mathbb{C}) \\
[f] &\mapsto \left( \alpha_{n}^{[f]} \right)_{ n \in \mathbb{Z} }
\end{align*}
is a unitary operator from the Hilbert space
$L_2(\mathbb{T}, \mathbb{C})$ to $\ell_2(\mathbb{Z}, \mathbb{C})$.
In particular, $U$ is injective.
This means that if $[f], [g] \in L_2(\mathbb{T}, \mathbb{C})$
and $\alpha_n^{[f]} = \alpha_n^{[g]}$ for all $n \in \mathbb{Z}$,
then $f = g$ a.e. on $\mathbb{R}$.
In other words, an element $[f] \in L_2(\mathbb{T}, \mathbb{C})$
is \hlnotec{completely determined by its Fourier coefficients}.
Moreover, given any sequence $(\beta_n)_{n \in \mathbb{Z}}
\in \ell_2(\mathbb{Z}, \mathbb{C})$, $\exists [f] \in L_2(\mathbb{T}, \mathbb{C})$
such that $\alpha_n^{[f]} = \beta_n$, for all $n \in \mathbb{Z}$.
Now let $[f] \in L_2(\mathbb{T}, \mathbb{C})$.
For each $N \in \mathbb{N} \setminus \{ 0 \}$, set
\begin{equation*}
\Delta_N([f]) = \sum_{n = -N}^{N} \alpha_n^{[f]} [\xi_n].
\end{equation*}
We shall refer to $\Delta_N([f])$ as the
\hldefn{$N$\textsuperscript{th} partial sum of the Fourier series} of $[f]$.
It follows from \cref{thm:characterization_of_an_onb} that
\begin{equation*}
[f] = \lim_{N \to \infty} \Delta_N([f]),
\end{equation*}
where the convergence is relative to the $\norm{\cdot}_2$-norm
which was mentioned above.
\end{eg}
\begin{note}
This is a beautiful occurrence: every element of $L_2(\mathbb{T}, \mathbb{C})$
can be written uniquely (up to a set of measure zero) as a
(countable) linear combination of the ONB $\{[\xi_n]\}_{n \in \mathbb{Z}}$,
mirroring the expansion with respect to an orthonormal basis
that we know from linear algebra.
\end{note}
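As a classical illustration of the power of Parseval's Identity
(\cref{thm:characterization_of_an_onb}) in this setting,
consider $[f] \in L_2(\mathbb{T}, \mathbb{C})$
where $f(\theta) = \theta$ on $[-\pi, \pi)$, and write
$\alpha_n^{[f]} = \langle [f], [\xi_n] \rangle_{\mathbb{T}}$
with $\xi_n(\theta) = e^{in\theta}$ as above.
Integrating by parts, for $0 \neq n \in \mathbb{Z}$,
\begin{equation*}
\alpha_n^{[f]}
= \frac{1}{2 \pi} \int_{[-\pi, \pi)} \theta e^{-in\theta} \dif{\theta}
= \frac{i (-1)^n}{n},
\qquad \text{while } \alpha_0^{[f]} = 0.
\end{equation*}
Since $\norm{[f]}_2^2 = \frac{1}{2 \pi} \int_{[-\pi, \pi)} \theta^2
\dif{\theta} = \frac{\pi^2}{3}$, Parseval's Identity yields
\begin{equation*}
\frac{\pi^2}{3} = \sum_{0 \neq n \in \mathbb{Z}} \frac{1}{n^2}
= 2 \sum_{n=1}^{\infty} \frac{1}{n^2},
\quad \text{i.e.} \quad
\sum_{n=1}^{\infty} \frac{1}{n^2} = \frac{\pi^2}{6}.
\end{equation*}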
We can then ask the question of whether the same result holds for other
similarly defined $L_p(\mathbb{T}, \mathbb{C})$, for $1 \leq p \leq \infty$
where $p \neq 2$. We shall focus on $L_1$.
Unfortunately, we shall see that this doesn't hold.
The rest of the course is dedicated to showing this.
\begin{fullwidth}
\begin{notation}
We shall note down here notations and definitions which we have
seen before, but which require some modification for the purposes
of our discussion.
\begin{itemize}
\item $\Trig(\mathbb{T}, \mathbb{C})
\coloneqq \Span \{ \xi_n : n \in \mathbb{Z} \}
= \{ \sum_{n=-N}^{N} \alpha_n \xi_n
: \alpha_n \in \mathbb{C},\, N \in \mathbb{N} \setminus \{ 0 \} \}$;
\item $\mathcal{C}(\mathbb{T}, \mathbb{C})
\coloneqq \{ f : \mathbb{R} \to \mathbb{C} : f
\text{ is continuous and } 2 \pi \text{-periodic} \}$;
\item $\SIMP(\mathbb{T}, \mathbb{C})
\coloneqq \{ f : \mathbb{R} \to \mathbb{C} :
f \restriction_{[-\pi, \pi)} \text{ is a simple function
and } f \text{ is } 2 \pi \text{-periodic} \}$;
\item $\STEP(\mathbb{T}, \mathbb{C})
\coloneqq \{ f : \mathbb{R} \to \mathbb{C} :
f \restriction_{[-\pi, \pi)} \text{ is a step function
and } f \text{ is } 2 \pi \text{-periodic} \}$;
\item for $1 \leq p < \infty$,
\begin{align*}
\mathcal{L}_p(\mathbb{T}, \mathbb{C})
&\coloneqq \{ f : \mathbb{R} \to \mathbb{C} :
f \text{ is measurable, } 2 \pi \text{-periodic,
and } \int_{[-\pi, \pi)} \abs{f}^p < \infty \};
\end{align*}
\item and for $p = \infty$,
\begin{align*}
\mathcal{L}_\infty(\mathbb{T}, \mathbb{C})
&\coloneqq \{ f : \mathbb{R} \to \mathbb{C} :
f \text{ is measurable, } 2 \pi \text{-periodic,
and essentially bounded} \};
\end{align*}
\end{itemize}
Note that
\begin{equation*}
\Trig(\mathbb{T}, \mathbb{C}) \subseteq \mathcal{C}(\mathbb{T}, \mathbb{C})
\subseteq \mathcal{L}_p(\mathbb{T}, \mathbb{C}), \quad 1 \leq p \leq \infty.
\end{equation*}
\end{notation}
\end{fullwidth}
% section introduction_to_fourier_analysis (end)
% chapter lecture_16_jul_04th_2019 (end)
\chapter{Lecture 17 Jul 09th 2019}%
\label{chp:lecture_17_jul_09th_2019}
% chapter lecture_17_jul_09th_2019
\section{Introduction to Fourier Analysis (Continued)}%
\label{sec:introduction_to_fourier_analysis_continued}
% section introduction_to_fourier_analysis_continued
As was the case with $p = 2$, for each $1 \leq p < \infty$,
$\mathcal{L}_p(\mathbb{T}, \mathbb{C})$ forms a vector space over $\mathbb{C}$,
and the map
\begin{align*}
\nu_p : \mathcal{L}_p(\mathbb{T}, \mathbb{C}) &\to \mathbb{R} \\
f &\mapsto \left( \frac{1}{2 \pi} \int_{[-\pi, \pi)} \abs{f}^p \right)^{1 / p}
\end{align*}
defines a seminorm on $\mathcal{L}_p(\mathbb{T}, \mathbb{C})$.
For $p = \infty$, echoing the argument in
\cref{sub:completeness_of_l_infty_e_k_}, we have that
\begin{equation*}
\nu_\infty(f) \coloneqq \inf \{ \delta > 0 :
m \{ \theta \in [-\pi, \pi) : \abs{f(\theta)} > \delta \} = 0 \},
\end{equation*}
for $f \in \mathcal{L}_\infty(\mathbb{T}, \mathbb{C})$ is a seminorm
on $\mathcal{L}_\infty(\mathbb{T}, \mathbb{C})$.
By \cref{propo:kernel_of_a_vector_space_is_a_linear_manifold},
for each $1 \leq p \leq \infty$, we can obtain a norm $\norm{\cdot}_p$
on
\begin{equation*}
L_p(\mathbb{T}, \mathbb{C})
\coloneqq \mathcal{L}_p(\mathbb{T}, \mathbb{C}) \Big/
\mathcal{N}_p(\mathbb{T}, \mathbb{C}),
\end{equation*}
where
\begin{equation*}
\mathcal{N}_p(\mathbb{T}, \mathbb{C}) \coloneqq \{ f \in
\mathcal{L}_p(\mathbb{T}, \mathbb{C}) : \nu_p(f) = 0 \}.
\end{equation*}
Again, we can find that $[f] = [g] \in L_p(\mathbb{T}, \mathbb{C})$
iff $f = g$ a.e. on $\mathbb{R}$.
\begin{ex}
Verify that for $f \in \mathcal{C}(\mathbb{T}, \mathbb{C})$,
\begin{equation*}
\norm{[f]}_\infty = \norm{f}_{\sup}
\coloneqq \sup \{ \abs{f(\theta)} : \theta \in [-\pi, \pi) \}.
\end{equation*}
\end{ex}
\begin{note}
Note that the supremum on the RHS of the above equation
is a finite number, since $f \in \mathcal{C}(\mathbb{T}, \mathbb{C})$
implies that $f$ is continuous on $\mathbb{R}$,
and hence $f$ is bounded on $[-\pi, \pi] \supseteq [-\pi, \pi)$.
\end{note}
Given any function $f : [-\pi, \pi) \to \mathbb{C}$,
let $\check{f} : \mathbb{R} \to \mathbb{C}$ be the $2 \pi$-periodic extension
of $f$; i.e. $\check{f}(\theta) = f(\theta)$ for $\theta \in [-\pi, \pi)$
and $\check{f}(\theta + 2 \pi) = \check{f}(\theta)$ for $\theta \in \mathbb{R}$.
It is clear that $\check{f}$ always exists
and is uniquely determined by $f$.
\begin{thm}[The $2 \pi$ periodic extension map is an isometric isomorphism]\label{thm:the_2_pi_periodic_extension_map_is_an_isometric_isomorphism}
Let $1 \leq p \leq \infty$. The map
\begin{align*}
\Xi_p : L_p([-\pi, \pi), \mathbb{C}) &\to L_p(\mathbb{T}, \mathbb{C}) \\
[f] &\mapsto [\check{f}]
\end{align*}
is an isometric isomorphism.
\end{thm}
\begin{ex}
Prove \cref{thm:the_2_pi_periodic_extension_map_is_an_isometric_isomorphism}.
\end{ex}
It follows from the above isometric isomorphism that all of our results
about $L_p$-spaces hold for their respective
$L_p(\mathbb{T}, \mathbb{C})$ counterparts.
Let us now focus on $L_1(\mathbb{T}, \mathbb{C})$.
\begin{defn}[The Fourier Coefficients and The Fourier Series]\index{Fourier Coefficient}\index{Fourier Series}\label{defn:fourier_coefficient_and_fourier_series}
For $f \in \mathcal{L}_1(\mathbb{T}, \mathbb{C})$ and $n \in \mathbb{Z}$,
we refer to
\begin{equation*}
\hat{f}(n) \coloneqq \frac{1}{2 \pi} \int_{[-\pi, \pi)} f \overline{\xi_n}
\end{equation*}
as the $n$\textsuperscript{th}-\hlnoteb{Fourier coefficient} of $f$.
We also refer to
\begin{equation*}
\sum_{n \in \mathbb{Z}} \hat{f}(n) \xi_n
\end{equation*}
as the \hlnoteb{Fourier series} of $f$ in $\mathcal{L}_1(\mathbb{T}, \mathbb{C})$.
\end{defn}
\begin{remark}
If $f, g \in \mathcal{L}_1(\mathbb{T}, \mathbb{C})$
and $f = g$ a.e. on $[-\pi, \pi)$, then
\begin{equation*}
\hat{f}(n) = \frac{1}{2 \pi} \int_{[-\pi, \pi)} f \overline{\xi_n}
= \frac{1}{2\pi} \int_{[-\pi, \pi)} g \overline{\xi_n}
= \hat{g}(n),\quad \forall n \in \mathbb{Z}.
\end{equation*}
Thus, if we set the \hlnoteb{$n$\textsuperscript{th}-Fourier coefficient}
of $[f] \in L_1(\mathbb{T}, \mathbb{C})$ as
\begin{equation*}
\alpha_n^{[f]} \coloneqq \hat{f}(n), \quad n \in \mathbb{Z},
\end{equation*}
as we did in \cref{eg:fourier_for_bigl2_in_complex_unit_circle},
then $\alpha_n^{[f]}$ is well-defined. We can thus define
\begin{equation*}
\sum_{n \in \mathbb{Z}} \alpha_n^{[f]} [\xi_n]
\end{equation*}
as the \hlnoteb{Fourier series} of $[f]$.
\end{remark}
Notice that we did not mention the convergence of the above series.
Up to now, the Fourier series is simply a formal series,
meant only to represent the sequence of partial sums
\begin{equation*}
\left( \sum_{n = -N}^{N} \alpha_n^{[f]} [\xi_n] \right)_{N = 0}^{\infty}.
\end{equation*}
We shall study the convergence of this series in what follows.
Note that we may extend the notion of a Fourier coefficient for
non-integer powers of $e^{i \theta}$;
i.e. for $f \in \mathcal{L}_1(\mathbb{T}, \mathbb{C})$ and $r \in \mathbb{R}$,
we define
\begin{equation*}
\hat{f}(r) = \frac{1}{2 \pi} \int_{[-\pi, \pi)} f \overline{\xi_r},
\end{equation*}
where $\xi_r(\theta) = e^{ir\theta}$ for all $\theta \in \mathbb{R}$.
\begin{remark}
In the case of $p = 2$, we've seen that
$\left( \alpha_n^{[f]} \right)_{n \in \mathbb{Z}} \in \ell_2(\mathbb{Z}, \mathbb{C})$.
While this does not hold for $[f] \in L_1(\mathbb{T}, \mathbb{C})$,
we can actually get pretty close.
First, notice that $\abs{\xi_r(\theta)} = 1$ for all $\theta \in \mathbb{R}$
and $r \in \mathbb{R}$. Thus for $f \in \mathcal{L}_1(\mathbb{T},
\mathbb{C})$,
\begin{align*}
\abs{\hat{f}(r)}
&= \abs{ \frac{1}{2 \pi} \int_{[-\pi, \pi)} f \overline{\xi_r} } \\
&\leq \frac{1}{2 \pi} \int_{[-\pi, \pi)} \abs{f \overline{\xi_r}} \\
&= \frac{1}{2 \pi} \int_{[-\pi, \pi)} \abs{f} \\
&= \nu_1(f) = \norm{[f]}_1.
\end{align*}
So as before, if $f, g \in \mathcal{L}_1(\mathbb{T}, \mathbb{C})$,
and $f = g$ a.e. on $[-\pi, \pi)$, then $\hat{f}(r) = \hat{g}(r)$
for all $r \in \mathbb{R}$.
Thus, we may define $\alpha_r^{[f]} \coloneqq \hat{f}(r)$,
for $r \in \mathbb{R}$.
It follows that
\begin{equation*}
\sup_{r \in \mathbb{R}} \abs{\alpha_r^{[f]}}
= \sup_{r \in \mathbb{R}} \abs{\hat{f}(r)}
\leq \norm{[f]}_1
\end{equation*}
for all $[f] \in L_1(\mathbb{T}, \mathbb{C})$.
In particular, we have that
\begin{equation*}
\left( \alpha_n^{[f]} \right)_{n \in \mathbb{Z}}
\in \ell_{\infty}(\mathbb{Z}, \mathbb{C}).
\end{equation*}
We can, in fact, do better.
\end{remark}
Let
\begin{equation*}
c_0(\mathbb{Z}, \mathbb{C}) \coloneqq \left\{ (z_n)_{n \in \mathbb{Z}} :
z_n \in \mathbb{C} \text{ for all } n \in \mathbb{Z}, \text{ and }
\lim_{\abs{n} \to \infty} z_n = 0 \right\}.
\end{equation*}
\begin{thm}[The Riemann-Lebesgue Lemma]\index{The Riemann-Lebesgue Lemma}\label{thm:the_riemann_lebesgue_lemma}
Let $f \in \mathcal{L}_1(\mathbb{T}, \mathbb{C})$. Then
\begin{equation*}
\lim_{r \to \infty} \hat{f}(r) = 0 = \lim_{r \to - \infty} \hat{f}(r).
\end{equation*}
In particular,
\begin{equation*}
\left( \alpha_n^{[f]} \right)_{n \in \mathbb{Z}} \in c_0(\mathbb{Z},
\mathbb{C}).
\end{equation*}
\end{thm}
\begin{strategy}
The key here is to realize that this is simple in the case of characteristic
functions of an interval.
Since every step function is a (finite) linear combination of
characteristic functions of intervals,
and the Lebesgue integral is linear,
the result extends to $\STEP(\mathbb{T}, \mathbb{C})$.
The result would hold for its equivalence classes,
and we then simply need to appeal to the density of
$[\STEP(\mathbb{T}, \mathbb{C})]$ in $L_1(\mathbb{T}, \mathbb{C})$,
which then gives us the result.
\end{strategy}
\begin{proof}
\hlbnoted{Case: Characteristic functions}
Let $f_0$ be the characteristic function of an interval $[s, t] \subseteq [-\pi, \pi)$,
i.e. $f_0 = \chi_{[s, t]}$.
Let $f \coloneqq \check{f}_0$ be the $2 \pi$-periodic extension
of $f_0$ to $\mathbb{R}$, so that $f \in \STEP(\mathbb{T}, \mathbb{C})$.
Then $f$ is bounded on a bounded interval with at most finitely many
discontinuities there, so $f$ is Riemann integrable, and
\hyperref[thm:bounded_riemann_integrable_functions_are_lebesgue_integrable]{its
Lebesgue and Riemann integrals agree}.
Then
\begin{align*}
\hat{f}(r)
&= \frac{1}{2 \pi} \int_{[-\pi, \pi)} \chi_{[s, t]} \overline{\xi_r} \\
&= \frac{1}{2 \pi} \int_{s}^{t} e^{-ir \theta} \dif{\theta} \\
&= \frac{1}{2 \pi} \left( \frac{e^{-ir t} - e^{-ir s}}{-ir} \right).
\end{align*}
Thus
\begin{equation*}
\abs{\hat{f}(r)}
\leq \frac{\abs{e^{-irt}} + \abs{e^{-irs}}}{2 \pi \abs{-ir}}
= \frac{2}{2 \pi \abs{r}} = \frac{1}{\pi \abs{r}}.
\end{equation*}
It is clear that
\begin{equation*}
\lim_{r \to \infty} \hat{f}(r) = 0 = \lim_{r \to -\infty} \hat{f}(r).
\end{equation*}
\noindent
\hlbnoted{Case: Step functions}
Let $f \in \STEP(\mathbb{T}, \mathbb{C})$,
and $f_0 \coloneqq f \restriction_{[-\pi, \pi)}$, and write
$f_0 = \sum_{k=1}^{M} \beta_k \chi_{H_k}$ as a disjoint representation,
where each $H_k = [s_k, t_k]$ is a subinterval of $[-\pi, \pi)$.
Then the result follows almost exactly as in the case of
characteristic functions, using the linearity of
the Lebesgue integral.
\noindent
\hlbnoted{Case: Final, generic case}
Let $[f] \in L_1(\mathbb{T}, \mathbb{C})$ and $\epsilon > 0$.
By the density of $[\STEP(\mathbb{T}, \mathbb{C})]$
in $L_1(\mathbb{T}, \mathbb{C})$,
let $g \in \STEP(\mathbb{T}, \mathbb{C})$ such that
\begin{equation*}
\norm{[f] - [g]}_1 < \frac{\epsilon}{2}.
\end{equation*}
Then
\begin{align*}
\hat{f}(r)
&= \frac{1}{2 \pi} \int_{[-\pi, \pi)} f \overline{\xi_r} \\
&= \frac{1}{2 \pi} \int_{[-\pi, \pi)} (f - g) \overline{\xi_r}
+ \frac{1}{2 \pi} \int_{[-\pi, \pi)} g \overline{\xi_r} \\
&= \widehat{f - g}(r) + \hat{g}(r).
\end{align*}
As seen before,
we have that
\begin{equation*}
\abs{\widehat{f - g}(r)} \leq \nu_1(f - g)
= \norm{[f - g]}_1 = \norm{[f] - [g]}_1 < \frac{\epsilon}{2}.
\end{equation*}
Now from the previous case, since $g \in \STEP(\mathbb{T}, \mathbb{C})$,
we may choose $N > 0$ such that $\abs{r} > N$ implies
$\abs{\hat{g}(r)} < \frac{\epsilon}{2}$.
Thus $\abs{r} > N$ implies that
\begin{equation*}
\abs{\hat{f}(r)} \leq \abs{ \widehat{f - g}(r) } + \abs{ \hat{g}(r) }
< \frac{\epsilon}{2} + \frac{\epsilon}{2} = \epsilon.
\end{equation*}
Thus
\begin{equation*}
\lim_{r \to \infty} \hat{f}(r) = 0 = \lim_{r \to -\infty} \hat{f}(r),
\end{equation*}
as required.
Recall that
\begin{equation*}
\alpha_n^{[f]} = \hat{f}(n), \quad n \in \mathbb{Z}.
\end{equation*}
It is clear that $\hat{f}(n) = \frac{1}{2 \pi} \int_{[-\pi, \pi)} f
\overline{\xi_n} \in \mathbb{C}$ and so $\alpha_n^{[f]} \in \mathbb{C}$,
and
\begin{equation*}
\lim_{\abs{n} \to \infty} \alpha_n^{[f]} = \lim_{\abs{n} \to \infty} \hat{f}(n) = 0.
\end{equation*}
Thus $\left(\alpha_n^{[f]}\right)_{n \in \mathbb{Z}} \in c_0(\mathbb{Z}, \mathbb{C})$.
\end{proof}
\begin{remark}
Recall that we had $[f] \in L_2(\mathbb{T}, \mathbb{C})$ iff
$(\alpha_n^{[f]})_{n \in \mathbb{Z}} \in \ell_2(\mathbb{Z}, \mathbb{C})$.
We have shown that $[f] \in L_1(\mathbb{T}, \mathbb{C})$ implies that
\begin{equation*}
(\alpha_n^{[f]})_{n \in \mathbb{Z}} \in c_0(\mathbb{Z}, \mathbb{C}).
\end{equation*}
However, the converse is not true. We shall see in the final chapter
that the map
\begin{align*}
\Lambda : (L_1(\mathbb{T}, \mathbb{C}), \norm{\cdot}_1)
&\to (c_0(\mathbb{Z}, \mathbb{C}), \norm{\cdot}_\infty) \\
[f] &\mapsto \left( \alpha_n^{[f]} \right)_{n \in \mathbb{Z}}
\end{align*}
is a continuous, injective linear map, but it is \hlnotec{not surjective}.
\end{remark}
We are left with some other questions as well;
for $[f] \in L_1(\mathbb{T}, \mathbb{C})$:
\begin{enumerate}
\item does the Fourier series $\sum_{n \in \mathbb{Z}} \alpha_n^{[f]} [\xi_n]$
of $[f]$ converge, and if so, in which sense?
Is it pointwise (a.e.), uniformly, or in the $L_1$-norm?
\item if the Fourier series does converge in some sense,
is its value $[f]$ itself?
\item is $[f]$ completely determined by its Fourier series, as
we have seen for $L_2$? That is, if $[f], [g] \in L_1(\mathbb{T}, \mathbb{C})$,
and $\alpha_n^{[f]} = \alpha_n^{[g]}$ for all $n \in \mathbb{Z}$,
is it true that $[f] = [g]$?
\end{enumerate}
% section introduction_to_fourier_analysis_continued (end)
\section{Convolution}%
\label{sec:convolution}
% section convolution
Recall that an algebra is a vector space over some field $\mathbb{F}$
which also happens to be a ring.
A \hldefn{Banach algebra} $\mathcal{A}$ is a Banach space over $\mathbb{K}$
which is also an algebra, in which \hlbnoteb{multiplication is jointly continuous},
as it satisfies the inequality
\begin{equation*}
\norm{ab} \leq \norm{a} \norm{b}
\end{equation*}
for all $a, b \in \mathcal{A}$.
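To see why this inequality yields the joint continuity of multiplication
(a standard estimate, sketched here for completeness): for
$a, a_0, b, b_0 \in \mathcal{A}$,
\begin{align*}
\norm{ab - a_0 b_0}
&\leq \norm{ab - a_0 b} + \norm{a_0 b - a_0 b_0} \\
&\leq \norm{a - a_0} \norm{b} + \norm{a_0} \norm{b - b_0},
\end{align*}
which tends to $0$ as $(a, b) \to (a_0, b_0)$.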
\begin{eg}
$(\mathcal{C}(X, \mathbb{K}), \norm{\cdot}_{\sup})$
is a Banach algebra for each \hlbnotec{locally compact, Hausdorff
topological space} $X$.
Similarly, $\mathbb{M}_n(\mathbb{K}) \simeq \mathcal{B}(\mathbb{K}^n)$
(for each $n \geq 1$), when equipped with the operator norm,
is a Banach algebra.
\end{eg}
Thus far, we have seen that $L_1(\mathbb{T}, \mathbb{C})$ is a Banach space,
but we have not studied whether it has any multiplicative structure.
For $f, g \in \mathcal{L}_1(\mathbb{T}, \mathbb{C})$, we set
\begin{equation*}
g \diamond f(\theta) \coloneqq \frac{1}{2 \pi}
\int_{[-\pi, \pi)} g(s) f(\theta - s) \dm(s),
\end{equation*}
where $\dm(s)$ plays the same role as the notation $\dif{x}$ in Riemann
integration: it simply indicates the variable with respect to (wrt) which
the integration is performed.
We refer to $g \diamond f(\theta)$ as the \hldefn{convolution} of $g$ and $f$.
Observe that if $[f_1] = [f_2], [g_1] = [g_2] \in L_1(\mathbb{T}, \mathbb{C})$,
then $g_1 \diamond f_1 = g_2 \diamond f_2$ a.e., and so we may define
\begin{equation*}
[g] * [f] \coloneqq [g \diamond f],
\end{equation*}
for all $[f], [g] \in L_1(\mathbb{T}, \mathbb{C})$.
The careful reader would quickly notice the following two points:
\begin{enumerate}
\item it is not clear that $g \diamond f(\theta) \in \mathbb{C}$
for any $\theta \in \mathbb{R}$;
\item it is much less clear that $g \diamond f \in \mathcal{L}_1(\mathbb{T},
\mathbb{C})$.
\end{enumerate}
To prove the above statements, we require \hlnotea{Fubini's Theorem},
which takes quite a bit of work to establish.
To avoid that overhead, we shall work around Fubini's Theorem:
we shall instead show that we can turn $\mathcal{L}_1(\mathbb{T}, \mathbb{C})$
(and in turn $L_1(\mathbb{T}, \mathbb{C})$) into a so-called
\hldefn{left module} over $\mathcal{C}(\mathbb{T}, \mathbb{C})$
using convolution.
\sidenote{\href{https://en.wikipedia.org/wiki/Left_module}{Wikipedia article}
for left module.}
That is, given \hlbnotee{$g \in \mathcal{C}(\mathbb{T}, \mathbb{C})$} and
$f \in \mathcal{L}_1(\mathbb{T}, \mathbb{C})$, we shall set
\begin{equation*}
g \diamond f(\theta) \coloneqq \frac{1}{2 \pi} \int_{[-\pi, \pi)} g(s)f(\theta
- s) \dm(s),
\end{equation*}
and prove that $g \diamond f \in \mathcal{C}(\mathbb{T}, \mathbb{C}) \subseteq
\mathcal{L}_1(\mathbb{T}, \mathbb{C})$.
\sidenote{Note $\mathcal{C}(\mathbb{T}, \mathbb{C}) \subseteq
\mathcal{L}_p(\mathbb{T}, \mathbb{C})$ for $1 \leq p \leq \infty$.}
Now if this is true, then whenever $f_1 \in \mathcal{L}_1(\mathbb{T}, \mathbb{C})$
and $f_1 = f$ a.e. on $\mathbb{R}$,
we have $g \diamond f(\theta) = g \diamond f_1(\theta)$ for all $\theta \in \mathbb{R}$,
i.e. $g \diamond f = g \diamond f_1$,
and we can thus define
\begin{equation*}
g * [f] = [g \diamond f], \quad [f] \in L_1(\mathbb{T}, \mathbb{C}).
\end{equation*}
One advantage to convolving with continuous functions only
is that we can make use of the Riemann integral.
This will allow us to garner more information about the
continuity properties of $\diamond$,
and ultimately about convergence properties of the Fourier series.
\begin{lemma}[Preservation of the Lebesgue Integral of $2 \pi$-periodic functions under certain Transformations]\label{lemma:preservation_of_the_lebesgue_integral_of_2pi_periodic_functions_under_certain_transformations}
Let $f \in \mathcal{L}_1(\mathbb{T}, \mathbb{C})$ and $s, \theta \in \mathbb{R}$.
\begin{enumerate}
\item
\begin{equation*}
\int_{[-\pi, \pi)} f = \int_{[-\pi, \pi)} \tau_s^\circ(f),
\end{equation*}
where $\tau_s^\circ(f)(\theta) = f(\theta - s)$ is a translation of $f$ by
$s$.
\item If $h(s) \coloneqq f(-s)$, $s \in \mathbb{R}$, is a
reflection of $f$ (on some axis), then
\begin{equation*}
\int_{[-\pi, \pi)} h = \int_{[-\pi, \pi)} f.
\end{equation*}
\item Let $\phi_{f, \theta} : \mathbb{R} \to \mathbb{C}$ be
$\phi_{f, \theta}(s) = f(\theta - s)$.
Then $\phi_{f, \theta} \in \mathcal{L}_1(\mathbb{T}, \mathbb{C})$ and
\begin{equation*}
\nu_1(\phi_{f, \theta}) = \nu_1(f).
\end{equation*}
That is,
\begin{equation*}
\frac{1}{2\pi} \int_{[-\pi, \pi)} \abs{f(\theta - s)} \dm(s)
= \frac{1}{2\pi} \int_{[-\pi, \pi)} \abs{f(t)} \dm(t).
\end{equation*}
\end{enumerate}
\end{lemma}
\begin{proof}
The proof of this lemma is in A6Q1.
\end{proof}
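As a quick sanity check of the first item (an added verification, not part
of the assignment): for $f = \xi_n$ with $0 \neq n \in \mathbb{Z}$, we have
$\tau_s^\circ(\xi_n)(\theta) = e^{in(\theta - s)} = e^{-ins} \xi_n(\theta)$,
so that
\begin{equation*}
\int_{[-\pi, \pi)} \tau_s^\circ(\xi_n)
= e^{-ins} \int_{[-\pi, \pi)} \xi_n
= e^{-ins} \cdot 0
= \int_{[-\pi, \pi)} \xi_n,
\end{equation*}
as the lemma predicts.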
\begin{defn}[Convolution]\index{Convolution}\label{defn:convolution}
Let $f \in \mathcal{L}_1(\mathbb{T}, \mathbb{C})$ and
$g \in \mathcal{C}(\mathbb{T}, \mathbb{C})$.
We define the \hlnoteb{convolution} of $f$ by $g$ to be the function
\begin{align*}
g \diamond f : \mathbb{R} &\to \mathbb{C} \\
\theta &\mapsto \frac{1}{2\pi} \int_{[-\pi, \pi)} g(s)f(\theta - s) \dm(s).
\end{align*}
\end{defn}
We still have not shown that $g \diamond f(\theta) \in \mathbb{C}$
for each $\theta \in \mathbb{R}$. Let's do that right now.
Fixing $\theta \in \mathbb{R}$, we see that
by
\cref{lemma:preservation_of_the_lebesgue_integral_of_2pi_periodic_functions_under_certain_transformations},
\begin{align*}
\abs{g \diamond f(\theta)}
&= \abs{ \frac{1}{2\pi} \int_{[-\pi, \pi)} g(s)f(\theta - s) \dm(s) } \\
&\leq \frac{1}{2\pi} \int_{[-\pi, \pi)} \abs{g(s)} \abs{f(\theta - s)} \dm(s) \\
&\leq \norm{g}_{\sup} \frac{1}{2\pi} \int_{[-\pi, \pi)}
\abs{\phi_{f, \theta}(s)} \dm(s) \\
&= \norm{g}_{\sup} \nu_1(\phi_{f, \theta}) \\
&= \norm{g}_{\sup} \nu_1(f) < \infty.
\end{align*}
It follows that $g \diamond f$ is indeed a complex-valued function.
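Moreover, since the bound above is uniform in $\theta$, we in fact obtain
\begin{equation*}
\norm{g \diamond f}_{\sup} \leq \norm{g}_{\sup} \nu_1(f)
= \norm{g}_{\sup} \norm{[f]}_1,
\end{equation*}
an estimate we shall rely on repeatedly below.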
The following is an extremely important lemma that we shall use extensively.
\begin{lemma}[Swapping Convolutions]\label{lemma:swapping_convolutions}
Let $f \in \mathcal{L}_1(\mathbb{T}, \mathbb{C})$ and
$g \in \mathcal{L}_\infty(\mathbb{T}, \mathbb{C})$.
If $\theta \in \mathbb{R}$, then
\begin{equation*}
\int_{[-\pi, \pi)} g(s) f(\theta - s) \dm(s)
= \int_{[-\pi, \pi)} g(\theta - t) f(t) \dm(t).
\end{equation*}
In particular, this holds if
\begin{equation*}
f \in \mathcal{L}_1(\mathbb{T}, \mathbb{C}) \quad \text{ and } \quad
g \in \mathcal{C}(\mathbb{T}, \mathbb{C}).
\end{equation*}
\end{lemma}
\begin{proof}
The proof of this lemma is in A6Q2.
\end{proof}
\begin{remark}
With \cref{lemma:swapping_convolutions},
for $f \in \mathcal{L}_1(\mathbb{T}, \mathbb{C})$ and
$g \in \mathcal{C}(\mathbb{T}, \mathbb{C})$,
we can define the \hldefn{convolution} of $g$ by $f$ as
\begin{equation*}
f \diamond g(\theta) = \frac{1}{2\pi} \int_{[-\pi, \pi)} g(\theta - t) f(t) \dm(t).
\end{equation*}
Consequently, we have that $f \diamond g(\theta) = g \diamond f(\theta)$
for all $\theta \in \mathbb{R}$, and so we shall simply refer
to this function as \hlbnoteb{the convolution of $f$ and $g$}.
\end{remark}
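As an illustrative computation (worth keeping in mind for later, though not
stated as a lemma in the lectures): each character $\xi_n$ behaves like an
eigenvector under convolution. Indeed, for $f \in \mathcal{L}_1(\mathbb{T},
\mathbb{C})$ and $n \in \mathbb{Z}$, using \cref{lemma:swapping_convolutions},
\begin{align*}
\xi_n \diamond f(\theta)
&= \frac{1}{2\pi} \int_{[-\pi, \pi)} \xi_n(\theta - t) f(t) \dm(t) \\
&= e^{in\theta} \cdot \frac{1}{2\pi} \int_{[-\pi, \pi)} f(t) e^{-int} \dm(t)
= \hat{f}(n) \, \xi_n(\theta).
\end{align*}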
\begin{ex}\label{ex:cts_2pi_periodic_fns_are_unifly_cts}
Let $h : \mathbb{R} \to \mathbb{C}$ be a $2 \pi$-periodic
and continuous function.
Prove that $h$ is uniformly continuous.
\sidenote{This is a rather simple (even proof-wise) but important realization
in our theories going forward.}
\end{ex}
\begin{propo}[Continuity of the Convolution of $f$ and $g$ where $g$ is Continuous]\label{propo:continuity_of_the_convolution_of_f_and_g_where_g_is_continuous}
Let $g \in \mathcal{C}(\mathbb{T}, \mathbb{C})$ and
$f \in \mathcal{L}_1(\mathbb{T}, \mathbb{C})$.
Then $g \diamond f \in \mathcal{C}(\mathbb{T}, \mathbb{C})$.
\end{propo}
\begin{proof}
First, note that by \cref{ex:cts_2pi_periodic_fns_are_unifly_cts},
$g$ is uniformly continuous.
Let $\epsilon > 0$, and wlog suppose that $\nu_1(f) > 0$
(otherwise $f = 0$ a.e., so $g \diamond f = 0$ is trivially continuous).
We can then choose $\delta > 0$ such that $\forall x, y \in \mathbb{R}$,
$\abs{x - y} < \delta$ implies that $\abs{g(x) - g(y)} <
\frac{\epsilon}{\nu_1(f)}$.
Now for any $\theta, \theta_0 \in \mathbb{R}$ such that
$\abs{\theta - s - (\theta_0 - s)} = \abs{\theta - \theta_0} < \delta$,
for any $s \in \mathbb{R}$, we have that
\begin{equation*}
\abs{ g(\theta - s) - g(\theta_0 - s) } < \frac{\epsilon}{\nu_1(f)}.
\end{equation*}
Then by \cref{lemma:swapping_convolutions} and the last remark, we have
\begin{align*}
&\abs{g \diamond f(\theta) - g \diamond f(\theta_0)} \\
&= \frac{1}{2\pi} \abs{ \int_{[-\pi, \pi)} g(\theta - s)f(s)
- g(\theta_0 - s)f(s) \dm(s) } \\
&\leq \frac{1}{2\pi} \int_{[-\pi, \pi)}
\abs{g(\theta - s) - g(\theta_0 - s)} \abs{f(s)} \dm(s) \\
&< \frac{1}{2\pi} \int_{[-\pi, \pi)} \frac{\epsilon}{\nu_1(f)} \abs{f(s)} \dm(s) \\
&= \frac{\epsilon}{\nu_1(f)} \nu_1(f) = \epsilon.
\end{align*}
Thus $g \diamond f$ is (uniformly) continuous.
That $g \diamond f$ is $2 \pi$-periodic follows from $g$ and $f$
being $2 \pi$-periodic themselves.
\end{proof}
\newthought{We now want} to see whether, given
$[f_1] = [f_2] \in L_1(\mathbb{T}, \mathbb{C})$,
we have $g \diamond f_1 = g \diamond f_2$.
This is, in particular, motivated by what we already saw in $L_2(\mathbb{T}, \mathbb{C})$,
where this realization allowed us to work solely
with $L_2(\mathbb{T}, \mathbb{C})$ instead of $\mathcal{L}_2(\mathbb{T}, \mathbb{C})$.
Fortunately, this indeed holds for $L_1(\mathbb{T}, \mathbb{C})$.
Observe that if $[f_1] = [f_2] \in L_1(\mathbb{T}, \mathbb{C})$, then
$f_1 = f_2$ a.e. on $\mathbb{R}$, whence $gf_1 = gf_2$ a.e.
We thus see that for all $\theta \in \mathbb{R}$,
\begin{align*}
g \diamond f_1(\theta)
&= f_1 \diamond g(\theta) \\
&= \frac{1}{2\pi} \int_{[-\pi, \pi)} g(\theta - s) f_1(s) \dm(s) \\
&= \frac{1}{2\pi} \int_{[-\pi, \pi)} g(\theta - s) f_2(s) \dm(s) \\
&= f_2 \diamond g(\theta) = g \diamond f_2(\theta).
\end{align*}
We may thus extend our notion of convolutions to $L_1(\mathbb{T}, \mathbb{C})$.
\begin{defn}[Convolution on $L_1(\mathbb{T}, \mathbb{C})$]\index{Convolution}\label{defn:convolution_on_romanl_1_mathbb_t_mathbb_c_}
Given $g \in \mathcal{C}(\mathbb{T}, \mathbb{C})$ and $[f] \in L_1(\mathbb{T}, \mathbb{C})$,
we define the \hlnoteb{convolution} of $g$ and $[f]$ to be
\begin{equation*}
g * [f] \coloneqq [g \diamond f],
\end{equation*}
where $g \diamond f \in \mathcal{L}_1(\mathbb{T}, \mathbb{C})$
is the convolution introduced in \cref{defn:convolution}.
\end{defn}
\begin{defn}[Convolution Operator with Kernel]\index{Convolution Operator}\index{Kernel}\label{defn:convolution_operator_with_kernel}
We define the \hlnoteb{convolution operator} with \hlnoteb{kernel} $g$
to be the map
\begin{align*}
C_g : L_1(\mathbb{T}, \mathbb{C}) &\to L_1(\mathbb{T}, \mathbb{C}) \\
[f] &\mapsto g * [f].
\end{align*}
\end{defn}
\begin{warning}
The kernel defined above has nothing to do with the notion of
kernels in abstract algebra.
\end{warning}
\begin{remark}
Observe that if $[f_1], [f_2] \in L_1(\mathbb{T}, \mathbb{C})$,
and if $\kappa \in \mathbb{C}$, then
\begin{align*}
C_g(\kappa[f_1] + [f_2])
&= g * [\kappa f_1 + f_2] \\
&= \frac{1}{2\pi} \int_{[-\pi, \pi)}
g(s)(\kappa f_1(\theta - s) + f_2(\theta - s)) \dm(s) \\
&= \kappa \frac{1}{2\pi} \int_{[-\pi, \pi)} g(s) f_1(\theta - s) \dm(s) \\
&\qquad + \frac{1}{2\pi} \int_{[-\pi, \pi)} g(s) f_2(\theta - s) \dm(s) \\
&= \kappa g * [f_1] + g * [f_2] \\
&= \kappa C_g([f_1]) + C_g([f_2]).
\end{align*}
Thus $C_g$ is a \hlbnotec{linear map} on $L_1(\mathbb{T}, \mathbb{C})$.
Since $(L_1(\mathbb{T}, \mathbb{C}), \norm{\cdot}_1)$ is a Banach space,
and $C_g$ is linear, it is natural to ask if $C_g$ is bounded,
\sidenote{This would also mean that $C_g$ is continuous.}
and if so, what is its operator norm?
We shall see that the answer to this question
is intimately related to the question of convergence
of Fourier series of elements of $L_1(\mathbb{T}, \mathbb{C})$.
\end{remark}
With our current tool set, it is rather difficult to directly compute
$\norm{C_g}$. In particular, we have to deal with monstrosities
of the following form:
\begin{equation*}
\frac{1}{2\pi} \int_{[-\pi, \pi)} \left(
\frac{1}{2\pi} \int_{[-\pi, \pi)} g(s) f(\theta - s) \dm(s)
\right) e^{-in\theta} \dm(\theta).
\end{equation*}
What we shall do is reformulate $C_g$ as a vector-valued Riemann integral
on $L_1(\mathbb{T}, \mathbb{C})$. We shall be able to extend this notion
of convolution beyond the Banach space $L_1(\mathbb{T}, \mathbb{C})$.
To that end, we first need to understand the notion of a
\hlnotea{homogeneous Banach space}.
% section convolution (end)
% chapter lecture_17_jul_09th_2019 (end)
\chapter{Lecture 18 Jul 11th 2019}%
\label{chp:lecture_18_jul_11th_2019}
% chapter lecture_18_jul_11th_2019
\section{Convolution (Continued)}%
\label{sec:convolution_continued}
% section convolution_continued
Let $f \in \mathcal{L}_1(\mathbb{T}, \mathbb{C})$, and $s \in \mathbb{R}$.
Consider the function
\begin{align*}
\tau_s^{\circ}(f) : \mathbb{R} &\to \mathbb{C} \\
\theta &\mapsto f(\theta - s),
\end{align*}
which we have seen before.
One should think of $\tau_s^\circ$ as \hlbnotec{translating $f$ by $s$}.
The superscript $\circ$ above $\tau_s$ is to indicate
that we are acting on \hlbnotec{functions}.
When working with elements of $L_1(\mathbb{T}, \mathbb{C})$,
we shall drop this superscript.
Now, since $\mathfrak{M}(\mathbb{R})$ is invariant under translation,
the Lebesgue measure is translation-invariant,
and the set of $2\pi$-periodic functions is invariant under translation,
it follows that
\begin{equation*}
\tau_s^\circ(f) \in \mathcal{L}_1(\mathbb{T}, \mathbb{C}).
\end{equation*}
Furthermore, if $[f] = [g] \in L_1(\mathbb{T}, \mathbb{C})$, then
\begin{equation*}
[\tau_s^{\circ}(f)] = [\tau_s^{\circ}(g)].
\end{equation*}
Thus, we may define the operation of \hldefn{translation by $s$}
on $L_1(\mathbb{T}, \mathbb{C})$ as
\begin{equation*}
\tau_s([f]) \coloneqq [\tau_s^{\circ}(f)].
\end{equation*}
\begin{defn}[Homogeneous Banach Spaces]\index{Homogeneous Banach Spaces}\label{defn:homogeneous_banach_spaces}
A \hlnoteb{homogeneous Banach space} over $\mathbb{T}$
is a linear manifold $\mathfrak{B}$ in $L_1(\mathbb{T}, \mathbb{C})$,
equipped with the norm $\norm{\cdot}_{\mathfrak{B}}$
wrt which $(\mathfrak{B}, \norm{\cdot}_{\mathfrak{B}})$ is a Banach space,
satisfying
\begin{enumerate}
\item $\norm{[f]}_1 \leq \norm{[f]}_{\mathfrak{B}}$ for all $[f] \in \mathfrak{B}$;
\item $[\Trig(\mathbb{T}, \mathbb{C})] \subseteq \mathfrak{B}$;
\item $\mathfrak{B}$ is invariant under translation; i.e.
$\forall [f] \in \mathfrak{B}$ and $s \in \mathbb{R}$,
\begin{equation*}
\tau_s[f] = [\tau_s^{\circ}(f)] \in \mathfrak{B};
\end{equation*}
\item $\forall [f] \in \mathfrak{B},\, s \in \mathbb{R}$,
$\norm{\tau_s[f]}_{\mathfrak{B}} = \norm{[f]}_{\mathfrak{B}}$; and
\item for each $[f] \in \mathfrak{B}$, the map
\begin{align*}
\Psi_{[f]} : \mathbb{R} &\to \mathfrak{B} \\
s &\mapsto \tau_s[f]
\end{align*}
is continuous.
\sidenote{This means that the translation itself is
a continuous process on a homogeneous Banach space.}
\end{enumerate}
\end{defn}
\begin{remark}
It may be surprising to find that
a linear manifold $\mathfrak{M}$ of a Banach space $\mathfrak{X}$
may not be closed in the ambient norm,
but that $(\mathfrak{M}, \norm{\cdot}_{\mathfrak{M}})$ is complete
in its own norm.
But one may quickly notice
that each of the spaces $L_p(\mathbb{T}, \mathbb{C})$
is dense in $L_1(\mathbb{T}, \mathbb{C})$, for $1 \leq p < \infty$,
and each of them is complete under their corresponding $\norm{\cdot}_p$-norm.
So we've already seen the above `surprising' fact.
\end{remark}
\begin{eg}[{$([\mathcal{C}(\mathbb{T}, \mathbb{C})], \norm{\cdot}_{\infty})$} is a homogeneous Banach space]\label{eg:equivclss_of_cts_2pi_periodic_fns_is_a_hom_ban_sp}
Recall that
\begin{equation*}
[\mathcal{C}(\mathbb{T}, \mathbb{C})] \subseteq L_\infty(\mathbb{T}, \mathbb{C})
\end{equation*}
is a subset of $L_1(\mathbb{T}, \mathbb{C})$ and it is a linear manifold.
Furthermore, for $f \in \mathcal{C}(\mathbb{T}, \mathbb{C})$, we have that
\begin{equation*}
\norm{[f]}_{\infty} = \norm{f}_{\sup}
\coloneqq \sup \{ \abs{f(\theta)} : \theta \in [-\pi, \pi) \},
\end{equation*}
and that $([\mathcal{C}(\mathbb{T}, \mathbb{C})], \norm{\cdot}_{\infty})$
is a Banach space.
We shall show that it is, in fact, a homogeneous Banach space.
\begin{enumerate}
\item Let $[f] \in [\mathcal{C}(\mathbb{T}, \mathbb{C})]$. Then
\begin{equation*}
\norm{[f]}_1 = \frac{1}{2\pi} \int_{[-\pi, \pi)} \abs{f}
\leq \frac{1}{2\pi} \int_{[-\pi, \pi)} \norm{f}_{\sup}
= \norm{f}_{\sup} = \norm{[f]}_{\infty}.
\end{equation*}
\item It is clear that $\xi_n(\theta) = e^{in\theta}$ defines a continuous,
$2\pi$-periodic function of $\theta \in \mathbb{R}$, and so $\xi_n \in \mathcal{C}(\mathbb{T}, \mathbb{C})$.
Since $[\mathcal{C}(\mathbb{T}, \mathbb{C})]$ is a linear manifold,
it follows that $[\Trig(\mathbb{T}, \mathbb{C})] \subseteq
[\mathcal{C}(\mathbb{T}, \mathbb{C})]$.
\item If $f \in \mathcal{C}(\mathbb{T}, \mathbb{C})$,
then it is clear that $\tau_s^{\circ}(f) \in \mathcal{C}(\mathbb{T},
\mathbb{C})$,
since a translation of $2\pi$-periodic continuous function
is still a $2\pi$-periodic continuous function.
Thus $[\mathcal{C}(\mathbb{T}, \mathbb{C})]$ is translation invariant.
\item Let $[f] \in [\mathcal{C}(\mathbb{T}, \mathbb{C})]$.
Then
\begin{align*}
\norm{\tau_s[f]}_{\infty}
&= \norm{[\tau_s^{\circ}(f)]}_{\infty}
= \norm{\tau_s^{\circ}(f)}_{\infty} \\
&= \sup \{ \abs{f(\theta - s)} : \theta \in \mathbb{R} \} \\
&= \sup \{ \abs{f(\theta)} : \theta \in \mathbb{R} \} \\
&= \norm{f}_{\sup} = \norm{[f]}_{\infty}.
\end{align*}
\item Let $[f] \in [\mathcal{C}(\mathbb{T}, \mathbb{C})]$, and wlog wma
$f \in \mathcal{C}(\mathbb{T}, \mathbb{C})$.
By \cref{ex:cts_2pi_periodic_fns_are_unifly_cts}, $f$ is uniformly
continuous: $\forall \epsilon > 0$, $\exists \delta > 0$ such that
$\forall x, y \in \mathbb{R}$, if $\abs{x - y} < \delta$,
then $\abs{f(x) - f(y)} < \frac{\epsilon}{2}$.
In particular, for any $s, s_0 \in \mathbb{R}$ with $\abs{s - s_0} < \delta$
and any $\theta \in \mathbb{R}$, since
$\abs{(\theta - s) - (\theta - s_0)} = \abs{s - s_0} < \delta$, we have
\begin{equation*}
\abs{f(\theta - s) - f(\theta - s_0)} < \frac{\epsilon}{2}.
\end{equation*}
Thus, for such $s$ and $s_0$, we have
\begin{align*}
\norm{\Psi_{[f]}(s) - \Psi_{[f]}(s_0)}_{\infty}
&= \norm{\tau_s[f] - \tau_{s_0}[f]}_{\infty} \\
&= \norm{[\tau_s^{\circ}(f)] - [\tau_{s_0}^{\circ}(f)]}_{\infty} \\
&= \norm{\tau_s^{\circ}(f) - \tau_{s_0}^{\circ}(f)}_{\sup} \\
&= \sup \{ \abs{f(\theta - s) - f(\theta - s_0)} : \theta \in \mathbb{R} \} \\
&\leq \sup \left\{ \frac{\epsilon}{2} : \theta \in \mathbb{R} \right\}
= \frac{\epsilon}{2} < \epsilon.
\end{align*}
It follows that $\Psi_{[f]}$ is indeed continuous
at every $s \in \mathbb{R}$.
\end{enumerate}
This concludes the proof that
$( [\mathcal{C}(\mathbb{T}, \mathbb{C})], \norm{\cdot}_{\infty} )$
is a homogeneous Banach space.
\end{eg}
\begin{eg}[$(L_p(\mathbb{T}, \mathbb{C}), \norm{\cdot}_p)$ is a homogeneous Banach space for $1 \leq p < \infty$]\label{eg:biglp_of_2pi_periodic_fns_is_a_hom_ban_sp}
Let $1 \leq p < \infty$. We shall show that
$(L_p(\mathbb{T}, \mathbb{C}), \norm{\cdot}_p)$ is
a homogeneous Banach space.
\begin{enumerate}
\item Let $f \in \mathcal{L}_p(\mathbb{T}, \mathbb{C})$,
and $q$ the Lebesgue conjugate of $p$, i.e. $\frac{1}{p} + \frac{1}{q} = 1$.
Recall from
\cref{propo:measurable_function_broken_down_into_an_absolute_part_and_a_scaling_part}
that there exists a measurable function $\rho : \mathbb{R} \to \mathbb{T}$
such that $f = \rho \abs{f}$.
Since $\abs{\rho} = 1$ everywhere, $\rho$ is measurable,
and $\rho$ may be taken $2\pi$-periodic (as $f$ itself is),
we have $\rho \in \mathcal{L}_q(\mathbb{T}, \mathbb{C})$.
Furthermore,
\begin{equation*}
\norm{[\rho]}_{q}
= \left( \frac{1}{2\pi} \int_{[-\pi, \pi)} \abs{\rho}^q \right)^{1 / q}
\leq \left( \frac{1}{2\pi} \int_{[-\pi, \pi)} 1 \right)^{1 / q} = 1.
\end{equation*}
Most importantly, for us here,
$\norm{[\overline{\rho}]}_q = \norm{[\rho]}_q = 1$.
It follows, by
\hyperref[thm:holders_inequality_lp_spaces]{Hölder's Inequality}, that
\begin{equation*}
\norm{[f]}_1 =
\frac{1}{2\pi} \int_{[-\pi, \pi)} \abs{f \cdot \overline{\rho}}
\leq \norm{[f]}_p \norm{[\overline{\rho}]}_q \leq \norm{[f]}_p.
\end{equation*}
\item As a consequence of the last example, we observe that
\begin{equation*}
[\Trig(\mathbb{T}, \mathbb{C})] \subseteq [\mathcal{C}(\mathbb{T}, \mathbb{C})]
\subseteq L_p(\mathbb{T}, \mathbb{C}) \subseteq L_1(\mathbb{T}, \mathbb{C}).
\end{equation*}
\item Translation invariance follows from the computation in the
next part, which shows that $\norm{\tau_s[f]}_p = \norm{[f]}_p < \infty$,
so that $\tau_s[f] \in L_p(\mathbb{T}, \mathbb{C})$.
\item Let $[f] \in L_p(\mathbb{T}, \mathbb{C})$ and $s \in \mathbb{R}$.
We observe that by
\cref{lemma:preservation_of_the_lebesgue_integral_of_2pi_periodic_functions_under_certain_transformations},
\begin{align*}
\norm{\tau_s[f]}_p
&= \norm{[\tau_s^{\circ}(f)]}_p \\
&= \left( \frac{1}{2\pi} \int_{[-\pi, \pi)}
\abs{f(\theta - s)}^p \dm(\theta) \right)^{1 / p} \\
&= \left( \frac{1}{2\pi} \int_{[-\pi, \pi)}
\abs{f(\theta)}^p \dm(\theta) \right)^{1 / p} \\
&= \norm{[f]}_p < \infty.
\end{align*}
\item Let $[f] \in L_p(\mathbb{T}, \mathbb{C})$ and $s \in \mathbb{R}$.
\WTS $\forall \epsilon > 0$, $\exists \delta > 0$
such that $\forall s_0 \in \mathbb{R}$,
if $\abs{s - s_0} < \delta$, then
\begin{align*}
\norm{\Psi_{[f]}(s) - \Psi_{[f]}(s_0)}_p
&= \norm{\tau_s[f] - \tau_{s_0}[f]}_p \\
&= \norm{[\tau_s^{\circ}(f)] - [\tau_{s_0}^{\circ}(f)]}_p \\
&= \left( \frac{1}{2\pi} \int_{[-\pi, \pi)}
\abs{f(\theta - s) - f(\theta - s_0)}^p \dm(\theta) \right)^{1 / p}.
\end{align*}
We realize that we need to see if we can have $f(\theta - s)$
to be as close to $f(\theta - s_0)$ as possible under
the right circumstances.
Notice that $[\mathcal{C}(\mathbb{T}, \mathbb{C})]$ is dense in
$L_p(\mathbb{T}, \mathbb{C})$. Thus, we may find
$[g] \in [\mathcal{C}(\mathbb{T}, \mathbb{C})]$ such that
\begin{equation*}
\norm{[f] - [g]}_p < \frac{\epsilon}{3}.
\end{equation*}
Furthermore, we can pick this $g$ such that $\exists \delta > 0$
such that for $\abs{s - s_0} < \delta$, we have
\begin{equation*}
\norm{\tau_s^{\circ}(g) - \tau_{s_0}^{\circ}(g)}_\infty < \frac{\epsilon}{3},
\end{equation*}
and this is by the last example.
Note that
\begin{equation*}
\norm{\cdot}_1 \leq \norm{\cdot}_p \leq \norm{\cdot}_\infty.
\end{equation*}
Thus by the same $\delta$, we have
\begin{align*}
\norm{\tau_s^{\circ}(f) - \tau_{s_0}^{\circ}(f)}_p
&\leq \norm{\tau_s^{\circ}(f) - \tau_s^{\circ}(g)}_p
+ \norm{\tau_s^{\circ}(g) - \tau_{s_0}^{\circ}(g)}_p \\
&\qquad+ \norm{\tau_{s_0}^{\circ}(g) - \tau_{s_0}^{\circ}(f)}_p \\
&< \frac{\epsilon}{3} + \frac{\epsilon}{3} + \frac{\epsilon}{3}
= \epsilon
\end{align*}
which completes the proof while sparing us any direct integration.
\end{enumerate}
\end{eg}
\begin{eg}[$(L_\infty(\mathbb{T}, \mathbb{C}), \norm{\cdot}_{\infty})$ is not a homogeneous Banach space]
The situation for $p = \infty$ is different.
It satisfies the first four conditions, but fails the last:
translation under this norm is not continuous.
That sounds sensible, given how the norm is defined as a supremum
and not some nice elementary function, but we shall see
exactly where it falls short.
\begin{enumerate}
\item This is an easy exercise: for $[f] \in L_\infty(\mathbb{T}, \mathbb{C})$,
\begin{equation*}
\norm{[f]}_1 = \frac{1}{2\pi} \int_{[-\pi, \pi)} \abs{f}
\leq \frac{1}{2\pi} \int_{[-\pi, \pi)} \norm{f}_{\sup}
= \norm{f}_{\sup} = \norm{[f]}_{\infty}.
\end{equation*}
\item Again, by \cref{eg:equivclss_of_cts_2pi_periodic_fns_is_a_hom_ban_sp},
we have that
\begin{equation*}
[\Trig(\mathbb{T}, \mathbb{C})]
\subseteq [\mathcal{C}(\mathbb{T}, \mathbb{C})]
\subseteq L_\infty(\mathbb{T}, \mathbb{C}).
\end{equation*}
\item For $[f] \in L_\infty(\mathbb{T}, \mathbb{C})$ and $s \in \mathbb{R}$,
we have
\begin{align*}
\norm{\tau_s[f]}_{\infty}
&= \norm{[\tau_s^{\circ}(f)]}_\infty \\
&= \norm{f(\theta - s)}_{\sup} \\
&= \sup \{ \abs{f(\theta - s)} : \theta \in \mathbb{R} \} \\
&= \sup \{ \abs{f(\theta)} : \theta \in \mathbb{R} \} \\
&= \norm{f}_{\sup} = \norm{[f]}_\infty < \infty.
\end{align*}
\item The computation in the previous part shows exactly this.
\end{enumerate}
For the translation not being continuous, consider the function
\begin{equation*}
f_0 \coloneqq \chi_{[0, \pi)} \in \mathcal{L}_\infty([-\pi, \pi), \mathbb{C})
\end{equation*}
and let
\begin{equation*}
f = \check{f}_0 \in \mathcal{L}_\infty(\mathbb{T}, \mathbb{C})
\end{equation*}
be the $2\pi$-periodic extension of $f_0$.
For $-\pi < s < 0$, we see that
\begin{equation*}
\tau_s^{\circ}(f)(\theta) - \tau_0^{\circ}(f)(\theta) = 1 - 0 = 1
\end{equation*}
for all $\theta \in (s, 0)$,
and so
\begin{equation*}
\norm{\tau_s[f] - \tau_0[f]}_\infty = 1.
\end{equation*}
In particular,
\begin{equation*}
\lim_{s \to 0} \norm{\tau_s[f] - \tau_0[f]}_\infty = 1 \neq 0
= \norm{\tau_0[f] - \tau_0[f]}_\infty,
\end{equation*}
i.e. even as $s$ approaches $0$, $\tau_s[f]$ does not approach $\tau_0[f]$.
Thus, in particular, $s \mapsto \tau_s[f]$ is not continuous at $0$.
\end{eg}
Let $g \in \mathcal{C}(\mathbb{T}, \mathbb{C})$ and $[f] \in L_1(\mathbb{T}, \mathbb{C})$.
We defined the convolution of $g$ and $[f]$
to be $g * [f] \coloneqq [g \diamond f]$, where
\begin{equation*}
g \diamond f(\theta) = \frac{1}{2\pi} \int_{[-\pi, \pi)}
g(s) f(\theta - s) \dm(s).
\end{equation*}
So we defined $g * [f]$ by first defining $g \diamond f$ pointwise,
using Lebesgue integration.
We showed in \cref{eg:biglp_of_2pi_periodic_fns_is_a_hom_ban_sp}
that $(L_1(\mathbb{T}, \mathbb{C}), \norm{\cdot}_1)$ is
a homogeneous Banach space over $\mathbb{T}$.
Then by the 5\textsuperscript{th} condition in the
\hyperref[defn:homogeneous_banach_spaces]{definition}, the function
\begin{align*}
\beta : \mathbb{R} &\to L_1(\mathbb{T}, \mathbb{C}) \\
s &\mapsto g(s) \tau_s[f]
\end{align*}
is continuous.
By \cref{thm:continuous_functions_are_riemann_integrable},
\begin{equation*}
\frac{1}{2\pi} \int_{-\pi}^{\pi} \beta(s) \dif{s}
= \frac{1}{2\pi} \int_{-\pi}^{\pi} g(s)\tau_s[f] \dif{s}
\end{equation*}
exists in $L_1(\mathbb{T}, \mathbb{C})$, and it is obtained
as an $\norm{\cdot}_1$-limit of Riemann sums
$S(\beta, P_N, P_N^*) \in L_1(\mathbb{T}, \mathbb{C})$
using partitions $P_N$ of $[-\pi, \pi]$
with corresponding choices $P_N^*$ of test values for $P_N$.
Fixing $g \in \mathcal{C}(\mathbb{T}, \mathbb{C})$,
we can define the map
\begin{align*}
\Gamma_g : L_1(\mathbb{T}, \mathbb{C}) &\to L_1(\mathbb{T}, \mathbb{C}) \\
[f] &\mapsto \frac{1}{2\pi} \int_{-\pi}^{\pi} g(s)\tau_s[f] \dif{s}.
\end{align*}
Notice that $\Gamma_g$ is linear: for $[f], [h] \in L_1(\mathbb{T}, \mathbb{C})$,
we have
\begin{align*}
\Gamma_g([f] + [h])
&= \Gamma_g([f + h]) \\
&= \frac{1}{2\pi} \int_{-\pi}^{\pi} g(s) \tau_s([f + h]) \dif{s} \\
&= \frac{1}{2\pi} \int_{-\pi}^{\pi} g(s)[\tau_s^{\circ}(f + h)] \dif{s} \\
&= \frac{1}{2\pi} \int_{-\pi}^{\pi} g(s)[f(\theta - s) + h(\theta - s)] \dif{s} \\
&= \frac{1}{2\pi} \int_{-\pi}^{\pi} g(s)
([f(\theta - s)] + [h(\theta - s)]) \dif{s} \\
&= \frac{1}{2\pi} \int_{-\pi}^{\pi} g(s)\tau_s[f] + g(s)\tau_s[h] \dif{s} \\
&= \Gamma_g([f]) + \Gamma_g([h]).
\end{align*}
One quickly realizes the resemblance of $\Gamma_g$ to
\hyperref[defn:convolution_operator_with_kernel]{$C_g$}.
After all, in particular,
\begin{equation*}
\tau_s[f] = [\tau_s^\circ(f)], \quad \text{ and } \quad
\tau_s^\circ(f)(\theta) = f(\theta - s),
\end{equation*}
for all $\theta \in \mathbb{R}$.
We shall make showing $\Gamma_g = C_g$ our next goal, so that
for $[f] \in L_1(\mathbb{T}, \mathbb{C})$, we have
\begin{equation*}
C_g[f] = g * [f] = [g \diamond f]
= \frac{1}{2\pi} \int_{-\pi}^{\pi} g(s) \tau_s[f] \dif{s} = \Gamma_g[f].
\end{equation*}
This is, however, not an obvious or trivial result,
especially since the two constructions are entirely different;
one is an equivalence class of convolutions,
while the other is an integral of convolution-like expressions
but involving equivalence classes.
% section convolution_continued (end)
% chapter lecture_18_jul_11th_2019 (end)
\chapter{Lecture 19 Jul 16th 2019}%
\label{chp:lecture_19_jul_16th_2019}
% chapter lecture_19_jul_16th_2019
\section{Convolution (Continued 2)}%
\label{sec:convolution_continued_2}
% section convolution_continued_2
By \cref{eg:equivclss_of_cts_2pi_periodic_fns_is_a_hom_ban_sp},
it follows that if $f \in \mathcal{C}(\mathbb{T}, \mathbb{C})$, then
the map $s \mapsto \tau_s[f]$, or equivalently $s \mapsto \tau_s^\circ(f)$,
is continuous from $(\mathbb{R}, \abs{\cdot})$ to
$(\mathcal{C}(\mathbb{T}, \mathbb{C}), \norm{\cdot}_{\sup})$.
\begin{lemma}[Pointwise Value of $\Gamma_g$]\label{lemma:pointwise_value_of_gamma_g}
Let $f, g \in (\mathcal{C}(\mathbb{T}, \mathbb{C}), \norm{\cdot}_{\sup})$.
Let
\begin{equation*}
\Gamma_g^\circ(f)
\coloneqq \frac{1}{2\pi} \int_{-\pi}^{\pi} g(s)\tau_s^{\circ}(f) \dif{s},
\end{equation*}
taken as a Banach space Riemann integral
in $(\mathcal{C}(\mathbb{T}, \mathbb{C}), \norm{\cdot}_{\sup})$.
Then
\begin{equation*}
\Gamma_g^{\circ}(f)(\theta) = g \diamond f(\theta)
= \frac{1}{2\pi} \int_{[-\pi, \pi)} g(s)f(\theta - s) \dm(s)
\end{equation*}
for all $\theta \in \mathbb{R}$.
\end{lemma}
\begin{strategy}
The most difficult part of this proof is to understand the difference
between $\Gamma_g^\circ(f)$ and $g \diamond f$.
For $\Gamma_g^{\circ}(f)$,
since $(\mathcal{C}(\mathbb{T}, \mathbb{C}), \norm{\cdot}_{\sup})$ is a Banach space,
and $\beta : \mathbb{R} \to \mathcal{C}(\mathbb{T}, \mathbb{C})$ given by
$\beta(s) \coloneqq g(s)\tau_s^{\circ}(f) \in \mathcal{C}(\mathbb{T}, \mathbb{C})$
is continuous, by \cref{thm:continuous_functions_are_riemann_integrable},
$\Gamma_g^{\circ}(f)$ is a $\norm{\cdot}_{\sup}$-limit
of Riemann sums $S(\beta, P_N, P_N^*)$.
We may further, wlog, suppose that for each $N \geq 1$,
$P_N \in \mathcal{P}([-\pi, \pi])$ is a regular partition of $[-\pi, \pi]$
into $2^N$ subintervals of equal length $\frac{2\pi}{2^N}$,
and we may pick $P_N^* = P_N \setminus \{ - \pi \}$
so that $P_N^*$ is a set of test values for $P_N$.
On the other hand, $g \diamond f$ is the convolution of $g$ and $f$,
which was
\hyperref[defn:convolution]{defined pointwise}
via Lebesgue integration.
\end{strategy}
\begin{proof}
For a fixed $\theta_0 \in \mathbb{R}$, we may define
$\gamma_{\theta_0} : \mathbb{R} \to \mathbb{C}$ as
\begin{equation*}
\gamma_{\theta_0}(s) = g(s)f(\theta_0 - s), \quad s \in \mathbb{R}.
\end{equation*}
Since both $g$ and $f$ are continuous and $2\pi$-periodic,
$\gamma_{\theta_0}$ is also continuous and $2\pi$-periodic.
Thus $\gamma_{\theta_0}$ is bounded and Riemann integrable on $[-\pi, \pi)$.
By \cref{thm:bounded_riemann_integrable_functions_are_lebesgue_integrable},
we have
\begin{align*}
g \diamond f(\theta_0)
&= \frac{1}{2\pi} \int_{[-\pi, \pi)} g(s)f(\theta_0 - s) \dm(s) \\
&= \frac{1}{2\pi} \int_{[-\pi, \pi)} \gamma_{\theta_0}(s) \dm(s) \\
&= \frac{1}{2\pi} \int_{-\pi}^{\pi} \gamma_{\theta_0}(s) \dif{s} \\
&= \frac{1}{2\pi} \int_{-\pi}^{\pi} g(s)f(\theta_0 - s) \dif{s}.
\end{align*}
Since $(\mathbb{C}, \abs{\cdot})$ is a Banach space,
by \cref{thm:continuous_functions_are_riemann_integrable}, with the same $P_N$
and $P_N^*$ as defined in our strategy, we have
\begin{equation*}
g \diamond f(\theta_0) = \frac{1}{2\pi} \int_{-\pi}^{\pi} \gamma_{\theta_0}(s) \dif{s}
= \lim_{N \to \infty} S(\gamma_{\theta_0}, P_N, P_N^*).
\end{equation*}
Finally,
\begin{align*}
&\norm{\Gamma_g^{\circ}(f) - S(\beta, P_N, P_N^*)}_{\sup} \\
&\geq \abs{\Gamma_g^{\circ}(f)(\theta_0) - S(\beta, P_N, P_N^*)(\theta_0)} \\
&= \abs{\Gamma_g^\circ(f)(\theta_0) -
\sum_{n=1}^{2^N} (\beta(p_n))(\theta_0)(p_n - p_{n-1})} \\
&= \abs{\Gamma_g^\circ(f)(\theta_0) -
\sum_{n=1}^{2^N} (g(p_n)f(\theta_0 - p_n))(p_n - p_{n-1})} \\
&= \abs{\Gamma_g^\circ(f)(\theta_0) -
\sum_{n=1}^{2^N} \gamma_{\theta_0}(p_n)(p_n - p_{n-1})} \\
&= \abs{\Gamma_g^{\circ}(f)(\theta_0) - S(\gamma_{\theta_0}, P_N, P_N^*)}.
\end{align*}
Then, since
$\lim_{N \to \infty} \norm{\Gamma_g^\circ(f) - S(\beta, P_N, P_N^*)}_{\sup} = 0$,
it follows that
\begin{equation*}
\lim_{N \to \infty} \abs{\Gamma_g^{\circ}(f)(\theta_0) - S(\gamma_{\theta_0}, P_N, P_N^*)} = 0.
\end{equation*}
Thus
\begin{equation*}
\Gamma_g^\circ(f)(\theta_0) = \lim_{N \to \infty} S(\gamma_{\theta_0}, P_N, P_N^*)
= g \diamond f(\theta_0).
\end{equation*}
Since $\theta_0 \in \mathbb{R}$ was arbitrary, we indeed have
\begin{equation*}
\Gamma_g^\circ(f) = g \diamond f.
\end{equation*}
\end{proof}
\begin{thm}[Equivalence of $\Gamma_g$ and $C_g$]\label{thm:equivalence_of_gamma_g_and_c_g_}
Let $g \in \mathcal{C}(\mathbb{T}, \mathbb{C})$
and $[f] \in L_1(\mathbb{T}, \mathbb{C})$.
Let $\Gamma_g$ be as defined before; i.e.
\begin{equation*}
\Gamma_g[f] \coloneqq \frac{1}{2\pi} \int_{-\pi}^{\pi} g(s) \tau_s[f] \dif{s},
\end{equation*}
where the integral is a Banach space Riemann integral in
$(L_1(\mathbb{T}, \mathbb{C}), \norm{\cdot}_1)$.
Then
\begin{equation*}
\Gamma_g[f] = g * [f] = [g \diamond f] = C_g[f].
\end{equation*}
\end{thm}
\begin{proof}
By the $\norm{\cdot}_1$-density of
$[\mathcal{C}(\mathbb{T}, \mathbb{C})]$ in $L_1(\mathbb{T}, \mathbb{C})$,
we can make use of \cref{lemma:pointwise_value_of_gamma_g}.
In particular, we can find a sequence $(f_m)_{m=1}^{\infty}$
in $\mathcal{C}(\mathbb{T}, \mathbb{C})$ such that
\begin{equation*}
\lim_{m \to \infty} \norm{[f_m] - [f]}_1 = 0.
\end{equation*}
Thus, for each $m \geq 1$, we have
\begin{equation*}
\Gamma_g[f_m] = \frac{1}{2\pi} \int_{-\pi}^{\pi} g(s) \tau_s[f_m] \dif{s}.
\end{equation*}
Since $f_m \in \mathcal{C}(\mathbb{T}, \mathbb{C})$, for each $m \geq 1$,
the map $s \mapsto g(s) \tau_s^\circ(f_m)$ is continuous,
thus
\begin{equation*}
\Gamma_g^\circ(f_m) = \frac{1}{2\pi} \int_{-\pi}^{\pi} g(s) \tau_s^\circ(f_m) \dif{s}
\end{equation*}
converges in $(\mathcal{C}(\mathbb{T}, \mathbb{C}), \norm{\cdot}_{\sup})$ by the last lemma.
In particular, for an appropriate sequence $(P_N)_N$
of partitions of $[-\pi, \pi]$, we have that
\begin{equation*}
\Gamma_g^\circ(f_m) = \lim_{N \to \infty} S(\beta_m, P_N, P_N^*),
\end{equation*}
where $\beta_m(s) = g(s) \tau_s^\circ(f_m)$, for $s \in [-\pi, \pi)$.
But given any Riemann sum $S(\beta_m, Q, Q^*)$ of the form
\begin{equation*}
\sum_{k=1}^{M} \beta_m(q_k^*)(q_k - q_{k-1})
= \sum_{k=1}^{M} g(q_k^*)\tau_{q_k^*}^\circ(f_m)(q_k - q_{k-1})
\end{equation*}
in $\mathcal{C}(\mathbb{T}, \mathbb{C})$, its image in
$[\mathcal{C}(\mathbb{T}, \mathbb{C})]$ is
\begin{equation*}
[S(\beta_m, Q, Q^*)] = \sum_{k=1}^{M} g(q_k^*) \tau_{q_k^*}[f_m](q_k - q_{k-1}).
\end{equation*}
Since $h \mapsto [h]$ from
$(\mathcal{C}(\mathbb{T}, \mathbb{C}), \norm{\cdot}_{\sup})$ to
$([\mathcal{C}(\mathbb{T}, \mathbb{C})], \norm{\cdot}_{\infty})$
is a bijective linear isometry, the image of $\Gamma_g^\circ(f_m)$
under this map is
\begin{equation*}
[\Gamma_g^\circ(f_m)] = \lim_{N \to \infty} [S(\beta_m, P_N, P_N^*)],
\end{equation*}
and this convergence is wrt the $\norm{\cdot}_{\infty}$-norm.
On the other hand, by the definition of each $[S(\beta_m, P_N, P_N^*)]$,
we have, precisely,
\begin{equation*}
\lim_{N \to \infty} [S(\beta_m, P_N, P_N^*)] = \Gamma_g([f_m])
\in ([\mathcal{C}(\mathbb{T}, \mathbb{C})], \norm{\cdot}_{\infty}),
\end{equation*}
thus
\begin{equation*}
[\Gamma_g^\circ(f_m)] = \Gamma_g[f_m], \quad m \geq 1.
\end{equation*}
Now
\begin{equation*}
[S(\beta_m,P_N,P_N^*)]
\in [\mathcal{C}(\mathbb{T},\mathbb{C})] \subseteq L_1(\mathbb{T},\mathbb{C})
\end{equation*}
and
\begin{equation*}
\Gamma_g[f_m]
\in [\mathcal{C}(\mathbb{T},\mathbb{C})] \subseteq L_1(\mathbb{T},\mathbb{C}).
\end{equation*}
Since $[\mathcal{C}(\mathbb{T},\mathbb{C})]$ is a homogeneous Banach space,
$\norm{[h]}_1 \leq \norm{[h]}_{\infty}$ for all $[h] \in
[\mathcal{C}(\mathbb{T},\mathbb{C})]$.
Thus
\begin{align*}
0 &\leq \lim_{N \to \infty}
\norm{[\Gamma_g^{\circ}(f_m)] - [S(\beta_m,P_N,P_N^*)]}_1 \\
&\leq \lim_{N \to \infty}
\norm{[\Gamma_g^\circ(f_m)] - [S(\beta_m,P_N,P_N^*)]}_{\infty}
= 0,
\end{align*}
and so
\begin{equation*}
\Gamma_g[f_m] = [\Gamma_g^\circ(f_m)]
= \lim_{N \to \infty} [S(\beta_m, P_N, P_N^*)],
\end{equation*}
where the convergence happens in $(L_1(\mathbb{T}, \mathbb{C}), \norm{\cdot}_1)$.
\noindent
\hlbnoted{Step 1} \WTS $\forall m \geq 1$, $\Gamma_g[f_m] = g * [f_m]$.
By \cref{lemma:pointwise_value_of_gamma_g}, $\forall m \geq 1$,
we have $\Gamma_g^\circ(f_m) = g \diamond f_m$.
Thus
\begin{equation*}
\Gamma_g[f_m] = [\Gamma_g^\circ(f_m)] = [g \diamond f_m] = g * [f_m]
\end{equation*}
for $m \geq 1$.
\noindent
\hlbnoted{Step 2} \WTS $g * [f] = \lim_{m \to \infty} g * [f_m]$
in $(L_1(\mathbb{T}, \mathbb{C}), \norm{\cdot}_1)$.
One way we can show this is by realizing that we want
\begin{equation*}
0 = \lim_{m \to \infty} (g * [f_m] - g * [f])
= \lim_{m \to \infty} [g \diamond f_m - g \diamond f],
\end{equation*}
and for $\theta \in \mathbb{R}$,
\begin{align*}
(g \diamond f_m - g \diamond f)(\theta)
&= \frac{1}{2\pi} \int_{[-\pi, \pi)} g(s)(f_m(\theta - s) - f(\theta - s)) \dm(s) \\
&= g \diamond (f_m - f)(\theta).
\end{align*}
As noted after \cref{defn:convolution}, we have
\begin{equation*}
\abs{g \diamond (f_m - f)(\theta)} \leq \norm{g}_{\sup} \norm{[f_m] -
[f]}_1.
\end{equation*}
Thus for $m \geq 1$,
\begin{align*}
\norm{g * [f_m] - g * [f]}_1
&= \norm{g * [f_m - f]}_1 \\
&= \frac{1}{2\pi} \int_{[-\pi, \pi)} \abs{g \diamond (f_m - f)(\theta)} \dm(\theta) \\
&\leq \frac{1}{2\pi} \int_{[-\pi, \pi)} \norm{g}_{\sup} \norm{[f_m] - [f]}_1 \dm(\theta) \\
&= \norm{g}_{\sup} \norm{[f_m] - [f]}_1.
\end{align*}
Since
\begin{equation*}
\lim_{m \to \infty} \norm{[f_m] - [f]}_1 = 0,
\end{equation*}
\sidenote{I bet you forgot this! :P} it follows that
\begin{equation*}
g * [f] = \lim_{m \to \infty} g * [f_m]
\end{equation*}
in $(L_1(\mathbb{T}, \mathbb{C}), \norm{\cdot}_1)$.
\noindent
\hlbnoted{Step 3} \WTS $\Gamma_g[f] = \lim_{m \to \infty} \Gamma_g[f_m]$
in $(L_1(\mathbb{T}, \mathbb{C}), \norm{\cdot}_1)$.
We see that by the properties of a homogeneous Banach space,
we have
\begin{align*}
\norm{\Gamma_g[f] - \Gamma_g[f_m]}_1
&= \norm{ \frac{1}{2\pi} \int_{-\pi}^{\pi} g(s)\tau_s[f - f_m] \dif{s} }_1 \\
&\leq \frac{1}{2\pi} \int_{-\pi}^{\pi} \abs{g(s)} \norm{\tau_s[f - f_m]}_1 \dif{s} \\
&= \frac{1}{2\pi} \int_{-\pi}^{\pi} \abs{g(s)} \norm{[f] - [f_m]}_1 \dif{s} \\
&\leq \norm{g}_{\sup} \norm{[f] - [f_m]}_1.
\end{align*}
As before, it follows that
\begin{equation*}
\Gamma_g[f] = \lim_{m \to \infty} \Gamma_g[f_m]
\end{equation*}
in $(L_1(\mathbb{T}, \mathbb{C}), \norm{\cdot}_1)$.
\noindent
\hlbnoted{Step 4} Finally, we see that
\begin{equation*}
\Gamma_g[f] = \lim_{m \to \infty} \Gamma_g[f_m]
= \lim_{m \to \infty} g * [f_m]
= g * [f].
\end{equation*}
\end{proof}
Viewing $\Gamma_g$ as a map from
$L_1(\mathbb{T}, \mathbb{C})$ to itself,
we have finally concluded our gruesome path to showing that $\Gamma_g = C_g$.
Thus, our two ``notions'' of ``convolution'' agree.
In fact, when $g \in \mathcal{C}(\mathbb{T}, \mathbb{C})$,
we may define
\begin{align*}
\Gamma_g^{\mathfrak{B}} : \mathfrak{B} &\to \mathfrak{B} \\
[f] &\mapsto \frac{1}{2\pi} \int_{-\pi}^{\pi} g(s) \tau_s[f] \dif{s}
\end{align*}
as a map on \hlbnotec{any} homogeneous Banach space $\mathfrak{B}$
over $\mathbb{T}$.
Furthermore, we claim that $C_g[f] = [g \diamond f] \in \mathfrak{B}$.
Let us show that the above map always agrees with convolution.
\begin{thm}[Riemannian Version of Convolution on Homogeneous Banach Spaces]\label{thm:riemannian_version_of_convolution_on_homogeneous_banach_spaces}
Let $(\mathfrak{B}, \norm{\cdot}_{\mathfrak{B}})$ be a homogeneous
Banach space over $\mathbb{T}$, $[f] \in \mathfrak{B}$,
and $g \in \mathcal{C}(\mathbb{T}, \mathbb{C})$.
Then
\begin{equation*}
\frac{1}{2\pi} \int_{-\pi}^{\pi} g(s) \tau_s[f] \dif{s}
\end{equation*}
converges in $\mathfrak{B}$. Furthermore,
\begin{enumerate}
\item
\begin{equation*}
g * [f] = \frac{1}{2\pi} \int_{-\pi}^{\pi} g(s) \tau_s[f] \dif{s},
\end{equation*}
where $g * [f] = [g \diamond f]$, and where for all $\theta \in \mathbb{R}$,
\begin{equation*}
g \diamond f(\theta)
= \frac{1}{2\pi} \int_{[-\pi, \pi)} g(s) f(\theta - s) \dm(s).
\end{equation*}
That is, $\Gamma_g^{\mathfrak{B}}[f] = g * [f]$.
\item We also have
\begin{equation*}
\norm{g * [f]}_{\mathfrak{B}} \leq \nu_1(g) \norm{[f]}_{\mathfrak{B}}.
\end{equation*}
\end{enumerate}
\end{thm}
\begin{proof}
\begin{enumerate}
\item Since $(\mathfrak{B}, \norm{\cdot}_{\mathfrak{B}})$ is a Banach space,
and for $[f] \in \mathfrak{B}$, the function
$\beta : \mathbb{R} \to \mathfrak{B}$ such that
\begin{equation*}
\beta(s) \coloneqq g(s) \tau_s[f]
\end{equation*}
is continuous, by \cref{thm:continuous_functions_are_riemann_integrable},
\begin{equation*}
\Gamma_g^{\mathfrak{B}}[f]
\coloneqq \frac{1}{2\pi} \int_{-\pi}^{\pi} \beta(s) \dif{s}
= \frac{1}{2\pi} \int_{-\pi}^{\pi} g(s) \tau_s[f] \dif{s}
\end{equation*}
exists in $\mathfrak{B}$.
As before, wlog wma $P_N \in \mathcal{P}([-\pi, \pi])$ is a regular
partition into $2^N$ subintervals of equal length,
and if we set $P_N^* = P_N \setminus \{ \pi \}$ as the corresponding set
of test values of $P_N$, then
\begin{equation*}
\lim_{N \to \infty}
\norm{ \Gamma_g^{\mathfrak{B}}[f] - S(\beta,P_N,P_N^*) }_{\mathfrak{B}}
= 0.
\end{equation*}
Since $\mathfrak{B}$ is a homogeneous Banach space, we have
$\norm{[h]}_1 \leq \norm{[h]}_{\mathfrak{B}}$ for all
$[h] \in \mathfrak{B}$.
Thus
\begin{equation*}
\lim_{N \to \infty} \norm{\Gamma_g^{\mathfrak{B}}[f] - S(\beta,P_N,
P_N^*) }_1 = 0.
\end{equation*}
Hence
\begin{equation*}
\Gamma_g^{\mathfrak{B}}[f]
= \frac{1}{2\pi} \int_{-\pi}^{\pi} g(s)\tau_s[f] \dif{s}
\end{equation*}
in $(L_1(\mathbb{T}, \mathbb{C}), \norm{\cdot}_1)$.
Phrased differently, we have $\Gamma_g^{\mathfrak{B}}[f] =
\Gamma_g^{L_1(\mathbb{T}, \mathbb{C})}[f]$.
It follows by
\cref{thm:equivalence_of_gamma_g_and_c_g_}
that
\begin{equation*}
\Gamma_g^{\mathfrak{B}}[f] = \Gamma_g^{L_1(\mathbb{T}, \mathbb{C})}[f]
= g * [f].
\end{equation*}
\item Recall that given any homogeneous Banach space over $\mathbb{T}$,
we defined the continuous map $\Psi_{[f]} : \mathbb{R} \to \mathfrak{B}$
such that $\Psi_{[f]}(s) = \tau_s[f]$. Notice that
since $\mathfrak{B}$ is a homogeneous Banach space,
\begin{equation*}
\norm{\Psi_{[f]}(s)}_{\mathfrak{B}} = \norm{\tau_s[f]}_{\mathfrak{B}}
= \norm{[f]}_{\mathfrak{B}}.
\end{equation*}
Thus, observe that
\begin{align*}
\norm{g * [f]}_{\mathfrak{B}}
&= \frac{1}{2\pi} \norm{ \int_{-\pi}^{\pi} g(s)\tau_s[f] \dif{s}}_{\mathfrak{B}} \\
&= \frac{1}{2\pi} \norm{ \int_{-\pi}^{\pi} g(s) \Psi_{[f]}(s) \dif{s} }_{\mathfrak{B}} \\
&\leq \frac{1}{2\pi} \int_{-\pi}^{\pi} \abs{g(s)}
\norm{\Psi_{[f]}(s)}_{\mathfrak{B}} \dif{s} \\
&= \frac{1}{2\pi} \int_{-\pi}^{\pi} \abs{g(s)} \norm{[f]}_{\mathfrak{B}} \dif{s} \\
&= \norm{[f]}_{\mathfrak{B}} \nu_1(g).
\end{align*}
\end{enumerate}
\end{proof}
\begin{remark}
The first result in
\cref{thm:riemannian_version_of_convolution_on_homogeneous_banach_spaces}
is stronger than it seems.
In
\cref{propo:continuity_of_the_convolution_of_f_and_g_where_g_is_continuous},
we showed that if $g \in \mathcal{C}(\mathbb{T}, \mathbb{C})$ and
$f \in \mathcal{L}_1(\mathbb{T}, \mathbb{C})$, then
$g \diamond f \in \mathcal{C}(\mathbb{T}, \mathbb{C})$, and so
$g * [f] \coloneqq [g \diamond f] \in [\mathcal{C}(\mathbb{T}, \mathbb{C})]$.
Then why is $g * [f] \in \mathfrak{B}$?
There is no reason why $\mathfrak{B}$ should contain all continuous functions,
although it does contain all trigonometric polynomials.
What we have shown is that $g * [f] \in \mathfrak{B}$
\hlbnotec{even if $\mathfrak{B}$ does not contain $[\mathcal{C}(\mathbb{T}, \mathbb{C})]$}.
In other words, convolution (at least by a continuous function)
keeps us in the smaller space $\mathfrak{B}$.
\end{remark}
\begin{thm}[Convolution as a Normalizer]\label{thm:convolution_as_a_normalizer}
Let $g \in \mathcal{C}(\mathbb{T}, \mathbb{C})$, and let
\begin{equation*}
C_g : ([\mathcal{C}(\mathbb{T}, \mathbb{C})], \norm{\cdot}_{\infty})
\to ([\mathcal{C}(\mathbb{T}, \mathbb{C})], \norm{\cdot}_{\infty}),
\end{equation*}
as defined in \cref{defn:convolution_operator_with_kernel},
corresponding to $g$, so that $C_g[h] = g * [h]$ by
\cref{thm:equivalence_of_gamma_g_and_c_g_}.
Then $\norm{C_g} = \nu_1(g) = \norm{[g]}_1$.
\end{thm}
\begin{proof}
By part 2 of
\cref{thm:riemannian_version_of_convolution_on_homogeneous_banach_spaces},
for $[f] \in ([\mathcal{C}(\mathbb{T}, \mathbb{C})], \norm{\cdot}_{\infty})$,
\begin{equation*}
\norm{C_g[f]}_{\infty} = \norm{g * [f]}_{\infty}
\leq \nu_1(g) \norm{[f]}_{\infty},
\end{equation*}
so $\norm{C_g} \leq \nu_1(g)$.
For the reverse inequality, let $f \in \mathcal{C}(\mathbb{T}, \mathbb{C})$
with $\norm{[f]}_{\infty} \leq 1$.
Then $g * [f] \in [\mathcal{C}(\mathbb{T}, \mathbb{C})]$, so that
\begin{equation*}
\norm{C_g[f]}_{\infty} = \norm{g * [f]}_{\infty} = \norm{g \diamond f}_{\sup}
\geq \abs{g \diamond f(0)}.
\end{equation*}
By
\cref{thm:bounded_riemann_integrable_functions_are_lebesgue_integrable},
we see that
\begin{align*}
g \diamond f(0) &= \frac{1}{2\pi} \int_{[-\pi, \pi)} g(s) f(0 - s) \dm(s) \\
&= \frac{1}{2\pi} \int_{-\pi}^{\pi} g(s)f(-s) \dif{s}.
\end{align*}
It remains to choose such $f$ making $\abs{g \diamond f(0)}$ arbitrarily
close to $\nu_1(g)$; roughly, $f(-s)$ should approximate the conjugate
phase of $g(s)$, so that $g(s)f(-s)$ approximates $\abs{g(s)}$.
The remaining details are \hlwarn{to be added}.
\end{proof}
% section convolution_continued_2 (end)
% chapter lecture_19_jul_16th_2019 (end)
\chapter{Lecture 20 Jul 18th 2019}%
\label{chp:lecture_20_jul_18th_2019}
% chapter lecture_20_jul_18th_2019
\section{Convolution (Continued 3)}%
\label{sec:convolution_continued_3}
% section convolution_continued_3
Let us establish a similar result for convolution
by a continuous function $g$ acting on $L_1(\mathbb{T}, \mathbb{C})$.
\begin{thm}[Convolution Operator for $g$ on $L_1(\mathbb{T}, \mathbb{C})$]\label{thm:convolution_operator_for_g_on_l_1_mathbb_t_mathbb_c_}
Let $g \in \mathcal{C}(\mathbb{T}, \mathbb{C})$, and
\begin{equation*}
C_g : L_1(\mathbb{T}, \mathbb{C}) \to L_1(\mathbb{T}, \mathbb{C})
\end{equation*}
be the convolution operator corresponding to $g$, so that $C_g[f] = g * [f]$.
Then $\norm{C_g} = \nu_1(g) = \norm{[g]}_1$.
\end{thm}
\begin{proof}
\hlwarn{To be added}
\end{proof}
We shall next explore the connection between convolution operators
and convergence of Fourier series.
% section convolution_continued_3 (end)
\section{The Dirichlet Kernel}%
\label{sec:the_dirichlet_kernel}
% section the_dirichlet_kernel
Recall that given $[f] \in L_2(\mathbb{T}, \mathbb{C})$, the sequence
$(\Delta_N([f]))_{N=1}^{\infty}$ of partial sums of the Fourier series
of $[f]$ converges in the $\norm{\cdot}_2$-norm to $[f]$.
We want to see how far we can extend the same result for
$[f] \in L_1(\mathbb{T}, \mathbb{C})$.
Our little (not so little) detour into convolution is intimately connected
to the so-called Dirichlet kernel, which we shall see below,
and whose importance shall become apparent almost immediately.
\begin{defn}[Dirichlet Kernel of Order $N$]\index{Dirichlet Kernel of Order $N$}\label{defn:dirichlet_kernel_of_order_n_}
For each $n \in \mathbb{Z}$,
recall that $\xi_n \in \mathcal{C}(\mathbb{T}, \mathbb{C})$
is the function $\xi_n(\theta) = e^{in \theta}$.
For $N \geq 1$, we define the \hlnoteb{Dirichlet kernel of order $N$} as
\begin{equation*}
D_N = \sum_{n = -N}^{N} \xi_n.
\end{equation*}
\end{defn}
\begin{note}
Again, the word `kernel' has nothing to do
with the null space of any linear map.
\end{note}
Let $f \in \mathcal{L}_1(\mathbb{T}, \mathbb{C})$.
For each $N \geq 1$, define
\begin{equation*}
\Delta_N^\circ(f) \coloneqq \sum_{n = -N}^{N} \alpha_n^{[f]} \xi_n
= \sum_{n = -N}^{N} \hat{f}(n) \xi_n.
\end{equation*}
It is clear that $\Delta_N^\circ(f) \in \mathcal{C}(\mathbb{T}, \mathbb{C})$,
since it is a finite linear combination of $\{ \xi_n \}_{n = -N}^{N} \subseteq
\mathcal{C}(\mathbb{T}, \mathbb{C})$.
If $f = g$ a.e. on $\mathbb{R}$,
we saw that $\alpha_n^{[f]} = \alpha_n^{[g]}$ for all $n \in \mathbb{Z}$,
so that $\Delta_N^\circ(f) = \Delta_N^\circ(g)$ for $N \geq 1$.
Thus, we may define
\begin{equation*}
\Delta_N([f]) = [\Delta_N^\circ(f)], \quad N \geq 1.
\end{equation*}
Hence, $\Delta_N([f])$ is the $N$\textsuperscript{th}
partial sum of the Fourier series of $[f]$.
In the case of $[f] \in L_2(\mathbb{T}, \mathbb{C})$,
this definition coincides with our
\hyperref[eg:fourier_for_bigl2_in_complex_unit_circle]{previous definition}.
For $N \geq 1$, $f \in \mathcal{L}_1(\mathbb{T}, \mathbb{C})$
and $\theta \in \mathbb{R}$, we have
\begin{align*}
\Delta_N^\circ(f)(\theta)
&= \sum_{n = -N}^{N} \alpha_n^{[f]} \xi_n(\theta) \\
&= \sum_{n=-N}^{N} \left( \frac{1}{2\pi} \int_{[-\pi, \pi)}
f(s) \overline{\xi_n}(s) \dm(s) \right) \xi_n(\theta) \\
&= \sum_{n=-N}^{N} \frac{1}{2\pi} \int_{[-\pi, \pi)} f(s) e^{in(\theta - s)} \dm(s) \\
&= \sum_{n=-N}^{N} \frac{1}{2\pi} \int_{[-\pi, \pi)} f(\theta - s) e^{ins} \dm(s)
\enspace \because \text{\cref{lemma:swapping_convolutions}} \\
&= \frac{1}{2\pi} \int_{[-\pi, \pi)} \sum_{n=-N}^{N}
f(\theta - s) e^{ins} \dm(s) \\
&= \frac{1}{2\pi} \int_{[-\pi ,\pi)} D_N(s) f(\theta - s) \dm(s) \\
&= (D_N \diamond f)(\theta).
\end{align*}
Thus $\Delta_N^\circ(f) = D_N \diamond f$, or
\begin{equation*}
\Delta_N([f]) = D_N * [f] = C_{D_N}([f]), \quad N \geq 1.
\end{equation*}
We expressed the $N$\textsuperscript{th} partial sum of the Fourier series
of $[f] \in L_1(\mathbb{T}, \mathbb{C})$ as the
\hlbnotee{convolution of the Dirichlet kernel $D_N$ of order $N$
with $[f]$}.
\sidenote{Our sweat and tears ploughing through the convoluted lands
of convolutions were not in vain!}
The question of whether or not these partial sums converge to $[f]$
in $L_1(\mathbb{T}, \mathbb{C})$ is now a question of
whether or not $\lim_{N \to \infty} C_{D_N}([f]) = [f]$ in
$L_1(\mathbb{T}, \mathbb{C})$.
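As a quick sanity check (an added computation for intuition): for $f = \xi_m$,
\begin{align*}
D_N \diamond \xi_m(\theta)
&= \frac{1}{2\pi} \int_{[-\pi, \pi)} D_N(s) \xi_m(\theta - s) \dm(s) \\
&= \xi_m(\theta) \cdot \frac{1}{2\pi} \int_{[-\pi, \pi)} D_N(s) e^{-ims} \dm(s)
= \widehat{D_N}(m) \, \xi_m(\theta),
\end{align*}
and $\widehat{D_N}(m) = 1$ if $\abs{m} \leq N$, while $\widehat{D_N}(m) = 0$
otherwise. Thus the partial sums recover any trigonometric polynomial exactly
once $N$ is large enough; the real difficulty lies with general
$[f] \in L_1(\mathbb{T}, \mathbb{C})$.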
\begin{thm}[Properties of the Dirichlet Kernel]\label{thm:properties_of_the_dirichlet_kernel}
Let $N \geq 1$ be an integer and $D_N$ be the
Dirichlet kernel of order $N$. Then
\begin{enumerate}
\item $D_N(-\theta) = D_N(\theta) \in \mathbb{R}$ for all $\theta \in \mathbb{R}$;
\item $\frac{1}{2\pi} \int_{-\pi}^{\pi} D_N(\theta) \dif{\theta} = 1$;
\item For $0 \neq \theta \in [-\pi, \pi)$,
\begin{equation*}
D_N(\theta) = \frac{\sin(\left(N + \frac{1}{2}\right)\theta)}
{\sin \left( \frac{1}{2} \theta \right)}.
\end{equation*}
Also, $D_N(0) = 2N + 1$.
\item $\norm{[D_N]}_1 = \nu_1(D_N) \geq \frac{4}{\pi^2} \sum_{n=1}^{N} \frac{1}{n}$.
\end{enumerate}
\end{thm}
\begin{proof}
\hlwarn{To be added}
\end{proof}
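For the reader's convenience, here is a sketch of the third property (a
standard geometric-series computation; it is not a substitute for the omitted
proof above). For $0 \neq \theta \in [-\pi, \pi)$, so that $e^{i\theta} \neq 1$,
\begin{align*}
D_N(\theta)
= \sum_{n=-N}^{N} e^{in\theta}
&= e^{-iN\theta} \sum_{k=0}^{2N} e^{ik\theta}
= e^{-iN\theta} \cdot \frac{e^{i(2N+1)\theta} - 1}{e^{i\theta} - 1} \\
&= \frac{e^{i\left(N + \frac{1}{2}\right)\theta}
- e^{-i\left(N + \frac{1}{2}\right)\theta}}
{e^{i\theta/2} - e^{-i\theta/2}}
= \frac{\sin\left(\left(N + \frac{1}{2}\right)\theta\right)}
{\sin\left(\frac{1}{2}\theta\right)},
\end{align*}
while $D_N(0) = \sum_{n=-N}^{N} 1 = 2N + 1$.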
\newthought{On the right} are some graphs of $D_N$,
particularly for $D_2$, $D_5$ and $D_{10}$.
\begin{marginfigure}
\centering
\includesvg[width=\marginparwidth]{images/graphofD2.svg}
\caption{Graph of $D_2$}\label{fig:graph_of_d_2_}
\includesvg[width=\marginparwidth]{images/graphofD5.svg}
\caption{Graph of $D_5$}\label{fig:graph_of_d_5_}
\includesvg[width=\marginparwidth]{images/graphofD10.svg}
\caption{Graph of $D_{10}$}\label{fig:graph_of_d_10_}
\end{marginfigure}
Two things worth noticing here are that
\begin{itemize}
\item the amplitude of $D_N$ near $0$ increases with $N$;
this is clear from $D_N$ being continuous and $D_N(0) = 2N + 1$
for $N \geq 1$; and
\item each $D_N$ has lots of fluctuations between positive and negative
values, which accounts for the fact that the integrals of $D_N$
are bounded, while the integrals of $\abs{D_N}$ are not.
\end{itemize}
The next result follows from
\cref{thm:riemannian_version_of_convolution_on_homogeneous_banach_spaces},
\cref{thm:convolution_as_a_normalizer}, and
\cref{thm:convolution_operator_for_g_on_l_1_mathbb_t_mathbb_c_},
along with the divergence of the harmonic series
$\sum_{n=1}^{\infty} \frac{1}{n}$ applied to the 4\textsuperscript{th} result
in \cref{thm:properties_of_the_dirichlet_kernel}: indeed, in both cases,
$\norm{C_{D_N}} = \nu_1(D_N) \geq \frac{4}{\pi^2} \sum_{n=1}^{N} \frac{1}{n}
\to \infty$ as $N \to \infty$.
\begin{crly}[Unboundedness of Convolution Operators for the Dirichlet Kernel]\label{crly:unboundedness_of_convolution_operators_for_the_dirichlet_kernel}
For each $N \geq 1$, let $D_N$ denote the Dirichlet kernel of order $N$.
\begin{enumerate}
\item If $C_{D_N} \in \mathcal{B}([\mathcal{C}(\mathbb{T}, \mathbb{C})],
\norm{\cdot}_{\infty})$ is the convolution operator
corresponding to $D_N$, for $N \geq 1$, then
\begin{equation*}
\lim_{N \to \infty} \norm{C_{D_N}} = \infty.
\end{equation*}
\item If $C_{D_N} \in \mathcal{B}(L_1(\mathbb{T}, \mathbb{C}), \norm{\cdot}_1)$
is the convolution operator corresponding to $D_N$, for $N \geq 1$, then
\begin{equation*}
\lim_{N \to \infty} \norm{C_{D_N}} = \infty.
\end{equation*}
\end{enumerate}
\end{crly}
We are already seeing some bad signs of things not working out nicely.
Let's push a little bit further.
To exploit the connection between the Dirichlet kernel and convolution,
we require a few results from real analysis.
\begin{defn}[Nowhere Dense]\index{Nowhere Dense}\label{defn:nowhere_dense}
Let $(X, d)$ be a metric space and $H \subseteq X$.
We say that $H$ is \hlnoteb{nowhere dense} (or \hldefn{thin})
if $G \coloneqq X \setminus \overline{H}$ is dense in $X$.
In other words, the interior of $\overline{H}$ is empty.
\end{defn}
\begin{eg}
We usually think of nowhere dense subsets of metric spaces as
being ``small'', as the alternate terminology ``thin''
suggests.
\begin{enumerate}
\item The set $H = \mathbb{Z}$ is nowhere dense in $\mathbb{R}$,
which is easily verifiable.
\item The Cantor set $C$ is nowhere dense in $X = [0, 1]$,
equipped with the standard metric inherited from $\mathbb{R}$.
\item The set $H = \mathbb{Q}$ of rational numbers is \hlnotec{not}
nowhere dense in $\mathbb{R}$, since $X \setminus \overline{H} =
\mathbb{R} \setminus \mathbb{R} = \emptyset$.
\end{enumerate}
\end{eg}
\begin{defn}[First and Second Category]\index{First Category}\index{Second Category}\label{defn:first_and_second_category}
We say that a subset $H$ of a metric space $(X, d)$ is
of the \hlnoteb{first category} in $(X, d)$ if there exists
a sequence $(F_n)_n$ of closed, nowhere dense sets in $X$
such that
\begin{equation*}
H \subseteq \bigcup_{n=1}^{\infty} F_n.
\end{equation*}
Otherwise, we say that $H$ is of \hlnoteb{second category}.
\end{defn}
The reader should be familiar with the following result.
\begin{defn}[Baire Category Theorem]\index{Baire Category Theorem}\label{defn:baire_category_theorem}
A complete metric space $(X, d)$ is of the second category.
That is, $X$ is \hlbnotee{not a countable union of closed, nowhere dense sets
in $X$}.
\end{defn}
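As a quick illustration of the strength of this result:
each singleton $\{ x \} \subseteq \mathbb{R}$ is closed and has
empty interior, and is hence nowhere dense.
If $\mathbb{R}$ were countable, say
$\mathbb{R} = \bigcup_{n=1}^{\infty} \{ x_n \}$,
then the complete metric space $\mathbb{R}$ would be a countable union
of closed, nowhere dense sets, contradicting the Baire Category Theorem.
Hence $\mathbb{R}$ is uncountable.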
% section the_dirichlet_kernel (end)
% chapter lecture_20_jul_18th_2019 (end)
\chapter{Lecture 21 Jul 23rd 2019}%
\label{chp:lecture_21_jul_23rd_2019}
% chapter lecture_21_jul_23rd_2019
\section{The Dirichlet Kernel (Continued)}%
\label{sec:the_dirichlet_kernel_continued}
% section the_dirichlet_kernel_continued
The second result which we shall require is the following.
\begin{thm}[Banach-Steinhaus Theorem (aka The Uniform Boundedness Principle)]\index{Banach-Steinhaus Theorem}\index{The Uniform Boundedness Principle}\label{thm:banach_steinhaus_theorem}
Let $(X, d)$ be a complete metric space and
$\emptyset \neq \mathcal{F} \subseteq \mathcal{C}(X, \mathbb{R})$.
Suppose that $\forall x \in X$, $\exists \kappa_x > 0$ a constant such that
\begin{equation*}
\abs{f(x)} \leq \kappa_x,\quad f \in \mathcal{F}.
\end{equation*}
Then there exists an open set $G \subseteq X$ and $\kappa > 0$ such that
\begin{equation*}
\abs{f(x)} \leq \kappa,\quad x \in G, \enspace f \in \mathcal{F}.
\end{equation*}
\end{thm}
\begin{proof}
\hlwarn{To be added}
\end{proof}
There is a stronger version of the Banach-Steinhaus Theorem
that applies to linear operators in Banach spaces.
\begin{thm}[Banach-Steinhaus Theorem for Operators (aka The Uniform Boundedness Principle for Operators)]\index{Banach-Steinhaus Theorem for Operators}\index{The Uniform Boundedness Principle for Operators}\label{thm:banach_steinhaus_theorem_for_operators}
Let $(\mathfrak{X}, \norm{\cdot}_{\mathfrak{X}})$ and
$(\mathfrak{Y}, \norm{\cdot}_{\mathfrak{Y}})$ be Banach spaces, and suppose that
$\emptyset \neq \mathcal{F} \subseteq \mathcal{B}(\mathfrak{X}, \mathfrak{Y})$.
Let $H \subseteq \mathfrak{X}$ be a subset of the second category in $\mathfrak{X}$,
and suppose that for each $x \in H$,
there exists a constant $\kappa_x > 0$ such that
\begin{equation*}
\norm{Tx}_{\mathfrak{Y}} \leq \kappa_x, \quad T \in \mathcal{F}.
\end{equation*}
Then $\mathcal{F}$ is bounded; that is
\begin{equation*}
\sup_{T \in \mathcal{F}} \norm{T} < \infty.
\end{equation*}
\end{thm}
\begin{proof}
\hlwarn{To be added}
\end{proof}
\begin{crly}[Sparsity of Boundedness of Unbounded Sequences of Bounded Functions between Banach Spaces]\label{crly:sparcity_of_boundedness_of_unbounded_sequences_of_bounded_functions_between_banach_spaces}
Let $(\mathfrak{X}, \norm{\cdot}_{\mathfrak{X}})$ and
$(\mathfrak{Y}, \norm{\cdot}_{\mathfrak{Y}})$ be Banach spaces, and let
$(T_n)_{n=1}^{\infty}$ be an unbounded sequence in
$\mathcal{B}(\mathfrak{X}, \mathfrak{Y})$,
i.e. $\sup_{n \geq 1} \norm{T_n} = \infty$.
Let $H = \{ x \in \mathfrak{X} : \sup_{n \geq 1} \norm{T_n x} < \infty \}$.
Then $H$ is of the first category in $\mathfrak{X}$, and
$J \coloneqq \mathfrak{X} \setminus H$ is of the second category.
\end{crly}
\begin{proof}
\hlwarn{To be added}
\end{proof}
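Here is a sketch of the standard Baire category argument.
For each $m \geq 1$, let
\begin{equation*}
H_m \coloneqq \{ x \in \mathfrak{X} : \sup_{n \geq 1} \norm{T_n x}_{\mathfrak{Y}} \leq m \}
= \bigcap_{n=1}^{\infty} \{ x \in \mathfrak{X} : \norm{T_n x}_{\mathfrak{Y}} \leq m \},
\end{equation*}
which is closed, being an intersection of preimages of closed sets under
continuous maps, and note that $H = \bigcup_{m=1}^{\infty} H_m$.
If some $H_m$ contained an open ball $B^{\mathfrak{X}}(x_0, \epsilon)$,
then for every $z \in \mathfrak{X}$ with $\norm{z}_{\mathfrak{X}} < \epsilon$
we would have
\begin{equation*}
\norm{T_n z}_{\mathfrak{Y}}
\leq \norm{T_n (x_0 + z)}_{\mathfrak{Y}} + \norm{T_n x_0}_{\mathfrak{Y}}
\leq 2m, \quad n \geq 1,
\end{equation*}
so that $\sup_{n \geq 1} \norm{T_n} \leq \frac{2m}{\epsilon} < \infty$,
a contradiction.
Thus each $H_m$ is closed and nowhere dense, and $H$ is of the
first category.
Finally, were $J$ also of the first category, then so would be
$\mathfrak{X} = H \cup J$, contradicting the
\hyperref[defn:baire_category_theorem]{Baire Category Theorem}
since $\mathfrak{X}$ is complete.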
\begin{remark}[\faStar Further implications of \cref{crly:sparcity_of_boundedness_of_unbounded_sequences_of_bounded_functions_between_banach_spaces}]
The statement that $\sup_{n \geq 1} \norm{T_n} = \infty$
means that we may choose, for each $n \geq 1$, some $x_n \in \mathfrak{X}$
with $\norm{x_n}_{\mathfrak{X}} = 1$ such that
\begin{equation*}
\limsup_{n \to \infty} \norm{T_n x_n}_{\mathfrak{Y}} = \infty.
\end{equation*}
In the first place, it is not clear that there should exist
any $x \in \mathfrak{X}$ such that
\begin{equation*}
\lim_{n \to \infty} \norm{T_n x}_{\mathfrak{Y}} = \infty.
\end{equation*}
The above corollary not only says that such a vector $x \in \mathfrak{X}$ exists;
it asserts that this is true for a `very large' set of $x$'s,
in the sense that the set $H$ of $x$'s for which it fails
is a set of the first category in $\mathfrak{X}$.
\end{remark}
We are now ready to answer the question of whether or not
the partial sums of the Fourier series of an element
$[f] \in L_1(\mathbb{T}, \mathbb{C})$ necessarily converge to $[f]$
in the $\norm{\cdot}_1$-norm.
We shall see that by
\cref{crly:sparcity_of_boundedness_of_unbounded_sequences_of_bounded_functions_between_banach_spaces},
this convergence \hlnotec{almost never happens}.
\sidenote{We use the phrase `almost never' to mean the notion of
first category, not measure zero.}
Furthermore, the same argument shows that this is also
the case for $[f] \in [\mathcal{C}(\mathbb{T}, \mathbb{C})]$
in the $\norm{\cdot}_{\infty}$-norm.
\begin{thm}[The \textit{unbearable lousiness} of being a Dirichlet Kernel]\label{thm:the_textit_unbearable_lousiness_of_being_a_dirichlet_kernel}
\begin{enumerate}
\item Let
\begin{equation*}
\mathfrak{K}_{\infty} \coloneqq \{ [f] \in [\mathcal{C}(\mathbb{T}, \mathbb{C})]
: [f] = \lim_{N \to \infty} \Delta_N[f]
\in ([\mathcal{C}(\mathbb{T}, \mathbb{C})], \norm{\cdot}_{\infty}) \}.
\end{equation*}
Then $\mathfrak{K}_{\infty}$ is a set of the first category in
$([\mathcal{C}(\mathbb{T},\mathbb{C})], \norm{\cdot}_{\infty})$, whose complement
$[\mathcal{C}(\mathbb{T},\mathbb{C})] \setminus \mathfrak{K}_{\infty}$
is a set of the second category.
\item Let
\begin{equation*}
\mathfrak{K}_1 \coloneqq \{ [f] \in L_1(\mathbb{T}, \mathbb{C})
: [f] = \lim_{N \to \infty} \Delta_N[f]
\in (L_1(\mathbb{T}, \mathbb{C}), \norm{\cdot}_1) \}.
\end{equation*}
Then $\mathfrak{K}_1$ is a set of the first category in
$(L_1(\mathbb{T},\mathbb{C}), \norm{\cdot}_1)$, whose complement
$L_1(\mathbb{T}, \mathbb{C}) \setminus \mathfrak{K}_1$
is a set of the second category.
\end{enumerate}
\end{thm}
\begin{proof}
\hlwarn{To be added}
\end{proof}
We have entered the darkest days of our course,
and are almost at the very end of our journey.
If this were a novel, I bet readers would be crying over
how their favorite hero has fallen,
and over the fragility of our other heroes.
But there is some hope in the face of this despair.
% section the_dirichlet_kernel_continued (end)
\section{The Féjer Kernel}%
\label{sec:the_fejer_kernel}
% section the_fejer_kernel
\begin{defn}[$N$\textsuperscript{th}-Cesàro mean]\index{$N$\textsuperscript{th}-Cesàro mean}\label{defn:_n_textsuperscript_th_cesaro_mean}
Let $(\mathfrak{X}, \norm{\cdot}_{\mathfrak{X}})$ be a Banach space,
and $(x_n)_{n=0}^{\infty}$ be a sequence in $\mathfrak{X}$.
The \hlnoteb{$N$\textsuperscript{th}-Cesàro mean} of the sequence
is defined as
\begin{equation*}
\sigma_N \coloneqq \frac{1}{N} \left( x_0 + x_1 + \hdots + x_{N-1} \right),
\end{equation*}
for $N \geq 1$.
\end{defn}
\begin{propo}[Convergent Sequences have Convergent Cesàro Means]\label{propo:convergent_sequences_have_convergent_cesaro_means}
Suppose that $\mathfrak{X}$ is a Banach space and
$(x_n)_{n=0}^{\infty}$ is a sequence in $\mathfrak{X}$.
Let $(\sigma_N)_{N=1}^{\infty}$ denote the sequence of Cesàro means of
$(x_n)_{n=0}^{\infty}$.
If $x = \lim_{n \to \infty} x_n$ exists, then
\begin{equation*}
x = \lim_{N \to \infty} \sigma_N.
\end{equation*}
\end{propo}
\begin{ex}
Prove \cref{propo:convergent_sequences_have_convergent_cesaro_means}.
\end{ex}
\begin{remark}
The converse of \cref{propo:convergent_sequences_have_convergent_cesaro_means}
is false.
Let $(x_n)_{n=0}^{\infty} = ( (-1)^n )_{n=0}^{\infty}$. Then
\begin{equation*}
(x_0, x_1, x_2, \ldots) = (1, -1, 1, \ldots).
\end{equation*}
The partial sums $x_0 + x_1 + \hdots + x_{N-1}$ alternate between
$1$ ($N$ odd) and $0$ ($N$ even), so $\abs{\sigma_N} \leq \frac{1}{N}$,
and thus $\lim_{N \to \infty} \sigma_N = 0$,
but $\lim_{n \to \infty} x_n$ does not exist.
\end{remark}
\begin{defn}[$N$\textsuperscript{th}-Cesàro sum and the Féjer kernel of order $N$]\index{$N$\textsuperscript{th}-Cesàro sum}\index{Féjer kernel of order $N$}\label{defn:_n_textsuperscript_th_cesaro_sum}
Let $f \in \mathcal{L}_1(\mathbb{T}, \mathbb{C})$.
The \hlnoteb{$N$\textsuperscript{th}-Cesàro sum} of the Fourier series
of $f$ is the $N$\textsuperscript{th}-Cesàro mean
of the sequence $(\Delta_n^\circ(f))_{n=0}^{\infty}$.
Thus
\begin{equation*}
\sigma_N^\circ(f)
= \frac{1}{N} (D_0 \diamond f + D_1 \diamond f + \hdots + D_{N-1} \diamond f)
= F_N \diamond f,
\end{equation*}
where $F_N \coloneqq \frac{1}{N}(D_0 + D_1 + \hdots + D_{N-1})$
is called the \hlnoteb{Féjer kernel of order $N$}.
We also define the \hlnoteb{$N$\textsuperscript{th}-Cesàro sum} of the Fourier
series of $[f] \in L_1(\mathbb{T}, \mathbb{C})$ as the
$N$\textsuperscript{th}-Cesàro mean of the sequence
$(\Delta_n[f])_{n=0}^{\infty}$, namely
\begin{align*}
\sigma_N[f]
&\coloneqq \frac{1}{N} (D_0 * [f] + D_1 * [f] + \hdots + D_{N-1} * [f]) \\
&= F_N * [f] = [F_N \diamond f] = [\sigma_N^\circ(f)].
\end{align*}
\end{defn}
\begin{remark}
$D_n \in \mathcal{C}(\mathbb{T}, \mathbb{C})$ for all $n \geq 0$
implies that $F_N \in \mathcal{C}(\mathbb{T}, \mathbb{C})$ for all $N \geq 1$.
By
\cref{propo:continuity_of_the_convolution_of_f_and_g_where_g_is_continuous},
it follows that $\sigma_N^\circ(f) \in \mathcal{C}(\mathbb{T}, \mathbb{C})
\subseteq \mathcal{L}_1(\mathbb{T}, \mathbb{C})$
for every $f \in \mathcal{L}_1(\mathbb{T}, \mathbb{C})$.
Furthermore, $\forall \theta \in \mathbb{R}$,
\begin{align*}
\sigma_N^\circ(f)(\theta)
&= \frac{1}{2\pi} \int_{[-\pi ,\pi)} F_N(s) f(\theta - s) \dm(s) \\
&= \frac{1}{2\pi} \int_{[-\pi, \pi)} F_N(\theta - s) f(s) \dm(s).
\end{align*}
By \cref{thm:riemannian_version_of_convolution_on_homogeneous_banach_spaces},
$F_N \in \mathcal{C}(\mathbb{T}, \mathbb{C})$ implies that for every
homogeneous Banach algebra $\mathfrak{B}$ and $[f] \in \mathfrak{B}$,
we have
\begin{equation*}
\sigma_N[f] = F_N * [f] \in \mathfrak{B}.
\end{equation*}
In particular, $\forall [f] \in L_p(\mathbb{T}, \mathbb{C})$,
\begin{equation*}
\sigma_N[f] = F_N * [f] \in L_p(\mathbb{T}, \mathbb{C}).
\end{equation*}
\end{remark}
\begin{thm}[Properties of the Féjer Kernel]\label{thm:properties_of_the_fejer_kernel}
For each $N \in \mathbb{N} \setminus \{ 0 \}$,
\begin{enumerate}
\item $F_N$ is a $2\pi$-periodic, even, continuous function;
\item If $0 \neq \theta \in [-\pi, \pi)$, then
\begin{equation*}
F_N(\theta)
= \frac{1}{N} \left( \frac{1 - \cos(N\theta)}{1 - \cos(\theta)} \right)
= \frac{1}{N} \left( \frac{\sin \left( \frac{N}{2} \theta \right)}
{\sin \left( \frac{1}{2} \theta \right)} \right)^2,
\end{equation*}
while $F_N(0) = N$.
In particular, $F_N(\theta) \geq 0$ for all $\theta \in \mathbb{R}$;
\item
\begin{equation*}
\nu_1(F_N)
= \frac{1}{2\pi} \int_{-\pi}^{\pi} \abs{F_N(\theta)} \dif{\theta}
= \frac{1}{2\pi} \int_{-\pi}^{\pi} F_N(\theta) \dif{\theta} = 1.
\end{equation*}
\item For all $0 < \delta \leq \pi$,
\begin{equation*}
\lim_{N \to \infty} \left(
\int_{-\pi}^{-\delta} \abs{F_N(\theta)} \dif{\theta}
+ \int_{\delta}^{\pi} \abs{F_N(\theta)} \dif{\theta}
\right) = 0; \text{ and }
\end{equation*}
\item For $0 < \abs{\theta} < \pi$,
\begin{equation*}
0 \leq F_N(\theta) \leq \frac{\pi^2}{N \theta^2}.
\end{equation*}
\end{enumerate}
\end{thm}
\begin{proof}
\hlwarn{To be added}
\end{proof}
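In lieu of the full proof, here is a sketch of parts 2 and 5.
For part 2, combine part 3 of \cref{thm:properties_of_the_dirichlet_kernel}
with the identity
$2 \sin\left(\frac{1}{2}\theta\right)
\sin\left(\left(n + \frac{1}{2}\right)\theta\right)
= \cos(n\theta) - \cos((n+1)\theta)$,
so that the sum defining $F_N$ telescopes: for $0 \neq \theta \in [-\pi, \pi)$,
\begin{align*}
F_N(\theta)
&= \frac{1}{N} \sum_{n=0}^{N-1}
\frac{\sin\left(\left(n + \frac{1}{2}\right)\theta\right)}
{\sin\left(\frac{1}{2}\theta\right)}
= \frac{1}{N} \cdot
\frac{\sum_{n=0}^{N-1} \left( \cos(n\theta) - \cos((n+1)\theta) \right)}
{2 \sin^2\left(\frac{1}{2}\theta\right)} \\
&= \frac{1}{N} \left( \frac{1 - \cos(N\theta)}{1 - \cos(\theta)} \right),
\end{align*}
using $2 \sin^2\left(\frac{1}{2}\theta\right) = 1 - \cos\theta$.
For part 5, the bound
$\sin^2\left(\frac{1}{2}\theta\right) \geq \frac{\theta^2}{\pi^2}$
for $\abs{\theta} \leq \pi$ yields
\begin{equation*}
0 \leq F_N(\theta)
= \frac{1}{N} \left( \frac{\sin\left(\frac{N}{2}\theta\right)}
{\sin\left(\frac{1}{2}\theta\right)} \right)^2
\leq \frac{1}{N \sin^2\left(\frac{1}{2}\theta\right)}
\leq \frac{\pi^2}{N \theta^2}.
\end{equation*}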
\newthought{To the right} are some graphs of $F_N$,
namely of $F_2$, $F_5$ and $F_{10}$.
\begin{marginfigure}
\centering
\includesvg[width=\marginparwidth]{images/graphofF2.svg}
\caption{Graph of $F_2$}\label{fig:graph_of_f_2_}
\includesvg[width=\marginparwidth]{images/graphofF5.svg}
\caption{Graph of $F_5$}\label{fig:graph_of_f_5_}
\includesvg[width=\marginparwidth]{images/graphofF10.svg}
\caption{Graph of $F_{10}$}\label{fig:graph_of_f_10_}
\end{marginfigure}
Two things worth noticing about them are:
\begin{itemize}
\item the amplitude of the functions near $0$ grows with $N$,
just as we saw for the Dirichlet kernel;
here each $F_N$ is continuous and $F_N(0) = N$ for $N \geq 1$; and
\item for each $\delta > 0$, the functions become uniformly
close to $0$ when $\delta < \abs{\theta} < \pi$.
\end{itemize}
Let us pull out $D_5$ and $F_5$ for comparison.
\begin{figure*}[ht]
\centering
\begin{minipage}[b]{0.5\linewidth}
\centering
\includesvg[width=0.8\linewidth]{images/graphofD5.svg} \\
Graph of $D_5$
\end{minipage}%
\begin{minipage}[b]{.5\linewidth}
\centering
\includesvg[width=0.8\linewidth]{images/graphofF5.svg} \\
Graph of $F_5$
\end{minipage}
\caption{Comparing $D_5$ and $F_5$}
\label{fig:comparing_d_5_and_f_5_}
\end{figure*}
For both $D_5$ and $F_5$, the mean value
$\frac{1}{2\pi} \int_{-\pi}^{\pi}$ equals $1$;
for $F_5$, which is non-negative, this coincides with its 1-norm.
For $D_5$, we see that there are many regions where the function is
negative, whereas $F_5$ is never negative.
Furthermore, $F_N$ has the property that
\begin{equation*}
\lim_{N \to \infty} \left(
\int_{-\pi}^{-\delta} \abs{F_N(\theta)} \dif{\theta}
+ \int_{\delta}^{\pi} \abs{F_N(\theta)} \dif{\theta}
\right) = 0,
\end{equation*}
a property that $D_N$ lacks.
% section the_fejer_kernel (end)
% chapter lecture_21_jul_23rd_2019 (end)
\chapter{Lecture 22 Jul 25th 2019}%
\label{chp:lecture_22_jul_25th_2019}
% chapter lecture_22_jul_25th_2019
\section{The Féjer Kernel (Continued)}%
\label{sec:the_fejer_kernel_continued}
% section the_fejer_kernel_continued
\begin{defn}[Summability Kernel]\index{Summability Kernel}\label{defn:summability_kernel}
A \hlnoteb{summability kernel} is a sequence $(k_n)_{n=1}^{\infty}$
of $2\pi$-periodic, continuous,
\sidenote{A summability kernel can be more general than a continuous sequence,
but for our purposes, this is sufficient.}
complex-valued functions on $\mathbb{R}$ satisfying:
\begin{enumerate}
\item $\frac{1}{2\pi} \int_{-\pi}^{\pi} k_n = 1$ for all $n \geq 1$;
\item $\sup_{n \geq 1} \nu_1(k_n) = \sup_{n \geq 1} \frac{1}{2\pi}
\int_{-\pi}^{\pi} \abs{k_n} < \infty$; and
\item for all $0 < \delta \leq \pi$,
\begin{equation*}
\lim_{n \to \infty} \left(
\int_{-\pi}^{-\delta} \abs{k_n}
+ \int_{\delta}^{\pi} \abs{k_n}
\right) = 0.
\end{equation*}
\end{enumerate}
If we further have $k_n \geq 0$ for all $n \geq 1$,
we say that $(k_n)_{n=1}^{\infty}$ is a \hldefn{positive summability
kernel}.
\end{defn}
\begin{thm}[Féjer kernel as a Positive Summability Kernel]\label{thm:fejer_kernel_as_a_positive_summability_kernel}
The Féjer kernel $(F_N)_{N=1}^{\infty}$ is a
positive summability kernel.
\end{thm}
\begin{proof}
\cref{thm:properties_of_the_fejer_kernel} proves exactly this.
\end{proof}
\begin{eg}[Other examples of positive summability kernels]
\begin{enumerate}
\item For each $n \in \mathbb{N} \setminus \{ 0 \}$,
consider the piecewise linear function
\begin{align*}
k_n^{\bigcdot} : [-\pi, \pi) &\to \mathbb{R} \\
\theta &\mapsto \begin{cases}
0 & \theta \in \left[-\pi, -\frac{1}{n}\right] \cup \left[ \frac{1}{n}, \pi \right) \\
n + n^2 \theta & \theta \in \left( -\frac{1}{n}, 0 \right] \\
n - n^2 \theta & \theta \in \left( 0, \frac{1}{n} \right)
\end{cases}.
\end{align*}
\begin{marginfigure}
\centering
\begin{tikzpicture}
\draw[->] (-2.2, 0) -- (2.2, 0);
\draw[->] (0, -0.5) -- (0, 2.2);
\draw[thick,color=cyan] (-2, 0) -- (-0.2, 0) --
(0, 2) -- (0.2, 0) -- (2, 0);
\node[label={270:{$-\pi$}}] at (-2, 0) {|};
\node[label={270:{$\frac{-1}{5}$}}] at (-0.2, 0) {|};
\node[label={270:{$\frac{1}{5}$}}] at (0.2, 0) {|};
\node[label={270:{$\pi$}}] at (2, 0) {|};
\node[label={0:{$5$}}] at (0, 2) {--};
\end{tikzpicture}
\caption{Graph of $k_5^{\bigcdot}$}\label{fig:graph_of_k_5_bigcdot_}
\end{marginfigure}
For $n \in \mathbb{N} \setminus \{ 0 \}$, let $k_n$ be the
$2\pi$-periodic function on $\mathbb{R}$ whose restriction
to the interval $[-\pi, \pi)$ coincides with $k_n^{\bigcdot}$.
Then $(k_n)_{n=1}^{\infty}$ is a positive summability kernel.
\item For each $n \in \mathbb{N} \setminus \{ 0 \}$,
consider the piecewise linear function
\begin{align*}
r_n^{\bigcdot} : [-\pi, \pi) &\to \mathbb{R} \\
\theta &\mapsto \begin{cases}
0 & \theta \in [-\pi, 0] \cup \left[ \frac{2}{n}, \pi \right) \\
n^2 \theta & \theta \in \left( 0, \frac{1}{n} \right] \\
n - n^2 \left( \theta - \frac{1}{n} \right) & \theta \in \left( \frac{1}{n}, \frac{2}{n} \right)
\end{cases}.
\end{align*}
\begin{marginfigure}
\centering
\begin{tikzpicture}
\draw[->] (-2.2, 0) -- (2.2, 0);
\draw[->] (0, -0.5) -- (0, 2.2);
\draw[thick,color=cyan] (-2, 0) -- (-0, 0) --
(0.2, 2) -- (0.4, 0) -- (2, 0);
\node[label={270:{$-\pi$}}] at (-2, 0) {|};
\node[label={270:{$\frac{1}{5}$}}] at (0.2, 0) {|};
\node[label={270:{$\frac{2}{5}$}}] at (0.4, 0) {|};
\node[label={270:{$\pi$}}] at (2, 0) {|};
\node[label={180:{$5$}}] at (0, 2) {--};
\end{tikzpicture}
\caption{Graph of $r_5^{\bigcdot}$}\label{fig:graph_of_r_5_bigcdot_}
\end{marginfigure}
For $n \in \mathbb{N} \setminus \{ 0 \}$, let $r_n$ be the
$2\pi$-periodic function on $\mathbb{R}$ whose restriction
to the interval $[-\pi, \pi)$ coincides with $r_n^{\bigcdot}$.
Then $(r_n)_{n=1}^{\infty}$ is a positive summability kernel.
\end{enumerate}
\end{eg}
\begin{thm}[Summability kernels convolved with functions in Homogeneous Banach Spaces]\label{thm:summability_kernels_convolved_with_functions_in_homogeneous_banach_spaces}
Let $(\mathfrak{B}, \norm{\cdot}_{\mathfrak{B}})$ be
a homogeneous Banach space over $\mathbb{T}$ and
$(k_n)_{n=1}^{\infty}$ be a summability kernel.
If $[f] \in \mathfrak{B}$, then
\begin{equation*}
\lim_{n \to \infty} \norm{ k_n * [f] - [f] }_{\mathfrak{B}} = 0,
\end{equation*}
and so $[f] = \lim_{n \to \infty} k_n * [f]$ in $\mathfrak{B}$.
\end{thm}
\begin{proof}
\hlwarn{To be added}
\end{proof}
\begin{crly}[Reconciliation of the Cesàro sums to the Original Function]\label{crly:reconcilation_of_the_cesaro_sums_to_the_original_function}
\begin{enumerate}
\item For each $f \in (\mathcal{C}(\mathbb{T}, \mathbb{C}), \norm{\cdot}_{\sup})$,
\begin{equation*}
\lim_{N \to \infty} \sigma_N^\circ(f) = f.
\end{equation*}
\item Let $1 \leq p < \infty$.
For each $[g] \in (L_p(\mathbb{T}, \mathbb{C}), \norm{\cdot}_p)$,
\begin{equation*}
\lim_{N \to \infty} \sigma_N[g] = [g].
\end{equation*}
\end{enumerate}
\end{crly}
\begin{proof}
\hlwarn{To be added}
\end{proof}
We can now show that the Fourier coefficients of a function in
$\mathcal{L}_p(\mathbb{T}, \mathbb{C})$ completely determine the function (a.e.).
\begin{crly}[Reconciliation of the Fourier series to its Original Function under the Féjer Kernel]\label{crly:reconcilation_of_the_fourier_series_to_its_original_function_under_the_fejer_kernel}
Let $1 \leq p < \infty$. If $[f], [g] \in L_p(\mathbb{T}, \mathbb{C})$ and
$\alpha_n^{[f]} = \alpha_n^{[g]}$ for all $n \in \mathbb{Z}$, then $[f] = [g]$.
\end{crly}
\begin{proof}
Observe that $\alpha_n^{[f]} = \alpha_n^{[g]}$ for all $n \in \mathbb{Z}$
implies that $\sigma_N[f] = \sigma_N[g]$ for all $N \geq 1$.
It follows from
\cref{crly:reconcilation_of_the_cesaro_sums_to_the_original_function} that
\begin{equation*}
[f] = \lim_{N \to \infty} \sigma_N[f] = \lim_{N \to \infty} \sigma_N[g] = [g].
\end{equation*}
\end{proof}
% section the_fejer_kernel_continued (end)
\section{Which sequences are sequences of Fourier Coefficients?}%
\label{sec:which_sequences_are_sequences_of_fourier_coefficients_}
% section which_sequences_are_sequences_of_fourier_coefficients_
Given $[f] \in L_1(\mathbb{T}, \mathbb{C})$, we defined the Fourier series of
$[f]$ as
\begin{equation*}
\sum_{n \in \mathbb{Z}} \alpha_n^{[f]} [\xi_n].
\end{equation*}
The \hyperref[thm:the_riemann_lebesgue_lemma]{Riemann-Lebesgue Lemma} stated
that
\begin{equation*}
(\alpha_n^{[f]})_{n \in \mathbb{Z}} \in c_0(\mathbb{Z}, \mathbb{C}).
\end{equation*}
It is then natural to ask if every sequence $(\beta_n)_{n \in \mathbb{Z}}
\in c_0(\mathbb{Z}, \mathbb{C})$ is the sequence of coefficients
of some $[f] \in L_1(\mathbb{T}, \mathbb{C})$.
What we have seen is that in the Hilbert space setting,
every $(\gamma_n)_{n \in \mathbb{Z}} \in \ell_2(\mathbb{Z}, \mathbb{C})$
is the sequence of Fourier coefficients of some $[f] \in L_2(\mathbb{T}, \mathbb{C})$,
namely
\begin{equation*}
[f] = \sum_{n \in \mathbb{Z}} \gamma_n[\xi_n].
\end{equation*}
We shall use \hlnotea{Operator Theory} to answer this.
Recall that by the end of \cref{sec:introduction_to_fourier_analysis_continued},
we introduced the map
\begin{align*}
\Lambda : (L_1(\mathbb{T}, \mathbb{C}), \norm{\cdot}_1)
&\to (c_0(\mathbb{Z}, \mathbb{C}), \norm{\cdot}_\infty) \\
[f] &\mapsto \left( \alpha_n^{[f]} \right)_{n \in \mathbb{Z}}.
\end{align*}
Since Lebesgue integration is linear, so is $\Lambda$.
Also, as shown before,
\begin{equation*}
\abs{\alpha_n^{[f]}} \leq \norm{[f]}_1, \quad \forall n \in \mathbb{Z},
\end{equation*}
and so
\begin{equation*}
\norm{\Lambda[f]}_{\infty}
= \sup \{ \abs{\alpha_n^{[f]}} : n \in \mathbb{Z} \} \leq \norm{[f]}_1.
\end{equation*}
Thus $\Lambda$ is bounded, with $\norm{\Lambda} \leq 1$.
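For completeness, the estimate quoted above is immediate from the
defining formula for the Fourier coefficients: since $\abs{\xi_n} = 1$
on $\mathbb{T}$,
\begin{equation*}
\abs{\alpha_n^{[f]}}
= \abs{ \frac{1}{2\pi} \int_{[-\pi, \pi)} f(\theta)
\overline{\xi_n(\theta)} \dm(\theta) }
\leq \frac{1}{2\pi} \int_{[-\pi, \pi)} \abs{f} \dm
= \norm{[f]}_1, \quad n \in \mathbb{Z}.
\end{equation*}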
By
\cref{crly:reconcilation_of_the_fourier_series_to_its_original_function_under_the_fejer_kernel},
if $[f], [g] \in L_1(\mathbb{T}, \mathbb{C})$ and $\Lambda[f] = \Lambda[g]$,
then $[f] = [g]$, and thus $\Lambda$ is injective.
Our question of whether or not every sequence in $c_0(\mathbb{Z}, \mathbb{C})$
is a sequence of Fourier coefficients of some element of $L_1(\mathbb{T}, \mathbb{C})$
is therefore the question of whether or not $\Lambda$ is surjective.
We require the \hlnotea{Inverse Mapping Theorem} from \hlnotea{Functional
Analysis} to answer this question. To that end,
we first introduce some notation.
Given a Banach space $(\mathfrak{Z}, \norm{\cdot}_{\mathfrak{Z}})$
and a real number $r > 0$, we denote the \hlnoteb{closed ball of radius $r$
centered at the origin} by
\begin{equation*}
\mathfrak{Z}_r = \{ z \in \mathfrak{Z} : \norm{z}_{\mathfrak{Z}} \leq r \}.
\end{equation*}
For $z_0 \in \mathfrak{Z}$ and $\epsilon > 0$, we denote by
$B^{\mathfrak{Z}}(z_0, \epsilon) = \{ z \in \mathfrak{Z} : \norm{z - z_0} < \epsilon \}$
the open ball of radius $\epsilon$ in $\mathfrak{Z}$,
centered at $z_0$.
\begin{lemma}[Finding an Open Container from a Closed Container]\label{lemma:finding_an_open_container_from_a_closed_container}
Let $\mathfrak{X}$ and $\mathfrak{Y}$ be Banach spaces
and suppose that $T \in \mathcal{B}(\mathfrak{X}, \mathfrak{Y})$.
If $\mathfrak{Y}_1 \subseteq \overline{T \mathfrak{X}_m}$ for some $m \geq 1$,
then $\mathfrak{Y}_1 \subseteq T \mathfrak{X}_{2m}$.
\end{lemma}
\begin{proof}
\hlwarn{To be added}
\end{proof}
\begin{thm}[The Open Mapping Theorem]\index{The Open Mapping Theorem}\label{thm:the_open_mapping_theorem}
Let $\mathfrak{X}$ and $\mathfrak{Y}$ be Banach spaces and
suppose that $T \in \mathcal{B}(\mathfrak{X}, \mathfrak{Y})$
is a surjection.
Then $T$ is an open map; i.e. if $G \subseteq \mathfrak{X}$ is open,
then $TG \subseteq \mathfrak{Y}$ is open.
\end{thm}
\begin{proof}
\hlwarn{To be added}
\end{proof}
\begin{crly}[The Inverse Mapping Theorem]\index{The Inverse Mapping Theorem}\label{crly:the_inverse_mapping_theorem}
Let $\mathfrak{X}$ and $\mathfrak{Y}$ be Banach spaces and
suppose that $T \in \mathcal{B}(\mathfrak{X}, \mathfrak{Y})$ is a bijection.
Then $T^{-1}$ is continuous, and so $T$ is a homeomorphism.
\end{crly}
\begin{proof}
Since $T$ is a linear bijection, it has an inverse,
which must also be linear, by basic linear algebra.
If $G \subseteq \mathfrak{X}$ is open, then $(T^{-1})^{-1}(G) = TG$ is open
in $\mathfrak{Y}$ by the Open Mapping Theorem.
Thus $T^{-1}$ is continuous, hence a homeomorphism.
\end{proof}
\begin{thm}[$L_1(\mathbb{T}, \mathbb{C})$ and $c_0(\mathbb{Z}, \mathbb{C})$ are Not Isomorphic]\label{thm:_l_1_mathbb_t_mathbb_c_and_c_0_mathbb_z_mathbb_c_are_not_isomorphic}
The map
\begin{align*}
\Lambda : (L_1(\mathbb{T}, \mathbb{C}), \norm{\cdot}_1)
&\to (c_0(\mathbb{Z}, \mathbb{C}), \norm{\cdot}_\infty) \\
[f] &\mapsto \left( \alpha_n^{[f]} \right)_{n \in \mathbb{Z}}
\end{align*}
is not surjective.
\end{thm}
\begin{proof}
If it were surjective, then by the Inverse Mapping Theorem,
\begin{align*}
\Lambda^{-1} : c_0(\mathbb{Z}, \mathbb{C}) &\to L_1(\mathbb{T}, \mathbb{C}) \\
\left( \alpha_n^{[f]} \right)_{n \in \mathbb{Z}} &\mapsto [f]
\end{align*}
must be continuous.
Let $D_N$ be the \hyperref[defn:dirichlet_kernel_of_order_n_]{Dirichlet kernel
of order $N$}, and let $d_N \coloneqq \Lambda[D_N]$, for $N \geq 1$.
Then $d_N = (\ldots, 0, 0, \ldots, 0, 1, 1, \ldots, 1, 1, 0, 0, \ldots)$,
where the $1$'s appear for the indices $-N \leq k \leq N$.
It is clear that $\norm{d_N}_{\infty} = 1$, and each $d_N$ is
\hlnotea{finitely supported},
\sidenote{This means that there are only finitely many non-zero values in its
indices, which is the case.}
so that $d_N \in c_0(\mathbb{Z}, \mathbb{C})$;
but by part 4 of \cref{thm:properties_of_the_dirichlet_kernel},
\begin{equation*}
\lim_{N \to \infty} \norm{\Lambda^{-1}(d_N)}_1
= \lim_{N \to \infty} \norm{[D_N]}_1 = \infty.
\end{equation*}
Thus $\Lambda^{-1}$ is not continuous, a contradiction.
Hence $\Lambda$ must not be surjective.
In other words, there exists a sequence $(\beta_n)_{n \in \mathbb{Z}}
\in c_0(\mathbb{Z}, \mathbb{C})$ that is not the sequence of Fourier
coefficients of any element of $L_1(\mathbb{T}, \mathbb{C})$.
\end{proof}
\begin{remark}
As remarked, the fact that $[f] \in L_2(\mathbb{T}, \mathbb{C})$ iff
$\left( \alpha_n^{[f]} \right)_{n \in \mathbb{Z}} \in \ell_2(\mathbb{Z}, \mathbb{C})$
makes it tempting to conjecture that perhaps the range of the map
$\Lambda$ from
\cref{thm:_l_1_mathbb_t_mathbb_c_and_c_0_mathbb_z_mathbb_c_are_not_isomorphic}
should be $\ell_1(\mathbb{Z}, \mathbb{C})$, but that is not true at all:
the range of $\Lambda$ is not even contained in
$\ell_1(\mathbb{Z}, \mathbb{C}) \subseteq c_0(\mathbb{Z}, \mathbb{C})$.
For a concrete example, the sequence
\begin{equation*}
\beta_n = \begin{cases}
\frac{1}{n} & n \geq 1 \\
0 & n \leq 0
\end{cases}
\end{equation*}
is clearly in $\ell_2(\mathbb{Z}, \mathbb{C})$
(as $\sum_{n \geq 1} \frac{1}{n^2} < \infty$),
and so $[f] \coloneqq \sum_{n \in \mathbb{Z}} \beta_n [\xi_n]$
converges in $L_2(\mathbb{T}, \mathbb{C}) \subseteq L_1(\mathbb{T}, \mathbb{C})$.
However,
\begin{equation*}
\Lambda[f] = (\beta_n)_{n \in \mathbb{Z}}
\end{equation*}
is definitely not in $\ell_1(\mathbb{Z}, \mathbb{C})$, as the harmonic
series diverges.
\end{remark}
% section which_sequences_are_sequences_of_fourier_coefficients_ (end)
% chapter lecture_22_jul_25th_2019 (end)
\appendix
\chapter{Interest in \texorpdfstring{$2 \pi$}{2pi} periodic functions}%
\label{chp:interest_in_2_pi_periodic_functions}
% chapter interest_in_2_pi_periodic_functions
This is ripped out of Professor Marcoux's \cite{marcoux2019} notes,
which I think is rather important as a motivation to move from
Lebesgue's Theory of Integration into Fourier Analysis,
but not important enough to warrant being added to the
main section of the notes.
\begin{quotebox}{magenta}{foreground}
So where does the notation $L_1 (\mathbb{T}, \mathbb{C})$ come from,
given that we are dealing with $2 \pi$-periodic functions on $\mathbb{R}$?
The issue lies in the fact that we are really interested in studying functions
on $\mathbb{T} \coloneqq \{z \in \mathbb{C} : \abs{z} = 1\}$,
but that we have not yet defined what we mean by a measure on that set.
We are therefore identifying $[-\pi, \pi)$ with $\mathbb{T}$ via
the bijective function $\psi(\theta) = e^{i \theta}$.
Thus, an alternative approach to this would be to say that
a subset $E \subseteq \mathbb{T}$ is measurable if and only if
$\psi^{-1}(E) \subseteq [-\pi, \pi)$ is Lebesgue measurable.
In order to ``normalize'' the measure of $\mathbb{T}$
(i.e. to make its measure equal to 1),
we simply divide Lebesgue measure on $[-\pi, \pi)$ by $2 \pi$.
This still doesn't quite explain why we are interested in $2 \pi$-periodic functions
on $\mathbb{T}$, rather than just functions on $[-\pi, \pi)$, though.
Here is the ``kicker''.
The unit circle $\mathbb{T} \subseteq \mathbb{C}$ has a very special property,
namely, that it is a group.
Given $\theta_0 \in \mathbb{T}$, we can ``rotate'' a function
$f : \mathbb{T} \to \mathbb{C}$ in the sense that we set
$g(\theta) = f (\theta \cdot \theta_0 )$.
Observe that rotation along $\mathbb{T}$ corresponds to translation
(modulo $2 \pi$) of the interval $[-\pi, \pi)$.
The key is the irritating ``modulo $2 \pi$'' problem.
If we don't use modular arithmetic, and if a function $g$ is only defined
on $[-\pi, \pi)$, we can not ``translate'' it,
since the new function need no longer have $[-\pi, \pi)$ as its domain.
We get around this by extending the domain of $g$ to $\mathbb{R}$
and making $g$ $2 \pi$-periodic.
Then we may translate $g$ by any real number $s$, setting
$\tau_s^\circ(g)(\theta) \coloneqq g(\theta - s)$,
which has the effect that if we set $f (e^{i \theta}) = g(\theta)$,
then $g(\theta - s) = f (e^{i \theta} \cdot e^{-is})$.
That is, translation of $g$ under addition corresponds to
rotation of $f$ under multiplication.
The last thing that we need to know is that such translations of functions will
play a crucial role in our study
of Fourier series of elements of $L_1(\mathbb{T}, \mathbb{C})$.
Aside from being a Banach space, $L_1(\mathbb{T}, \mathbb{C})$ can be made
into an algebra under convolution.
While our analysis will not take us as far as that particular result,
we will still need to delve into the theory of convolutions of continuous functions
with functions in $L_1(\mathbb{T}, \mathbb{C})$.
This will provide us with a way of understanding how and why various series
associated to the Fourier series of an element $[f] \in L_1(\mathbb{T}, \mathbb{C})$
converge or diverge.
Since convolutions are defined as averages under translation by the group action,
and since $\mathbb{T}$ is a group under multiplication
and $\mathbb{R}$ is a group under addition,
our identification of $(\mathbb{T}, \cdot)$ with $([-\pi, \pi), +)$
(using modular arithmetic) is not an unreasonable way of doing things.
\end{quotebox}
% chapter interest_in_2_pi_periodic_functions (end)
\chapter{Assignment Problems}%
\label{chp:assignment_problems}
% chapter assignment_problems
\section{Assignment 1 (A1)}%
\label{sec:assignment_1}
% section assignment_1
\begin{assgprob}[Separated Sets]
Let $A$ and $B$ be bounded subsets of $\mathbb{R}$
and suppose that
\begin{equation*}
\delta \coloneqq \dist(A, B)
\coloneqq \inf \{ \abs{a - b} : a \in A,\, b \in B \} > 0.
\end{equation*}
Prove that $m^*(A \cup B) = m^*(A) + m^*(B)$.
\end{assgprob}
\begin{assgprob}[A continuity result for Outer Measures]
Let $E \subseteq \mathbb{R}$. Prove that
\begin{equation*}
\lim_{N \to \infty} m^*(E \cap [-N, N]) = m^*(E).
\end{equation*}
\end{assgprob}
\begin{assgprob}[Finite Covers of {$[0, 1]$}]
Let $\Gamma = \mathbb{Q} \cap [0, 1]$.
Prove that if $\{ I_n \}_{n=1}^{N}$ is a finite collection
of open intervals which covers $\Gamma$
(i.e. $\Gamma \subseteq \bigcup_{n=1}^{N} I_n$),
then $\sum_{n=1}^{N} \ell(I_n) \geq 1$.
\end{assgprob}
\begin{assgprob}[Measures on Countable Sets]
Let $X = \{ x_n \}_{n=1}^{\infty}$ be a countable set,
and recall that $\mathcal{P}(X)$ is the \textbf{power set} of $X$,
i.e. $\mathcal{P}(X) = \{ Y : Y \subseteq X \}$.
A \textbf{measure} on $X$ is a function
\begin{equation*}
\mu : \mathcal{P}(X) \to [0, \infty]
\end{equation*}
such that $\mu(\emptyset) = 0$
and for every \textit{disjoint} sequence $E_n \subseteq X$, $n \geq 1$,
we have
\begin{equation*}
\mu \left( \bigcup_{n=1}^{\infty} E_n \right)
= \sum_{n=1}^{\infty} \mu(E_n).
\end{equation*}
Let $\mathcal{M}(X) = \{ \mu : \mu \text{ is a measure on } X \}$.
Find a description of all possible measures on $X$;
i.e., show that there exist sets $A$ and $B$
and a bijective map $\theta : \mathcal{M}(X) \to S \coloneqq B^A$.
\end{assgprob}
\begin{assgprob}[Open Subsets of $\mathbb{R}$]
Prove that if $G \subseteq \mathbb{R}$ is open,
then $G$ is a countable, \textit{disjoint} union of open intervals. \linebreak
\textbf{Hint}: Define a relation on $G$ via $x \sim y$
if $[\min \{ x, y \}, \max \{ x , y \}] \subseteq G$.
\end{assgprob}
\begin{assgprob}[Towards Borel Sets]
Let $E \subseteq \mathbb{R}$.
We say that $E$ is a $G_{\delta}$-set if it is
a countable intersection of open subsets of $\mathbb{R}$.
We say that $E$ is an $F_\sigma$-set if it is
a countable union of closed subsets of $\mathbb{R}$.
Recall that for $E \subseteq \mathbb{R}$,
$E^C \coloneqq \{ x \in \mathbb{R} : x \notin E \}$ is the complement of $E$.
\begin{enumerate}
\item Prove that every open set is an $F_\sigma$-set,
and that every closed set is a $G_\delta$-set.
\item Prove that the set of rational numbers is an $F_\sigma$-set,
but not a $G_\delta$-set.
\item Prove that the set of irrational numbers is a $G_\delta$-set,
but not an $F_\sigma$-set.
\item Let $E_1 = (-\infty, 0] \cap \mathbb{Q}^C$
and $E_2 = [0, \infty) \cap \mathbb{Q}$.
Prove that $E \coloneqq E_1 \cup E_2$ is
neither a $G_\delta$-set nor an $F_\sigma$-set.
\end{enumerate}
\end{assgprob}
% section assignment_1 (end)
\newpage
\section{Assignment 2 (A2)}%
\label{sec:assignment_2}
% section assignment_2
\begin{assgprob}[$\sigma$-additivity and continuity of the Lebesgue Measure]
\begin{enumerate}
\item Let $\mathcal{L}(\mathbb{R})$ denote
the set of Lebesgue measurable subsets of $\mathbb{R}$, and
let $m : \mathcal{L}(\mathbb{R}) \to [0, \infty]$ denote
the Lebesgue measure.
Prove that $m$ is $\sigma$-additive; i.e. if
$E_n \in \mathcal{L}(\mathbb{R})$ for all $n \geq 1$
and $E_i \cap E_j = \emptyset$ if $1 \leq i \neq j < \infty$, then
\begin{equation*}
m \left( \bigcupdot_{n=1}^{\infty} E_n \right)
= \sum_{n=1}^{\infty} m(E_n).
\end{equation*}
\item Suppose that $\{ E_n \}_{n=1}^{\infty}$ is an
\textit{increasing} sequence of Lebesgue measurable sets; i.e.
\begin{equation*}
E_1 \subseteq E_2 \subseteq E_3 \subseteq \hdots.
\end{equation*}
Let $E = \bigcup_{n=1}^{\infty} E_n$, so that $E \in \mathcal{L}(\mathbb{R})$,
as the latter is a $\sigma$-algebra.
Prove that
\begin{equation*}
mE = \lim_{n \to \infty} mE_n.
\end{equation*}
\end{enumerate}
\end{assgprob}
\begin{assgprob}[Continuity of the Lebesgue Measure II]
Let $\{ E_n \}_{n=1}^{\infty}$ be a \textit{decreasing} sequence
of Lebesgue measurable sets; i.e.
\begin{equation*}
E_1 \supseteq E_2 \supseteq E_3 \supseteq \hdots.
\end{equation*}
Let $E = \bigcap_{n=1}^{\infty} E_n$.
\begin{enumerate}
\item \label{item:a2q2a}
Suppose that $mE_1 < \infty$. Prove that
\begin{equation*}
mE = \lim_{n \to \infty} mE_n.
\end{equation*}
\item Does the result of part \ref{item:a2q2a} still hold if $mE_1 = \infty$?
Prove that it does, or provide a counterexample to show
that it need not be true.
\end{enumerate}
\end{assgprob}
\begin{assgprob}[Lebesgue Inner Measure]
Let $E \subseteq \mathbb{R}$ be a set. Prove that the following are equivalent:
\begin{enumerate}
\item $E$ is measurable; i.e. $E \in \mathcal{L}(\mathbb{R})$;
\item For all $\epsilon > 0$, there exists a closed set $F \subseteq E$
so that $m^*(E \setminus F) < \epsilon$;
\item There exists an $F_\sigma$-set $H \subseteq E$
so that $m^*(E \setminus H) = 0$.
\end{enumerate}
Use this to show that if $E \subseteq \mathbb{R}$ is measurable, then
\begin{equation*}
mE = \sup \{ mK : K \subseteq E,\, K \text{ is compact } \}.
\end{equation*}
We say that the Lebesgue measure is \textbf{regular}.
\end{assgprob}
\begin{assgprob}[$\sigma$-algebra of Sets]
\begin{enumerate}
\item Let $\mathcal{A}$ be a $\sigma$-algebra of subsets of $\mathbb{R}$,
and let $f : \mathbb{R} \to \mathbb{R}$ be a function.
Let $\mathcal{B} = \{ H \subseteq \mathbb{R} : f^{-1}(H) \in \mathcal{A} \}$.
Show that $\mathcal{B}$ is a $\sigma$-algebra.
\item Recall that by definition, $f : \mathbb{R} \to \mathbb{R}$
is measurable if $f^{-1}(G) \in \mathcal{L}(\mathbb{R})$
for all open sets $G \subseteq \mathbb{R}$.
Use this definition to prove that $f$ is measurable
if and only if $f^{-1}(B) \in \mathcal{L}(\mathbb{R})$
for all Borel sets $B \in \Bor(\mathbb{R})$.
\item We say that $f : \mathbb{R} \to \mathbb{R}$ is
\textbf{Borel measurable} if $f^{-1}((a, \infty)) \in \Bor(\mathbb{R})$
for all $a \in \mathbb{R}$.
Prove that $f$ is Borel measurable if and only if
$f^{-1}(B) \in \Bor(\mathbb{R})$
for all Borel sets $B \in \Bor(\mathbb{R})$.
\end{enumerate}
\end{assgprob}
\begin{assgprob}[The Cantor-Lebesgue Function]
Recall that we define the \textbf{Cantor (middle third) set} $C$
as $C = \bigcap_{n \geq 1} C_n$, where $C_0 = [0, 1]$,
and for each $n \geq 1$,
\begin{equation*}
C_n = C_{n-1} \setminus \left(
I_{n,1} \cup I_{n, 2} \cup \hdots \cup I_{n, 2^{n-1}}
\right),
\end{equation*}
where $I_{n, j}$ is the open ``middle third''
of the $j$\textsuperscript{th} (closed) interval of $C_{n-1}$.
If we set
\begin{equation*}
G = \bigcup_{n \geq 1} \bigcup_{1 \leq j \leq 2^{n-1}} I_{n, j} ,
\end{equation*}
then the Cantor set is equal to $[0, 1] \setminus G$.
We define the \textbf{Cantor-Lebesgue function} $\Gamma_C$ on $[0, 1]$
as follows.
For $x \in I_{n, j}$, we set $\Gamma_C(x) = \frac{2j - 1}{2^n}$,
$1 \leq j \leq 2^{n-1}$.
We then extend $\Gamma_C$ to all of $[0, 1]$
by setting $\Gamma_C(0) = 0$, and for $x \in (0, 1]$, we set
\begin{equation*}
\Gamma_C(x) = \sup \{ \Gamma_C(t) : t \in [0, x) \cap G \}.
\end{equation*}
\begin{enumerate}
\item Prove that the Cantor-Lebesgue function $\Gamma_C$
is an increasing, continuous function that maps
$[0, 1]$ onto $[0, 1]$.
\item Prove that if $\phi(x) = \Gamma_C(x) + x$ for all $x \in [0, 1]$,
then $\phi$ is a continuous function that maps $[0, 1]$
onto $[0, 2]$.
\item Prove that $\phi(C) \coloneqq \{ \phi(x) : x \in C \} \subseteq [0, 2]$
is a measurable set of positive measure.
\end{enumerate}
\end{assgprob}
\begin{defn}[Limit Superior and Limit Inferior]\index{Limit Superior}\index{Limit Inferior}\label{defn:limit_superior_and_limit_inferior}
Suppose that $(x_n)_n \in \mathbb{R}^{\mathbb{N}}$ is a bounded
\textit{sequence of real numbers}.
We define the \hlnoteb{limit superior} (or \hlnoteb{limit supremum})
of the sequence $(x_n)_n$ to be
\begin{equation*}
\limsup_{n \geq 1} x_n \coloneqq \lim_{n \to \infty} \sup_{k \geq n} x_k,
\end{equation*}
and the \hlnoteb{limit inferior} (or \hlnoteb{limit infimum}) to be
\begin{equation*}
\liminf_{n \geq 1} x_n \coloneqq \lim_{n \to \infty} \inf_{k \geq n} x_k.
\end{equation*}
Setting $z_n \coloneqq \sup_{k \geq n} x_k$, for $n \geq 1$,
we find that $z_n \geq z_{n + 1}$ for all $n \geq 1$,
and, from this, one should be able to convince themselves
that $\limsup_{n \geq 1} x_n$ always exists,
and similarly that $\liminf_{n \geq 1} x_n$ always exists.
Moreover, if $\mu \leq x_n \leq \nu$ for all $n \geq 1$
(since we assumed that $(x_n)_n$ is bounded), then
\begin{equation*}
\mu \leq \liminf_{n \geq 1} x_n \leq \limsup_{n \geq 1} x_n \leq \nu.
\end{equation*}
If $(x_n)_n$ is not bounded above,
we define $\limsup_{n \geq 1} x_n = \infty$,
while if $(x_n)_n$ is not bounded below,
we define $\liminf_{n \geq 1} x_n = -\infty$.
\end{defn}
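For a quick illustration of these definitions, take
$x_n = (-1)^n \left( 1 + \frac{1}{n} \right)$ for $n \geq 1$.
The even-indexed terms decrease to $1$ and the odd-indexed terms
increase to $-1$, so
\begin{equation*}
\limsup_{n \geq 1} x_n = 1
\qquad \text{and} \qquad
\liminf_{n \geq 1} x_n = -1,
\end{equation*}
even though $\lim_{n \to \infty} x_n$ does not exist.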
\begin{assgprob}[Lim Sups and Lim Infs --- I]
Let $(x_n)_n \in \mathbb{R}^{\mathbb{N}}$ be a bounded sequence
of real numbers.
\begin{enumerate}
\item Prove that
\begin{equation*}
\limsup_{n \geq 1} x_n
= \inf \{ \gamma \in \mathbb{R} : \exists N > 0 \enspace
\forall n \geq N \enspace x_n < \gamma \}.
\end{equation*}
\item Prove that if $\limsup_{n \geq 1} x_n < \mu$,
then there exists $N \in \mathbb{N}$ such that
$n \geq N$ implies that $x_n < \mu$.
(We say that $x_n < \mu$ for large $n$.)
\item Prove that $\limsup_{n \geq 1} x_n > \mu$
implies that $x_n > \mu$ for infinitely many values of $n \in \mathbb{N}$.
\item Show that if $(a_n)_n$ and $(b_n)_n$ are bounded sequences
of real numbers, then
\begin{equation*}
\limsup_{n \geq 1} (a_n + b_n)
\leq \limsup_{n \geq 1} a_n + \limsup_{n \geq 1} b_n.
\end{equation*}
Give an example to show that equality need not occur.
\end{enumerate}
\end{assgprob}
\begin{assgprob}[Lim Sups and Lim Infs --- II]
\begin{enumerate}
\item Let $(x_n)_n \in \mathbb{R}^{\mathbb{N}}$ be a bounded sequence.
Prove that if $\beta \coloneqq \limsup_{n \geq 1} x_n$, then
\begin{enumerate}
\item there exists a subsequence $(x_{n_k})_k$ of
$(x_n)_n$ such that $\lim_{k \to \infty} x_{n_k} = \beta$;
and
\item if $(x_{m_k})_k$ is any subsequence of $(x_n)_n$ which converges,
say to some $\alpha \in \mathbb{R}$, then $\alpha \leq \beta$.
\end{enumerate}
In other words, $\beta$ is the largest limit point of any subsequence
of $(x_n)_n$.
\item Let $(y_n)_n \in \mathbb{R}^{\mathbb{N}}$ be a sequence.
Prove that the following conditions are equivalent:
\begin{enumerate}
\item there exists $\gamma \in \mathbb{R}$ such that
$\lim_{n \to \infty} y_n = \gamma \in \mathbb{R}$;
i.e. $(y_n)_n$ is convergent (to $\gamma$); and
\item $\limsup_{n \geq 1} y_n = \gamma = \liminf_{n \geq 1} y_n$.
\end{enumerate}
\end{enumerate}
\end{assgprob}
% section assignment_2 (end)
\newpage
\section{Assignment 3 (A3)}%
\label{sec:assignment_3}
% section assignment_3
\begin{assgprob}[Measurability of Extended Real-Valued Functions]
Recall that $\overline{\mathbb{R}} = \mathbb{R} \cup \{ - \infty, \infty \}$
denotes the set of \textbf{extended real numbers}.
Let $f : \mathbb{R} \to \overline{\mathbb{R}}$ be a function,
and recall that $f$ is said to be measurable if
$f^{-1}(G) \in \mathcal{L}(\mathbb{R})$ for all open sets $G \subseteq \mathbb{R}$
and $f^{-1}(\{-\infty\}), f^{-1}(\{\infty\}) \in \mathcal{L}(\mathbb{R})$.
Prove that the following are equivalent:
\begin{enumerate}
\item $f$ is measurable.
\item For all $\alpha \in \mathbb{R}$,
$f^{-1}((\alpha, \infty]) \in \mathcal{L}(\mathbb{R})$.
\item For all $\beta \in \mathbb{R}$,
$f^{-1}([-\infty, \beta)) \in \mathcal{L}(\mathbb{R})$.
\end{enumerate}
\end{assgprob}
\begin{assgprob}[Measurable Functions as Limits of Simple Functions]
\begin{enumerate}
\item Let $f : \mathbb{R} \to [0, \infty]$ be a measurable function.
Show that there exists an increasing sequence of measurable,
simple functions $\phi_n : \mathbb{R} \to [0, \infty)$ so that
\begin{equation*}
f(x) = \lim_{n \to \infty} \phi_n(x),\quad \forall x \in \mathbb{R}.
\end{equation*}
\item Let $E \in \mathcal{L}(\mathbb{R})$ and let
$g : E \to [0, \infty]$ be a measurable function.
Show that there exists an increasing sequence of measurable,
simple functions $\psi_n : E \to [0, \infty)$ so that
\begin{equation*}
g(x) = \lim_{n \to \infty} \psi_n(x),\quad \forall x \in E.
\end{equation*}
\end{enumerate}
\noindent
\textbf{Hint for 1.}: For each $n \geq 1$, partition the interval $[0, n)$
into $n 2^n$ equal subintervals $E_{k, n} = \left[ \frac{k}{2^n},
\frac{k+1}{2^n} \right)$, $0 \leq k \leq n 2^n - 1$.
Let $E_{n 2^n, n} = [n, \infty]$.
Use the sets $f^{-1}(E_{k, n})$, $0 \leq k \leq n 2^n$, to build $\phi_n$.
\noindent
\textbf{Hint for 2.}: This should be very short. Otherwise, you're doing
something wrong.
\end{assgprob}
\begin{assgprob}[An Example]
\begin{enumerate}
\item \label{item:a3q3a}
Let $E = [0, 1]$. Fix $m \geq 1$ and let $f : E \to \mathbb{R}$
be the function $f(x) = x^m$.
Since $f$ is continuous, $f$ is measurable.
Prove that the Lebesgue integral of $f$ over $E$ satisfies
\begin{equation*}
\int_{[0, 1]} f = \frac{1}{m + 1}.
\end{equation*}
Note: You may not use
\cref{thm:bounded_riemann_integrable_functions_are_lebesgue_integrable}.
You must prove this using only techniques available to Lebesgue
integration, and it should suffice to do so with knowledge from before
\cref{thm:bounded_riemann_integrable_functions_are_lebesgue_integrable}. \linebreak
(\textbf{Hint}: The Monotone Convergence Theorem with the last problem
should be useful.)
\item Let $g : E \to \mathbb{R}$ be the function $g(x) = e^x$, i.e.
the exponential function.
Prove that the Lebesgue integral of $g$ satisfies
\begin{equation*}
\int_{0}^{1} g = e^1 - 1 = e - 1.
\end{equation*}
\textbf{Hint}: this should be much easier than part \ref{item:a3q3a}.
\end{enumerate}
\end{assgprob}
\begin{assgprob}[Sets of Positive Measure are ``Large'']
\begin{enumerate}
\item \label{item:a3q4a}
Let $E \in \mathfrak{M}(\mathbb{R})$ be a measurable set and
suppose that $m(E) > 0$, i.e. $E$ has strictly positive Lebesgue measure.
Prove that the set
\begin{equation*}
H = E - E \coloneqq \{ x - y : x, y \in E \}
\end{equation*}
contains an interval.
Hints:
\begin{itemize}
\item First, reduce to the case where $0 < mE < \infty$.
\item Show that there exists an open interval $I = (a, b)$
so that $m(E \cap (a, b)) > 0.9(b - a)$.
\item Let $F \coloneqq E \cap (a, b) \subseteq E$.
Suppose that $\alpha \in (-\delta, \delta)$,
where $\delta = 0.1(b - a)$.
Show that $F \cap (F + \alpha) \neq \emptyset$ by considering
upper and lower bounds for the measure of $F \cup (F + \alpha)$.
\end{itemize}
\item \label{item:a3q4b}
Conclude that if $E \in \mathfrak{M}(\mathbb{R})$
satisfies $m(E) > 0$, then
the cardinality of $E$ coincides with $c$,
the cardinality of $\mathbb{R}$.
\textbf{Hint}: Part \ref{item:a3q4b} is doable even if you did not get
part \ref{item:a3q4a}.
\end{enumerate}
\end{assgprob}
\begin{assgprob}[The Ubiquity of Non-Measurable Sets]
Prove that every measurable set $E$ of (strictly) positive measure
contains a non-measurable subset $Z$. \linebreak
\textbf{Hints}: You may wish to reduce to the case where $E \subseteq [-N, N]$
for some $N > 0$.
Then, you may wish to refer to the existence of non-measurable sets
as demonstrated in the notes.
You may DIY as well if you have an idea on what to do.
\end{assgprob}
\begin{assgprob}[Pointwise Convergence of Measurable Functions]
Let $E \in \mathfrak{M}(\mathbb{R})$ be a measurable set
and suppose that $m(E) < \infty$.
Let $(f_n)_n \in \mathcal{L}(E, \mathbb{R})$ and
suppose that $(f_n)_n$ converges pointwise to a real-valued function
$f : E \to \mathbb{R}$.
Prove that if $\epsilon > 0$ and $\delta > 0$, then there exists
a measurable set $H \subseteq E$ and an integer $N \geq 1$ such that
\begin{enumerate}
\item $m(H) < \delta$; and
\item $x \notin H$ implies that $\abs{f_n(x) - f(x)} < \epsilon$ for all
$n \geq N$.
\end{enumerate}
\end{assgprob}
% section assignment_3 (end)
\newpage
\section{Assignment 4 (A4)}%
\label{sec:assignment_4}
% section assignment_4
\begin{assgprob}[Completeness in Normed Linear Spaces]
Let $(\mathfrak{X}, \norm{\cdot})$ be a normed linear space.
Prove that $\mathfrak{X}$ is complete, and hence a Banach space,
if and only if every absolutely summable series in $\mathfrak{X}$ is summable.
Here, a series $\sum_{n=1}^{\infty} x_n$ in $\mathfrak{X}$ is said
to be \textbf{summable} if
\begin{equation*}
x \coloneqq \lim_{N \to \infty} \sum_{n=1}^{N} x_n
\end{equation*}
exists in $\mathfrak{X}$, while the series is said to be
\textbf{absolutely summable} if
\begin{equation*}
\lim_{N \to \infty} \sum_{n=1}^{N} \norm{x_n} < \infty.
\end{equation*}
\end{assgprob}
\begin{assgprob}[$\ell_p$-Spaces, Part I]
Let $1 \leq p < \infty$.
For $\mathbb{K} = \mathbb{R}$ or $\mathbb{K} = \mathbb{C}$, we define
the $\ell_p$-spaces:
\begin{equation*}
\ell_p = \ell_p(\mathbb{N})
\coloneqq \left\{ x = (x_n)_{n=1}^{\infty} \in \mathbb{K}^{\mathbb{N}} :
\sum_{n=1}^{\infty} \abs{x_n}^{p} < \infty \right\}.
\end{equation*}
Furthermore, for $x \in \ell_p$, we define $\norm{x}_p = \left( \sum_{n \geq
1} \abs{x_n}^p \right)^{1 / p}$.
When $p = \infty$, we define
\begin{equation*}
\ell_\infty = \ell_\infty(\mathbb{N})
\coloneqq \left\{ x = (x_n)_{n=1}^{\infty} \in \mathbb{K}^{\mathbb{N}} :
\sup_{n \geq 1} \abs{x_n} < \infty \right\}.
\end{equation*}
For $x \in \ell_\infty$, we define $\norm{x}_{\infty} = \sup_{n \geq 1}
\abs{x_n}$.
\begin{enumerate}
\item Prove Hölder's Inequality for $\ell_p$-spaces:
that is, if $1 \leq p \leq \infty$ and $\frac{1}{p} + \frac{1}{q} = 1$,
and if $x = (x_n)_n \in \ell_p$ and $y = (y_n)_n \in \ell_q$,
then
\begin{equation*}
xy \coloneqq (x_n y_n)_n \in \ell_1,
\end{equation*}
and $\norm{xy}_1 \leq \norm{x}_p \norm{y}_q$.
\end{enumerate}
\end{assgprob}
% section assignment_4 (end)
% chapter assignment_problems (end)
\backmatter
\fancyhead[LE]{\thepage \enspace \textsl{\leftmark}}
\bibliography{references}
\printindex
\end{document}
% vim:tw=80:fdm=syntax
%===============================================================================
%======= Document Information ==========================================
%===============================================================================
\title{CS 682: Project Report}
\author{Terence Henriod}
\date{\today}
\documentclass[11pt]{article}
%===============================================================================
%======= Packages Used =================================================
%===============================================================================
%\usepackage{qtree}
\usepackage{amsmath}
\usepackage{amssymb}
%\usepackage[named]{algo}
\usepackage{verbatim}
%\usepackage{graphicx}
% \DeclareGraphicsExtensions{.pdf,.png,.jpg}
\usepackage[right=1in,top=1in,left=1in,bottom=1in]{geometry}
%===============================================================================
%======= Document Settings =============================================
%===============================================================================
\setlength{\topmargin}{-1cm}
\setlength{\oddsidemargin}{0in}
\setlength{\textwidth}{6.5in}
\setlength{\textheight}{8.3in}
%===============================================================================
%======= User Defined Commands =========================================
%===============================================================================
\newcommand{\BigO}[1]{\ensuremath{\operatorname{O}\bigl(#1\bigr)}}
\newcommand{\BigTheta}[1]{\ensuremath{\operatorname{\Theta}\bigl(#1\bigr)}}
\newcommand{\BigOmega}[1]{\ensuremath{\operatorname{\Omega}\bigl(#1\bigr)}}
%===============================================================================
%======= The Document ==================================================
%===============================================================================
\begin{document}
\maketitle
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
%~~~~~~~ Abstract ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
\begin{abstract}
For this project an attempt was made to find a way to correct errors made by Optical Character Recognition methods in the scanning of French documents from 1880-1910 for historical research. While no final results were reached, some groundwork was laid for future endeavors.
\end{abstract}
\newpage
\section{Project Description}
Dr. Christopher Church of the University of Nevada, Reno does research by studying historical context through sentiment analysis of news documents from relevant periods (specifically periodicals from 1880-1910 written in French). Dr. Church has experienced difficulty due to the fact that Optical Character Recognition (OCR) techniques are imperfect when scanning documents that are not high quality.
At the suggestion of Dr. Richard Kelley, a spelling correction approach was agreed upon. Since many of the words (or even characters) in the documents are correctly produced by the OCR methods, it is reasonable to expect that the comparatively few erroneous words can be corrected with spelling correction techniques.
It has been demonstrated that spelling correction can be done with ``toy'' effectiveness with a simple approach. Peter Norvig has demonstrated (in a highly referenced blog post found at: http://norvig.com/spell-correct.html) that candidate spellings within a relatively short ``edit distance'' of a misspelled word, combined with a dictionary of correctly spelled words and their relative frequencies, can be used to determine a probable replacement for the misspelled word.
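To make this concrete, below is a minimal sketch, in Java (the language chosen for this project), of the candidate-generation step that Norvig describes. The class and method names are illustrative rather than taken from the project's code, and the alphabet shown is the ASCII one; as discussed in the Character Encoding section below, a French version would also need the accented characters.
\begin{verbatim}
import java.util.HashSet;
import java.util.Set;

// Illustrative sketch of Norvig-style candidate generation.
public class EditCandidates {
    private static final String ALPHABET = "abcdefghijklmnopqrstuvwxyz";

    // Returns every string within edit distance 1 of word, formed by
    // single-character deletions, adjacent transpositions, replacements,
    // and insertions.
    public static Set<String> edits1(String word) {
        Set<String> edits = new HashSet<>();
        for (int i = 0; i <= word.length(); i++) {
            String head = word.substring(0, i);
            String tail = word.substring(i);
            if (!tail.isEmpty()) {
                edits.add(head + tail.substring(1));            // deletion
            }
            if (tail.length() > 1) {                            // transposition
                edits.add(head + tail.charAt(1) + tail.charAt(0)
                        + tail.substring(2));
            }
            for (char c : ALPHABET.toCharArray()) {
                if (!tail.isEmpty()) {
                    edits.add(head + c + tail.substring(1));    // replacement
                }
                edits.add(head + c + tail);                     // insertion
            }
        }
        return edits;
    }

    public static void main(String[] args) {
        // A 4-letter word yields a few hundred distinct candidates.
        System.out.println(edits1("chat").size());
    }
}
\end{verbatim}
A corrector then intersects these candidates (and, if necessary, the candidates at edit distance two) with the dictionary of known words and selects the most frequent surviving candidate.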
This concept can be improved upon, however, by using context to find better replacements for misspelled words. As Norvig has suggested, using adjacent words in a sentence is a promising extension of the basic spelling corrector he describes.
Dr. Kelley suggests using the Google N-gram data, which is freely available and quite sizable. Google's data spans over one hundred years, and includes many languages, including French. Google provides data for 1- to 5-grams, inclusive.
For this project, the Java programming language was chosen because it offers a good balance of cross-platform portability and performance, relative to some other languages. There is also a large number of companion libraries and development tools.
\section{Data Collection}
Collecting and managing the data for this project was a large task in and of itself. Google's N-gram data is so extensive that it took multiple machines several days to download and filter it. While the filtering itself was simple (a Java filter was written in under an hour, internet request and tar.gz stream library research and all), the data set is so large that it presents issues in and of itself. Further, possible encoding issues may have arisen: the resulting files featured ``words'' that consisted of symbols such as lone commas. This should be easily remedied, but character encoding is an important issue that must be addressed for this task for more reasons than simple correct data transmission.
It should also be noted that the amount of data was incredibly large. The subset of French N-gram data that consisted of words used in 1880-1910 documents was over 600 GB. Not only is this far more data than can fit into the RAM of a typical computer system, it is more than a typical hard-drive can hold if it is not dedicated for this purpose. This increases the difficulty of accessing the data as necessary.
\section{Basic Spelling Corrector}
A basic spelling corrector was written in Java according to Norvig's algorithm. Currently it produces precisely the same corrections as Norvig's Python version, with roughly the same speed; the Java version is perhaps slightly faster. However, it is expected that the Java version will be more easily expandable. It currently features the ability to be trained using N-gram data, although this capability is currently untested.
\section{Performance for the Task}
Unfortunately, the simple spelling corrector has not yet been tested on the task. This is largely because the Google N-gram data is difficult to manage and utilize, and partly because spelling corrections cannot be verified without someone who knows French.
\section{Future Work}
In order to make a fully functional French post-processor, I recommend work in the following three areas:
\subsection{Character Encoding}
Character encoding is a vital issue to address. Since there are many characters used in French that exist in the Unicode standard, but not the ASCII one, \emph{a functional spelling corrector must handle UTF-8 or UTF-16 character encoding}. This should be relatively simple, but may require searching for suitable helper libraries if the standard Java String proves too unwieldy for the task.
\subsection{Managing the Data}
The data is large. It comes in files as large as 19 GB. There are hundreds of files. For this reason, a special indexing scheme should be considered to help improve manageability and performance. Dr. Kelley has also suggested compression, perhaps using the Google Protocol Buffer library.
\subsection{Addition of Context Utilization}
The whole aim of the project was to create a spell checker that uses context to achieve effective word correction. This functionality will rely less on coding algorithmic logic and more on finding a way to effectively search and utilize the N-gram data.
\section{Conclusion}
What this project may produce in the time to come is exciting: both an effective system for mitigating OCR errors in languages other than English and a homegrown system capable of managing a very large dataset effectively. This may result in some kind of distributed system, although it would be particularly exciting if it could be done on a single, high-performance machine.
\end{document} | {
"alphanum_fraction": 0.6699604743,
"avg_line_length": 82.612244898,
"ext": "tex",
"hexsha": "a84113268b9ac9131262d1101d22f0143075133c",
"lang": "TeX",
"max_forks_count": 25,
"max_forks_repo_forks_event_max_datetime": "2020-12-29T13:23:10.000Z",
"max_forks_repo_forks_event_min_datetime": "2016-10-18T03:31:44.000Z",
"max_forks_repo_head_hexsha": "0edc83a7bf09515f0d01d23a26df2ff90c0f458a",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "T-R0D/Past-Courses",
"max_forks_repo_path": "CS682/FinalReport/Project_Report.tex",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "0edc83a7bf09515f0d01d23a26df2ff90c0f458a",
"max_issues_repo_issues_event_max_datetime": "2021-05-29T19:54:52.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-05-29T19:54:02.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "T-R0D/Past-Courses",
"max_issues_repo_path": "CS682/FinalReport/Project_Report.tex",
"max_line_length": 720,
"max_stars_count": 7,
"max_stars_repo_head_hexsha": "0edc83a7bf09515f0d01d23a26df2ff90c0f458a",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "T-R0D/Past-Courses",
"max_stars_repo_path": "CS682/FinalReport/Project_Report.tex",
"max_stars_repo_stars_event_max_datetime": "2021-09-27T16:51:22.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-03-13T17:32:26.000Z",
"num_tokens": 1514,
"size": 8096
} |
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% HBOOK - Reference Manual -- LaTeX Source %
% %
% Front Material: Title page, %
% Copyright Notice %
% Preliminary Remarks %
% Table of Contents %
% EPS file : cern15.eps, cnastit.eps %
% %
% Editor: Michel Goossens / CN-AS %
% Last Mod.: 9 December 1993 11:40 mg %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                            Title page                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\notHTML{\def\Ptitle#1{\special{ps: /Printstring (#1) def}
\epsfbox{/usr/local/lib/tex/ps/cnastit.eps}}}
\HTML{\def\Ptitle#1{<B>#1</B>}}
\begin{titlepage}
\notHTML{\vspace*{-23mm}}%
\notHTML{\mbox{\epsfig{file=/usr/local/lib/tex/ps/cern15.eps,height=30mm}}}%
\HTML{<IMG SRC="../cernlogo.gif">}%
\hfill
\raise8mm\hbox{\Large\bf CERN Program Library Long Writeup Y250}
\hfill\mbox{}
\HTML{<P>}
\begin{center}
\mbox{}\\[10mm]
\mbox{\Ptitle{HBOOK}}\\[2cm]
\HTML{<P>\\}
{\LARGE Reference Manual}\\[2cm]
\HTML{<P>\\}
{\LARGE Version 4.22}\\[3cm]
\HTML{<P>\\}
{\Large Application Software Group}\\[1cm]
{\Large Computing and Networks Division}\\[2cm]
\end{center}
\HTML{<P>}
\notHTML{\vfill}%
\begin{center}\Large CERN Geneva, Switzerland\end{center}
\end{titlepage}
\Filename{H1Preface}
\HTML{<H1>Preface</H1>}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Copyright page %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\HTML{\PRE}
\thispagestyle{empty}
\framebox[\textwidth][t]{\hfill\begin{minipage}{0.96\textwidth}%
\vspace*{3mm}\begin{center}Copyright Notice\end{center}
\parskip\baselineskip
{\bf HBOOK -- Statistical Analysis and Histogramming}
CERN Program Library entry {\bf Y250}
\copyright{} Copyright CERN, Geneva 1993
Copyright and any other appropriate legal protection of these
computer programs and associated documentation reserved in all
countries of the world.
These programs or documentation may not be reproduced by any
method without prior written consent of the Director-General
of CERN or his delegate.
Permission for the usage of any programs described herein is
granted a priori to those scientific institutes associated with
the CERN experimental program or with whom CERN has concluded
a scientific collaboration agreement.
Requests for information should be addressed to:
\vspace*{-.5\baselineskip}
\begin{center}
\tt\begin{tabular}{l}
CERN Program Library Office \\
CERN-CN Division \\
CH-1211 Geneva 23 \\
Switzerland \\
Tel. +41 22 767 4951 \\
Fax. +41 22 767 7155 \\
Bitnet: CERNLIB@CERNVM \\
DECnet: VXCERN::CERNLIB (node 22.190) \\
Internet: [email protected]
\end{tabular}
\end{center}
\vspace*{2mm}
\end{minipage}\hfill}%end of minipage in framebox
\vspace{6mm}
\HTML{<P>}
{\bf Trademark notice: All trademarks appearing in this guide are acknowledged as such.}
\vfill
\HTML{<P>}
\begin{tabular}{l@{\quad}l@{\quad}>{\tt}l}
{\em Contact Person\/}: & Ren\'e Brun /CN & (BRUN\atsign CERNVM.CERN.CH)\\[1mm]
{\em Technical Realization\/}: & Michel Goossens /CN & (michel.goossens\atsign cern.ch)\\[1cm]
{\em Edition -- June 1994}
\end{tabular}
\HTML{\ePRE}%
\newpage
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Introductory material %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\pagenumbering{roman}
\setcounter{page}{1}
\Filename{H2History}
\section*{History}
HBOOK is a Fortran\footnote{A C interface is also distributed by
the CERN Program Library, created using the tool f2h} callable
package for histogramming and fitting.
It was originally developed in the 1970s and has since
undergone continuous evolution culminating
in the current version, HBOOK~4.
Many people have contributed to the design and development of HBOOK,
through discussions, comments and suggestions.
% Since the very beginning the main author and coordinator of the
% HBOOK program has been Ren\'e Brun. He still keeps a detailed interest in
% future developments, but the formal responsability for
% the package now lies with Jamie Shiers.
Paolo Palazzi was involved in the original design.
D.~Lienart has been in charge of the parametrization part.
Fred~James is the author of routine \Rind{HDIFF} and of the minimization
package Minuit, which forms the basis of the fitting routines.
The idea of Profile histograms has been taken from the HYDRA system.
The Column-wise-Ntuple routines were implemented by Fons Rademakers.
The multi-dimensional quadratic fit package \Rind{HQUAD} is the work of
John Allison.
J.~Linnemann and his colleagues of the D0 experiment contributed
the routine \Rind{HDIFFB}.
Pierre Aubert is the author of the routines to associate labels
with histograms.
Roger Barlow and Christine Beeston (OPAL) have developed the
\Rind{HMCMLL} package.
\Filename{H2Preliminary-remarks}
\section*{Preliminary remarks}
This manual serves at the same time as a {\bf Reference manual}
and as a {\bf User Guide} for the HBOOK system.
After a short introductory chapter, where the basic ideas
are explained, the following chapters describe in detail
the calling sequences for the different user routines.
In this manual
examples are in {\tt monotype face} and strings to be input by the user
are {\tt\underline{underlined}}.
In the index the page where a routine is defined is in {\bf bold},
page numbers where a routine is referenced are in normal type.
In the description of the routines a \Lit{*} following
the name of a parameter indicates that this is an {\bf output} parameter.
If another \Lit{*} precedes a parameter in the calling sequence, the
parameter in question is both an {\bf input} and {\bf output} parameter.
This document has been produced using \LaTeX~\cite{bib-LATEX}
with the \Lit{cernman} style option, developed at CERN.
A gzip compressed PostScript file \Lit{hbook.ps.gz},
containing a complete printable version
of this manual, can be obtained from any CERN machine
by anonymous ftp as follows
(commands to be typed by the user are underlined):
\vspace*{3mm}
\begin{XMP}
\underline{ftp asis01.cern.ch}
Trying 128.141.201.136...
Connected to asis01.cern.ch.
220 asis01 FTP server (Version 6.10 ...) ready.
Name (asis01:username): \underline{anonymous}
Password: \underline{your\_{}mailaddress}
230 Guest login ok, access restrictions apply.
ftp> \underline{cd cernlib/doc/ps.dir}
ftp> \underline{get hbook.ps.gz} (type \Ucom{get hbook.ps} for the uncompressed version)
ftp> \underline{quit}
\end{XMP}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Tables of contents ... %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newpage
\tableofcontents
%\newpage
\listoffigures
\listoftables
| {
"alphanum_fraction": 0.5925303696,
"avg_line_length": 39.2791878173,
"ext": "tex",
"hexsha": "3fecd223a1fe7ca125eff33a83a32980b5324062",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "76048db0ca60708a16661e8494e1fcaa76a83db7",
"max_forks_repo_licenses": [
"CC-BY-4.0"
],
"max_forks_repo_name": "berghaus/cernlib-docs",
"max_forks_repo_path": "topeterf/hbofront.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "76048db0ca60708a16661e8494e1fcaa76a83db7",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC-BY-4.0"
],
"max_issues_repo_name": "berghaus/cernlib-docs",
"max_issues_repo_path": "topeterf/hbofront.tex",
"max_line_length": 95,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "76048db0ca60708a16661e8494e1fcaa76a83db7",
"max_stars_repo_licenses": [
"CC-BY-4.0"
],
"max_stars_repo_name": "berghaus/cernlib-docs",
"max_stars_repo_path": "topeterf/hbofront.tex",
"max_stars_repo_stars_event_max_datetime": "2019-07-24T12:30:01.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-07-24T12:30:01.000Z",
"num_tokens": 1833,
"size": 7738
} |
%!TEX TS-program = lualatex
%!TEX encoding = UTF-8 Unicode
\documentclass[12pt, addpoints]{exam}
%\printanswers
\usepackage{fontspec}
\setmainfont[Ligatures={TeX}, BoldFont={* Bold}, ItalicFont={* Italic}, BoldItalicFont={* BoldItalic}, Numbers={Monospaced, Lining}]{Linux Libertine O}
\setsansfont[Scale=MatchLowercase,Ligatures=TeX]{Linux Biolinum O}
\setmonofont[Scale=MatchLowercase]{Inconsolatazi4}
\newfontfamily{\tablenumbers}[Numbers={Monospaced,Lining}]{Linux Libertine O}
\usepackage{microtype}
\usepackage{graphicx}
\graphicspath{{/Users/goby/Pictures/teach/163/activities/}}
\usepackage{amsmath}
%\usepackage{unicode-math}
%\setmathfont[Scale=MatchLowercase]{TeX Gyre Termes Math}
\usepackage{geometry}
\geometry{letterpaper, left=1in, bottom=1in}
\usepackage[parfill]{parskip} % Activate to begin paragraphs with an empty line rather than an indent
%\usepackage{siunitx}
\usepackage{booktabs}
\usepackage{array}
\newcolumntype{L}[1]{>{\raggedright\let\newline\\\arraybackslash\hspace{0pt}}m{#1}}
\newcolumntype{C}[1]{>{\centering\let\newline\\\arraybackslash\hspace{0pt}}m{#1}}
\newcolumntype{R}[1]{>{\raggedleft\let\newline\\\arraybackslash\hspace{0pt}}m{#1}}
\usepackage{wrapfig}
\usepackage{threeparttable}
\usepackage[justification=raggedright, labelsep=period]{caption}
\captionsetup{singlelinecheck=off}
\captionsetup{skip=0.2em}
\usepackage{enumitem}
\setlist{leftmargin=*}
\setlist[1]{labelindent=\parindent}
\setlist[enumerate]{label=\textsc{\alph*}.}
%\setlist[itemize]{label=\color{gray}\textbullet}
%\usepackage{hanging}
\usepackage[sc]{titlesec}
\newcommand{\sws}{\textsc{sws}}
\newcommand{\mws}{\textsc{mws}}
\newcommand{\lws}{\textsc{lws}}
%% Commands for Exam class
\newlength{\myindent}
\setlength{\myindent}{\parindent}
\newcommand{\ind}{\hspace*{\myindent}}
\renewcommand{\solutiontitle}{\noindent}
\unframedsolutions
\SolutionEmphasis{\bfseries}
\renewcommand{\questionshook}{%
\setlength{\leftmargin}{-\leftskip}%
}
\pagestyle{headandfoot}
\firstpageheader{\textsc{bi}\,163 Evolution and Ecology}{}{\ifprintanswers\textbf{KEY}\else Name: \enspace \makebox[2.5in]{\hrulefill}\fi}
\runningheader{Evolution of primate trichromacy}{}{\footnotesize{pg. \thepage}}
\footer{}{}{}
\runningheadrule
\newcommand*\AnswerBox[2]{%
\parbox[t][#1]{0.92\linewidth}{%
\begin{solution}#2\end{solution}}
\vspace{\stretch{1}}
}
\begin{document}
\subsection*{Evolution of trichromacy in primates (\numpoints~points)}
Most animals see color because of proteins in the retina of the eye called opsins. Different opsins are sensitive to different wavelengths of light, and together they determine how an animal perceives color. Most animals have 2–4 color opsins, although some have only one and some have as many as 11.
\begin{wrapfigure}[23]{r}{0.5\linewidth}
\vspace{-2\baselineskip}
\centering\includegraphics[width=0.99\linewidth]{primate_phylogeny_vertical}\par
\caption{Phylogeny of the primates. Letters indicate clade ancestors listed in Table~\ref{tab:divergence}.}\label{fig:primate_phylogeny}
\end{wrapfigure}\par
Most mammals have only two opsins. \textit{Short wavelength sensitive} opsin (\sws{}) is most sensitive to wavelengths at the blue end of the light spectrum. \textit{Medium wavelength sensitive} opsin (\mws{}) is most sensitive to the middle green part of the spectrum. Animals with \sws{} and \mws{} opsins are called \textit{dichromats.} Some primates, however, have a third opsin called \textit{long wavelength sensitive} opsin (\lws{}) that is most sensitive to the yellow-red end of the spectrum. Animals with three opsins are called \textit{trichromats.}
Here, you will explore the evolution and timing of trichromacy in primates.
\subsubsection*{Primate phylogeny}
The oldest known fossil primate (Fig.~\ref{fig:primate_phylogeny}, branch \textsc{a}) dates back to nearly 56 million years ago (\textsc{mya}) but a \emph{molecular clock} based on many genes suggests the primate ancestor may have lived even earlier. You will use a molecular clock to estimate the age of the common ancestors shown in Fig.~\ref{fig:primate_phylogeny}, starting on the next page.
%The first step is for you to estimate the time since each group last shared a common ancestor by using a \emph{molecular clock.}
%{\centering
%\noindent\includegraphics[width=\linewidth]{primate_phylogeny}\par
%}
%\subsubsection*{Molecular clocks and primate ancestors}
\newpage
\begin{questions}
\question[2]
The mutation rate for the \textit{cytochrome~b} gene in primates is 0.2\% per million years. %Irwin~et~al.~(1991)
Divide each genetic difference value by the average mutation rate to estimate the time (\textsc{mya}) since each clade last shared an ancestor (Table~\ref{tab:divergence}).
{\renewcommand{\arraystretch}{1.2}
\begin{threeparttable}
\caption{Nine primate clades and the genetic difference within each clade. Not all ancestors are labeled on the tree.}\label{tab:divergence}
\begin{tabular}{@{}llrR{1in}@{}}
\toprule
Node & Ancestor & Genetic difference & Time (\textsc{mya}) \tabularnewline
\midrule
A & Primate & 16.9\% & \ifprintanswers 81.9 \else \newline\rule{0.9in}{0.4pt}\fi \tabularnewline
B & Haplorhini & 15.7\% & \ifprintanswers 77.3 \else \newline\rule{0.9in}{0.4pt}\fi \tabularnewline
C & Strepsirrhini & 14.2\% & \ifprintanswers 62.6 \else \newline\rule{0.9in}{0.4pt}\fi \tabularnewline
D & Anthropoidea & 8.1\% & \ifprintanswers 44.6 \else \newline\rule{0.9in}{0.4pt}\fi \tabularnewline
E & Catarrhini & 5.8\% & \ifprintanswers 30.0 \else \newline\rule{0.9in}{0.4pt}\fi \tabularnewline
F & Platyrrhini & 4.8\% & \ifprintanswers 22.2 \else \newline\rule{0.9in}{0.4pt}\fi \tabularnewline
— & \textit{Cebus\,–\,Callithrix} & 5.4\% & \ifprintanswers 23.9 \else \newline\rule{0.9in}{0.4pt}\fi \tabularnewline
G & Cercopithecoidea & 2.8\% & \ifprintanswers 14.5 \else \newline\rule{0.9in}{0.4pt}\fi \tabularnewline
— & \textit{Homo\,–\,Pongo} & 3.3\% & \ifprintanswers 17.9 \else \newline\rule{0.9in}{0.4pt}\fi \tabularnewline
\bottomrule
\end{tabular}
\end{threeparttable}}
\bigskip
\question[2]
Plot the relationship between genetic difference and time since common ancestor in Fig.~\ref{fig:divergence_plot} on page~\pageref{fig:divergence_plot}. Add a trend line to show the “average” relationship between genetic difference and time since common ancestry for the nine primate clades listed in Table~\ref{tab:divergence}.
\parbox{\linewidth}{%
\ifprintanswers
\includegraphics[width=\linewidth]{primate_divergence_plot_key}
\else
\includegraphics[width=\linewidth]{primate_divergence_plot_blank}
\fi
\captionof{figure}{Plot of genetic differences and time since common ancestor for nine primate clades.}\label{fig:divergence_plot}\par
}
\subsubsection*{Gene duplication and amino acid replacements}
Opsins, like all proteins, are made from individual \textit{amino acids.} Amino acids are classified as negatively charged, positively charged, non-polar, and polar. The amino acids determine the structure and function of the protein. Amino acids in a protein are changed by mutations to the gene that encodes the protein.
\begin{minipage}[t]{0.48\linewidth}
The long wavelength opsin evolved by \textit{gene duplication} of the \mws{} opsin gene. Mutations to the copy changed several amino acids in the protein. Some mutations changed the wavelength sensitivity of the opsin protein.
\bigskip
The mutations that changed the function of the long wavelength opsin happened because non-polar amino acids were replaced by polar amino acids. Table~\ref{tab:amino_acid_properties} lists some non-polar and polar amino acids.
\end{minipage}%
\hfill
\begin{minipage}[t]{0.48\linewidth}\vspace{-10pt}%
\begin{threeparttable}[t]
\captionsetup{type=table, position=top,skip=0pt}
\captionof{table}{Some non-polar and polar amino acids, and their abbreviations.}\label{tab:amino_acid_properties}
\begin{tabular}{@{}ll@{}}
\toprule
Non-polar & Polar \tabularnewline
\midrule
Alanine (Ala) & Asparagine (Asn) \tabularnewline
Isoleucine (Ile) & Glutamine (Gln) \tabularnewline
Leucine (Leu) & Serine (Ser) \tabularnewline
Methionine (Met) & Threonine (Thr) \tabularnewline
Phenylalanine (Phe) & Tyrosine (Tyr) \tabularnewline
Proline (Pro) & \tabularnewline
Valine (Val) & \tabularnewline
\bottomrule
\end{tabular}\par
\end{threeparttable}\par
\end{minipage}
\bigskip
\question[1]
Below are some non-polar and polar amino acids that differ between the \mws{} and \lws{} opsins. Place an {\large $\times$} below each \textit{non-polar to polar} amino acid change.
\begin{tabular}{@{}*{14}{l}@{}}
\toprule
& 111 & 116 & 153 & 171 & 180 & 236 & 274 & 275 & 277 & 279 & 285 & 296 \tabularnewline
\midrule
\textsc{mws} & Val & Tyr & Met & Ile & Ala & Val & Val & Leu & Phe & Phe & Ala & Pro \tabularnewline
\textsc{lws} & Ile & Ser & Leu & Val & Ser & Met & Ile & Phe & Tyr & Val & Thr & Ala \tabularnewline
\midrule\noalign{\medskip}
{\Large $\times$} & \rule{0.5cm}{0.4pt}
& \rule{0.5cm}{0.4pt} & \rule{0.5cm}{0.4pt}
& \rule{0.5cm}{0.4pt} & \ifprintanswers{\large $\times$}\else\rule{0.5cm}{0.4pt}\fi
& \rule{0.5cm}{0.4pt} & \rule{0.5cm}{0.4pt}
& \rule{0.5cm}{0.4pt} & \ifprintanswers{\large $\times$}\else\rule{0.5cm}{0.4pt}\fi
& \rule{0.5cm}{0.4pt} & \ifprintanswers{\large $\times$}\else\rule{0.5cm}{0.4pt}\fi
& \rule{0.5cm}{0.4pt} \tabularnewline
\bottomrule
\end{tabular}
Each type of non-polar to polar amino acid change shifts the wavelength sensitivity by a different amount, but the average shift is about 10~nm.
\question[1]\label{ques:long_peak}
The peak sensitivity of \mws{}~opsin is around 541 nanometers. For each non-polar to polar change, add 10 nm to the peak sensitivity for \mws{}~opsin to calculate the peak sensitivity for \lws{}~opsin.
\medskip
What is the peak sensitivity for \lws{} opsin? \rule{1in}{0.4pt}~nm.
\question[1]
Add a relative sensitivity curve for the \lws{}~opsin to Fig.~\ref{fig:relative_sensitivity}. Draw your curve just like the curve for \mws{}~opsin but shifted to the right by the value you calculated in question~\ref{ques:long_peak}.
\parbox{\linewidth}{
\ifprintanswers
\includegraphics[width=\linewidth]{trichromat_plain_plot}
\else
\includegraphics[width=\linewidth]{dichromat_plain_plot}
\fi
\captionof{figure}{Relative sensitivity for short, medium, and long wavelength opsins.}\label{fig:relative_sensitivity}\par
}
\newpage
\question[3]
Studies across many genera from all of the primate clades shown in Fig.~\ref{fig:primate_phylogeny} have found that only great apes and Old World monkeys have the long wavelength gene. Based on the phylogeny and your calculations from the molecular clock, how long ago do you estimate that the gene duplication must have occurred? Explain.
\AnswerBox{0.3\textheight}{No more than about 40 million years ago because all great apes and Old World monkeys have the gene. It probably occurred in the ancestor labeled E.}
\question
\textsc{Discussion question:} New World monkeys do not have the \lws{}~opsin gene yet females in some New World genera can be at least partially sensitive to long wavelengths, and sometimes as sensitive as Old World monkeys and great apes. How could this be so? \textsc{Hints:} The \mws{}~opsin is on the X-chromosome. \emph{Each} non-polar to polar mutation at the three sites causes a slight increase in the wavelength sensitivity.
\AnswerBox{0.3\textheight}{Mutations create alleles of the \mws{} gene. Some mutations have occurred at the amino acid sites shown above. Females have two X chromosomes so can end up with two different alleles, creating variation in the color vision.}
\end{questions}
\end{document} | {
"alphanum_fraction": 0.7555536569,
"avg_line_length": 47.7714285714,
"ext": "tex",
"hexsha": "56e923d71657770d4bfdbb4dc58dc1e3fc8d3dfa",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "670db734c68195edb7af76a2feee7bcb166fdffc",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "mtaylor-semo/163",
"max_forks_repo_path": "activities/trichromacy/trichromacy_activity.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "670db734c68195edb7af76a2feee7bcb166fdffc",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "mtaylor-semo/163",
"max_issues_repo_path": "activities/trichromacy/trichromacy_activity.tex",
"max_line_length": 560,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "670db734c68195edb7af76a2feee7bcb166fdffc",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "mtaylor-semo/163",
"max_stars_repo_path": "activities/trichromacy/trichromacy_activity.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 3688,
"size": 11704
} |
\clearpage
\subsection{Return Statement} % (fold)
\label{sub:return_statement}
\csyntax{csynt:function-decl-return-statement}{a Return Statement}{function-decl/return-statement}
% subsection return_statement (end) | {
"alphanum_fraction": 0.8046511628,
"avg_line_length": 30.7142857143,
"ext": "tex",
"hexsha": "7934460c0a793f0fb167a379bb7b1516f168eeea",
"lang": "TeX",
"max_forks_count": 6,
"max_forks_repo_forks_event_max_datetime": "2022-03-24T07:42:53.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-06-02T03:18:37.000Z",
"max_forks_repo_head_hexsha": "8f3040983d420129f90bcc4bd69a96d8743c412c",
"max_forks_repo_licenses": [
"CC-BY-4.0"
],
"max_forks_repo_name": "macite/programming-arcana",
"max_forks_repo_path": "topics/function-decl/c/c-return-statement.tex",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "bb5c0d45355bf710eff01947e67b666122901b07",
"max_issues_repo_issues_event_max_datetime": "2021-12-29T19:45:10.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-12-29T19:45:10.000Z",
"max_issues_repo_licenses": [
"CC-BY-4.0"
],
"max_issues_repo_name": "thoth-tech/programming-arcana",
"max_issues_repo_path": "topics/function-decl/c/c-return-statement.tex",
"max_line_length": 98,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "bb5c0d45355bf710eff01947e67b666122901b07",
"max_stars_repo_licenses": [
"CC-BY-4.0"
],
"max_stars_repo_name": "thoth-tech/programming-arcana",
"max_stars_repo_path": "topics/function-decl/c/c-return-statement.tex",
"max_stars_repo_stars_event_max_datetime": "2021-08-10T04:50:54.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-08-10T04:50:54.000Z",
"num_tokens": 54,
"size": 215
} |
\SetAPI{nothing}
\section{Entity handling}
\label{feature:EntityHandling}
\ClearAPI
\TODO
\subsection{Supported entity annotations}
\begin{itemize}
\item \prettyref{annotation:Cascade}
\item \prettyref{annotation:EntityEqualsAspect}
\item \prettyref{annotation:FireThisOnPropertyChange}
\item \prettyref{annotation:FireTargetOnPropertyChange}
\item \prettyref{annotation:IgnoreToBeUpdated}
\item \prettyref{annotation:ParentChild}
\item \prettyref{annotation:PropertyAccessor}
\item \textit{javax.persistence.Embeddable}
\item \textit{javax.persistence.PostLoad}
\item \textit{javax.persistence.PrePersist}
\end{itemize}
| {
"alphanum_fraction": 0.8177496038,
"avg_line_length": 33.2105263158,
"ext": "tex",
"hexsha": "1fd66a94222d55ca9a4b3daadcc4165d2cee0762",
"lang": "TeX",
"max_forks_count": 4,
"max_forks_repo_forks_event_max_datetime": "2022-01-08T12:54:51.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-10-28T14:05:27.000Z",
"max_forks_repo_head_hexsha": "8552b210b8b37d3d8f66bdac2e094bf23c8b5fda",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "Dennis-Koch/ambeth",
"max_forks_repo_path": "doc/reference-manual/tex/feature/EntityHandling.tex",
"max_issues_count": 6,
"max_issues_repo_head_hexsha": "8552b210b8b37d3d8f66bdac2e094bf23c8b5fda",
"max_issues_repo_issues_event_max_datetime": "2022-01-21T23:15:36.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-04-24T06:55:18.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "Dennis-Koch/ambeth",
"max_issues_repo_path": "doc/reference-manual/tex/feature/EntityHandling.tex",
"max_line_length": 56,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "8552b210b8b37d3d8f66bdac2e094bf23c8b5fda",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "Dennis-Koch/ambeth",
"max_stars_repo_path": "doc/reference-manual/tex/feature/EntityHandling.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 175,
"size": 631
} |
\documentclass[a4paper,11pt,fleqn]{report}
\usepackage[dvipdfmx]{graphicx}
\usepackage{amsmath,amssymb}
\usepackage{bm}
%\usepackage{newpxtext} % Palatino
%\usepackage{newtxtext} % Times
\begin{document}
\title{ Variational Bayes-Hidden Markov Model Analysis for Time Series Data %\\ --- FRET, Single-molecule Intensity, Diffusion and General Applications ---
}
\author{ Kenji Okamoto }
\date{\today}
\maketitle
\tableofcontents
\chapter{ General Solution }
\input{ solveVarBayes.tex }
\input{ solveHmm.tex }
\input{ solveVbHmm.tex }
\input{ solveGlobalVbHmm.tex }
%%%%%
%\chapter{ Time Stamped Photons and FRET }
%\input{ vbHmmTsTimeDepend.tex }
%\input{ vbHmmTsK.tex }
%\input{ vbHmmTsFretKIE.tex }
%\chapter{ Single Photon Counting and FRET }
%\input{ vbHmmPcA.tex }
%\input{ vbHmmPcFret.tex }
\chapter{ Gaussian Time Series }
\input{ vbHmmGauss.tex }
\input{ vbHmm_gaussIntensity.tex }
\input{ vbHmm_gaussDiffusion.tex }
%\appendix
%\chapter{Distribution Functions}
%\input{ funcs.tex }
\end{document}
%%
| {
"alphanum_fraction": 0.736318408,
"avg_line_length": 21.847826087,
"ext": "tex",
"hexsha": "9c9d79bd1a3dfe933040e0282f8347afdacf930d",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "77afe3c336c9e1ebeb115ca4f0b2bc25060556bd",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "okamoto-kenji/varBayes-HMM",
"max_forks_repo_path": "TeX/derivation.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "77afe3c336c9e1ebeb115ca4f0b2bc25060556bd",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "okamoto-kenji/varBayes-HMM",
"max_issues_repo_path": "TeX/derivation.tex",
"max_line_length": 156,
"max_stars_count": 7,
"max_stars_repo_head_hexsha": "77afe3c336c9e1ebeb115ca4f0b2bc25060556bd",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "okamoto-kenji/varBayes-HMM",
"max_stars_repo_path": "TeX/derivation.tex",
"max_stars_repo_stars_event_max_datetime": "2019-11-01T06:35:57.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-03-31T06:59:00.000Z",
"num_tokens": 316,
"size": 1005
} |
\renewcommand{\leftmark}{Biography}
\chapter*{Biography}
\addcontentsline{toc}{chapter}{Biography}
Valter Vasić was born in Pula in 1987. He received his M.Sc. degree in
information and communication technology from the University of Zagreb in
2010. He then started working as a research associate at the University of
Zagreb, Faculty of Electrical Engineering and Computing, as a developer for
the IMUNES network emulator. In 2010 he started his Ph.D. in Computer Science
under the supervision of associate professor Miljenko Mikuc, Ph.D. He was a
researcher on the E-IMUNES project funded by Ericsson Nikola Tesla, Zagreb,
and actively contributed to various projects with industry. He has published
more than 10 papers in journals and conference proceedings. Since 2015 he has
been a member of the Honeynet project and a management committee member for
the COST Action IC1306 ``Cryptography for Secure Digital Interaction''. The
focus of his research is secure network communication; his interests span
network communication, computer security, and virtualization.
| {
"alphanum_fraction": 0.8190212373,
"avg_line_length": 60.1666666667,
"ext": "tex",
"hexsha": "a597aea5b875eefec4e13cc7dc1a77e63d6a574d",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f5d9853ef9dc3a984e666911634cde94011e576f",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "oikuda/phd-thesis",
"max_forks_repo_path": "tex/biography.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f5d9853ef9dc3a984e666911634cde94011e576f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "oikuda/phd-thesis",
"max_issues_repo_path": "tex/biography.tex",
"max_line_length": 79,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f5d9853ef9dc3a984e666911634cde94011e576f",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "oikuda/phd-thesis",
"max_stars_repo_path": "tex/biography.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 246,
"size": 1083
} |
\documentclass{cup-ino}
\usepackage[utf8]{inputenc}
\usepackage{longtable}
\usepackage{placeins}
\usepackage{blindtext,alltt}
\usepackage{graphicx}
\bibliographystyle{acm}
\graphicspath{ {./imgs/} }
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Full title
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\title{TEMPLATE PROJECT WRITE UP}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Shorter title, for use in page header
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\runningtitle{CS272 Project TEMPLATE}
\author{Sam Showalter}
\begin{document}
\maketitle
% \cite{LilianWeng}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Abstract
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{abstract}
ABSTRACT
\end{abstract}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Introduction
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\vspace{5mm}
\section{Introduction}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Feature engineering
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\vspace{2mm}
\section{Feature Engineering}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Methods
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\vspace{2mm}
\section{Methods}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Model Selection
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\vspace{2mm}
\section{Model Selection and Experiments}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Conclusion
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\vspace{2mm}
\section{Conclusion}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Bibliography
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newpage
\bibliography{refs.bib}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Appendix
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\newpage
\section{Appendix}
\end{document}
| {
"alphanum_fraction": 0.3348958333,
"avg_line_length": 21.8181818182,
"ext": "tex",
"hexsha": "45ee8234b0c07cbe5d4fc05e83895abb93bf0350",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "6914064018fbcd416c2f9b24ee105319d02b3cb2",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "SamShowalter/CS272-NLP",
"max_forks_repo_path": "template_old/main.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "6914064018fbcd416c2f9b24ee105319d02b3cb2",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "SamShowalter/CS272-NLP",
"max_issues_repo_path": "template_old/main.tex",
"max_line_length": 52,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "6914064018fbcd416c2f9b24ee105319d02b3cb2",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "SamShowalter/CS272-NLP",
"max_stars_repo_path": "template_old/main.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 333,
"size": 1920
} |
\documentclass{article}
\begin{document}
\section{Science Discoveries}
Some of the \textbf{greatest}
discoveries in \underline{science}
were made by
\textbf{\textit{accident}}.
\paragraph{}
Some of the greatest discoveries in science were made by \emph{accident}.
\end{document} | {
"alphanum_fraction": 0.7558528428,
"avg_line_length": 21.3571428571,
"ext": "tex",
"hexsha": "fccfe4a9d810716707008ed03beacdcd59b60678",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "7c1c2a19ffc9bb85c79aad9fc87f663bbe0123f6",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "tolindatboi/tobiCSC101",
"max_forks_repo_path": "practice2.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "7c1c2a19ffc9bb85c79aad9fc87f663bbe0123f6",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "tolindatboi/tobiCSC101",
"max_issues_repo_path": "practice2.tex",
"max_line_length": 74,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "7c1c2a19ffc9bb85c79aad9fc87f663bbe0123f6",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "tolindatboi/tobiCSC101",
"max_stars_repo_path": "practice2.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 90,
"size": 299
} |
The \ac{ahci} is a standard by Intel that defines a common API for \ac{sata}
host bus adapters. To provide backwards compatibility, \ac{ahci} specifies
modes for both legacy IDE emulation and a standardized \ac{ahci} interface.
\ac{ahci} only implements the transport aspect of the communication with
devices. Commands are still transferred as specified in the \ac{ata}/\ac{atapi}
standards.
\section{ATA/ATAPI/SATA}
The \ac{ata} standard specifies an interface for connecting several types of
storage devices, including devices with removable media. \ac{atapi} provides an
extension to allow \ac{ata} to transmit \acs{scsi} commands.
Commands that can be sent to \ac{ata} devices are specified in the \ac{acs}
specifications. Commands of particular interest for this lab project are the
\texttt{IDENTIFY}, \texttt{READ DMA}, \texttt{WRITE DMA} and \texttt{FLUSH
CACHE} commands.
The way these commands are sent to the device is specified in the respective
specification, for example the \ac{sata} or \ac{pata} specifications.
\subsection{SATA}
The \ac{sata} standard specifies the layout of the command \acp{fis} that
encapsulate traditional ATA commands as well as all the lower layers of the
interface to the disk, such as the physical layer.
Figure \ref{fig:h2d_fis} shows the structure of an example \ac{fis}. A Host to
Device Register \ac{fis} can be used to send commands to the disk. The command
value is specified by \ac{ata}. The \ac{fis} contains additional values such as
\ac{lba} and sector count.
\begin{figure}[ht]
\centering
\includegraphics[width=.7\textwidth]{h2d_fis.pdf}
\caption{Host to Device Register FIS \cite[p.~336]{sata_2.6}}
\label{fig:h2d_fis}
\end{figure}
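To make the layout concrete, the sketch below assembles such a \ac{fis} byte by byte. It is written in Java purely for illustration (a real driver builds the structure in DMA-visible memory); the helper name and parameters are ours, and only the command, \ac{lba}, and count fields are filled in.
\begin{verbatim}
import java.nio.ByteBuffer;

public class FisSketch {
    // Illustrative 20-byte Host to Device Register FIS with the C bit
    // set; byte offsets follow the figure above. Not driver code.
    static ByteBuffer buildH2dFis(int ataCommand, long lba, int count) {
        ByteBuffer fis = ByteBuffer.allocate(20);
        fis.put(0, (byte) 0x27);                    // FIS type: Register H2D
        fis.put(1, (byte) 0x80);                    // C = 1: carries a command
        fis.put(2, (byte) ataCommand);              // e.g. READ DMA
        fis.put(4, (byte) (lba & 0xff));            // LBA  7:0
        fis.put(5, (byte) ((lba >> 8) & 0xff));     // LBA 15:8
        fis.put(6, (byte) ((lba >> 16) & 0xff));    // LBA 23:16
        fis.put(7, (byte) 0x40);                    // device: LBA mode
        fis.put(8, (byte) ((lba >> 24) & 0xff));    // LBA 31:24
        fis.put(9, (byte) ((lba >> 32) & 0xff));    // LBA 39:32
        fis.put(10, (byte) ((lba >> 40) & 0xff));   // LBA 47:40
        fis.put(12, (byte) (count & 0xff));         // sector count  7:0
        fis.put(13, (byte) ((count >> 8) & 0xff));  // sector count 15:8
        return fis;
    }
}
\end{verbatim}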
\section{AHCI}
\subsection{Memory Registers}
While \acs{pci} base address registers 0--4 may contain pointers to address
spaces for legacy IDE emulation, \ac{bar} 5 contains the address of the
\ac{hba}'s memory-mapped registers. As shown in Figure~\ref{fig:hba_mem}, this
address space is divided into two areas: global registers for control of the
\ac{hba} and registers for up to 32 ports. A port can be attached to either a
device or a port multiplier. In this lab project, we focus on device handling
and ignore port multipliers.
\begin{figure}[ht]
\centering
\includegraphics[width=.7\textwidth]{hba_mem.png}
\caption{HBA Memory Space Usage \cite[p.~33]{ahci_1.3}}
\label{fig:hba_mem}
\end{figure}
Every port area (\autoref{fig:port_mem}) contains further control registers and
pointers to the memory regions for the command list and receive \ac{fis} area.
Each of these pointers is a 64-bit value (32-bit for \acp{hba} that don't
support 64-bit addressing) stored in two port registers.
\begin{figure}[ht]
\centering
\includegraphics[width=.9\textwidth]{pmem_overview.pdf}
\caption{Port System Memory Structure adapted from \cite[p.~34]{ahci_1.3}}
\label{fig:port_mem}
\end{figure}
\subsection{Received FIS Area}
The received \ac{fis} area stores copies of the \acp{fis} received from the
device. The \ac{hba} copies each incoming \ac{fis} to the appropriate region of
the \ac{rfis} area. An unknown \ac{fis} of at most 64 bytes is copied to the
Unknown \ac{fis} region; an unknown \ac{fis} longer than 64 bytes is considered
illegal.
\begin{figure}[ht]
\centering
\includegraphics[width=.8\textwidth]{rfis_area.pdf}
\caption{Received FIS Organization, adapted from \cite[p.~35]{ahci_1.3}}
\label{fig:rfis_mem}
\end{figure}
\subsection{Commands}
A command list (\autoref{fig:command_list}) contains 32 command headers, which
each contain the metadata for a single command.
A command is issued to the device by constructing a command header that
references a command table and carries further metadata for the command.
\begin{figure}[ht]
\centering
\includegraphics[width=.8\textwidth]{command_list_structure.png}
\caption{Command List Structure \cite[p.~36]{ahci_1.3}}
\label{fig:command_list}
\end{figure}
The command table (\autoref{fig:command_table}) contains the command \ac{fis}
itself and an optional number of physical region descriptors specifying chunks
of main memory in the form of a scatter-gather list.
\begin{figure}[ht]
\centering
\includegraphics[width=.8\textwidth]{command_table.png}
\caption{Command Table \cite[p.~39]{ahci_1.3}}
\label{fig:command_table}
\end{figure}
Commands are issued by setting the corresponding bit in the command issue
register. Upon command completion, the bit is cleared and, if enabled, an
interrupt is triggered.
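Putting the pieces together, the issue sequence can be modeled as below. The registers are simulated as plain fields so the logic is self-contained; a real driver performs MMIO reads and writes on the port registers instead, and fills in the command header and table before setting the bit.
\begin{verbatim}
public class PortModel {
    int pxCi;   // command issue register, one bit per slot
    int pxSact; // SATA active register (native queued commands)

    // First slot whose bit is clear in both registers is free.
    int findFreeSlot() {
        return Integer.numberOfTrailingZeros(~(pxCi | pxSact));
    }

    // Issue a command: the HBA clears this bit again on completion.
    void issue(int slot) {
        pxCi |= (1 << slot);
    }
}
\end{verbatim}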
| {
"alphanum_fraction": 0.7746539792,
"avg_line_length": 38.2148760331,
"ext": "tex",
"hexsha": "aaa1048d6e02d3dbdfa27424bad7074fe263afac",
"lang": "TeX",
"max_forks_count": 55,
"max_forks_repo_forks_event_max_datetime": "2022-03-31T05:00:03.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-02-03T05:28:12.000Z",
"max_forks_repo_head_hexsha": "06a9f54721a8d96874a8939d8973178a562c342f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "lambdaxymox/barrelfish",
"max_forks_repo_path": "doc/015-disk-driver-arch/intro.tex",
"max_issues_count": 12,
"max_issues_repo_head_hexsha": "06a9f54721a8d96874a8939d8973178a562c342f",
"max_issues_repo_issues_event_max_datetime": "2020-03-18T13:30:29.000Z",
"max_issues_repo_issues_event_min_datetime": "2016-03-22T14:44:32.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "lambdaxymox/barrelfish",
"max_issues_repo_path": "doc/015-disk-driver-arch/intro.tex",
"max_line_length": 79,
"max_stars_count": 111,
"max_stars_repo_head_hexsha": "06a9f54721a8d96874a8939d8973178a562c342f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "lambdaxymox/barrelfish",
"max_stars_repo_path": "doc/015-disk-driver-arch/intro.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-01T23:57:09.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-02-03T02:57:27.000Z",
"num_tokens": 1284,
"size": 4624
} |
%!TEX root = ../thesis.tex
%*******************************************************************************
%****************************** Second Chapter *********************************
%*******************************************************************************
\chapter{Experimental} % 1200 words as of sept 22
\ifpdf
\graphicspath{{Chapter2/Figs/Raster/}{Chapter2/Figs/PDF/}{Chapter2/Figs/}}
\else
\graphicspath{{Chapter2/Figs/Vector/}{Chapter2/Figs/}}
\fi
\section[Preparation of samples]{Preparation of pigment samples}
\label{section2.1}
Reference samples were acquired from Dr. Spike Bucklow from the Hamilton Kerr Institute collection and Dr. Andrea Kirkham from her sample library. All were loose powder and are described qualitatively in Table \ref{table:ref_sample}. The sample descriptions provided formed the basis for interpretation of results (and some terminology such as "bice" is used to describe both natural and synthetic pigments---this ambiguity is discussed when analysing results). Samples are pictured in \textit{Figures \ref{fig:sample_bags}} and \textit{\ref{fig:sample_vials}}, and prepared for confocal Raman analysis in \textit{Figure \ref{fig:sample_slides}}.
Prior to Raman analysis, small quantities of each pigment were pressed onto double-sided sellotape and smoothed. Prior to SEM-EDS analysis, small quantities of each pigment were pressed onto high purity carbon tabs.
\begin{table}[H]
\caption{Reference sample descriptions}
\centering
\label{table:ref_sample}
\begin{tabular}{c c}
\toprule
Reference sample & Qualitative physical description \\
\midrule
HKI natural azurite & Natural azurite, medium sandy blue \\
HKI cross section & Natural and artifical azurite \\
KE1a, KE1b & Green bice, light pale teal green \\
KE2 & Green verditer, CuCO\textsubscript{3} $\cdot$ Cu(OH)\textsubscript{2}, bright teal green \\
KE3 & Light verditer bice, medium dark blue \\
KE4 & Blue bice, medium grey blue \\
KE5 & Blue verditer, 2CuCO\textsubscript{3} $\cdot$ Cu(OH)\textsubscript{2}, dark blue \\
Fitz1 & Blue verditer, Fitzpatrick 10180, dark blue \\
Az1 & Azurite, medium light blue \\
Az2 & Azurite, dark deep blue \\
AzOp & Azurite, medium blue \\
AzMag & Azurite, medium teal blue \\
Ma1 & Malachite, medium light green \\
\bottomrule
\end{tabular}
\end{table}
\begin{figure}[H]
\centering
\includegraphics[width=0.75\linewidth]{sample_bags}
\caption[Samples KE1-5 and Fitz1.]{Samples KE1-5 and Fitz1 shown in storage bags.}
\label{fig:sample_bags}
\end{figure}
\begin{figure}[H]
\centering
\includegraphics[width=0.75\linewidth]{sample_vials}
\caption[Samples Az1, Az2, AzOp, AzMag, and Ma1.]{Samples Az1, Az2, AzOp, AzMag, and Ma1, shown in small sample vials.}
\label{fig:sample_vials}
\end{figure}
\begin{figure}[H]
\centering
\includegraphics[width=0.75\linewidth]{sample_slides}
\caption[All reference samples shown pressed on double-sided tape and prepared for Raman analysis.]{All reference samples shown pressed on double-sided tape and prepared for Raman analysis.}
\label{fig:sample_slides}
\end{figure}
Samples were also embedded in polyester resin ((Tiranti clear casting resin) and polished for Raman mapping and AFM analysis. Samples were filed to approximately 5 mm in height and the pigment containing surface was polished using three sequential grit sizes of silicon carbide paper (English Abrasives) and polishing cloth (Buehler), using a fine grade cerium oxide polishing powder (Beckman-RIIC) in ethanol. Finally, samples were cleaned with ethanol.
\section[Analysis of pigment particles by Raman spectroscopy]{Analysis of pigment particles by Raman spectroscopy}
\label{section2.2}
Collection of Raman spectra was done using a Horiba LabRAM HR Evolution confocal Raman spectrometer, a 50x microscope objective (Olympus LMPLFLN), a 600 grooves/mm grating, 100 $\mu$m pinhole, and a CCD array (1024x1024 pixels). An laser with excitation wavelength of 532 nm (diode-pumped solid-state, Laser Quantum) was initially selected based on optimal signal and lack of sample damage. Azurite and malachite should show strong signals at 532 nm based on previous work.~\autocite{Bicchieri} References were also collected at 473 nm (Cobolt, 1800 grooves/mm grating, 150 $\mu$m pinhole), which showed superior results for blue samples due to high reflection but did not perform well on green samples. Two other excitation wavelengths, 633 nm and 785 nm, were also tested and found to be inferior (\textit{Figure \ref{fig:Az1_wavelength_comparison}}). Spectra were collected by focusing on specific pigment grains.
Resin-embedded cross section samples from \textit{Battle of Spurs} were studied using the same procedure.
\begin{figure}[H]
\centering
\includegraphics[width=0.75\linewidth]{Az1_wavelength_comparison}
\caption[Comparison of spectra collected at 473, 532, 633, and 785 nm excitation wavelengths from sample Az1.]{Comparison of spectra collected at 473 (green), 532 (blue), 633 (red), and 785 nm (black) excitation wavelengths from sample Az1.}
\label{fig:Az1_wavelength_comparison}
\end{figure}
Optimal collection parameters maximised the signal to noise ratio of spectra and avoided damage (\textit{Figure \ref{fig:Az1_laserpower_comp_532}}). The damage threshold of pigment grains depended on the sample and the grain size, also observed in previous studies.~\autocite{Cardell,Mattei} Damage to azurite (blue bice, blue verditer) samples occurred at 4 mW (10 s acquisition, 10 accumulations). Malachite (green verditer) has a lower damage threshold, 2 mW (10 s acquisition, 10 accumulations). 1 mW power with an acquisition time of 10 s and 10 accumulations did not cause observable damage to any sample and gave useable signal quality. To be consistent, this lower surface power and acquisition time was used.
\begin{figure}[H]
\centering
\includegraphics[width=0.75\linewidth]{Az1_laserpower_comp_532}
\caption[Comparison of spectra collected using 532 nm excitation wavelength at 10\%, 25\%, and 50\% power.]{Comparison of spectra collected using 532 nm excitation wavelength at 10\% (black), 25\% (red), and 50\% (blue) power (5 acquisitions, 10 accumulations).}
\label{fig:Az1_laserpower_comp_532}
\end{figure}
Raman spectra were processed using OriginPro 2017 software. All spectra were fit to a spline baseline.
\section[Analysis of pigment particles by SEM-EDS]{Analysis of pigment particles by SEM-EDS}
\label{section2.3}
Pigment particle morphology and composition were characterized using scanning electron microscopy (SEM) coupled to energy dispersive X-ray spectroscopy (EDS). Micrographs of pigments were collected at several magnifications using a JEOL JSM-5510LV scanning electron microscope using the secondary electron (SE) detector. The sample working distance was 20 mm. The accelerating voltage was 10 keV unless otherwise noted. ED spectra were collected using an Oxford instruments INCA EDX detector and software. Elemental mapping was carried out to detect variation between pigment grains.
Resin embedded cross sections were studied using the same procedure, substituting the SE detector for the backscatter (BSE) detector. Prior to SEM-EDS analysis, cross sections were coated with a 25 nm layer of amorphous carbon and wrapped with copper metal.
\section[Analysis of pigment particles by AFM-IR]{Analysis of pigment particles by AFM-IR}
\label{section2.4}
Resin-embedded powders and cross sections were studied using an Anasys NanoIR2 instrument in contact and tapping modes. Height, deflection, and phase maps were collected in tapping mode using a HQ:NSC15 Al probe (Mikro Masch, 265-410 kHz resonant frequency, 20-80 N/m force constant). Map areas ranged from 80 x 80 $\mu$m to 2 x 2 $\mu$m with resolution of 250 x 250 pixels and scan rate 0.1 Hz.
Infrared maps and spectra were collected in contact mode using an ATEC-CONTAu-10 gold-coated silicon probe (7-25 kHz resonant frequency, 0.02-0.75 N/m spring constant, Nanosensors). Four quantum cascade lasers (Daylight Solutions, 10.98\% power) spanning a total range of 1125-2298 cm\textsuperscript{-1} were used. Infrared mapping was done at several frequencies discussed in analysis.
%The duty cycle was 3\%.
\section[Bulk sample analysis by ATR FT-IR]{Bulk sample analysis by attenuated total reflectance infrared spectroscopy}
\label{section2.5}
Attenuated total reflectance (ATR) infrared spectra were collected using a Bruker Vertex 70 FT-IR spectrometer (Goldengate ATR accessory, liquid nitrogen cooled MCT mid-IR detector, OPUS software). A background spectrum was collected before each sample spectrum using the clean ATR crystal (200 scans). Each sample spectrum was collected from solid powder/resin (200 scans, resolution 4 cm\textsuperscript{-1}). Spectra were processed using OriginPro 2017 by fitting a spline baseline to transmission spectra and subtracting. Data was converted from transmittance to absorbance as needed for comparison to AFM-IR spectra.
| {
"alphanum_fraction": 0.7712629584,
"avg_line_length": 76.6752136752,
"ext": "tex",
"hexsha": "e79e1b7cf72bc4a23b2c87dafd1871b0cbf64345",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "427e061c7b9bfdceb015999745c17ab22580dac8",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ehpurdy22/mister-blue",
"max_forks_repo_path": "chapter2.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "427e061c7b9bfdceb015999745c17ab22580dac8",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ehpurdy22/mister-blue",
"max_issues_repo_path": "chapter2.tex",
"max_line_length": 916,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "427e061c7b9bfdceb015999745c17ab22580dac8",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ehpurdy22/mister-blue",
"max_stars_repo_path": "chapter2.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2261,
"size": 8971
} |
\section{Implementation}
\label{impl}
This section outlines relevant implementation details. We
implemented~\acdc{} in Open vSwitch (OVS) v2.3.2~\cite{ovs-website} and
added about 1200 lines of code (much of it debugging code and comments).
A high-level overview follows.
\crs{
A hash table is added to OVS, and flows are hashed on a 5-tuple (IP addresses, ports and VLAN) to obtain a flow's state.
Each flow entry is 320 bytes and maintains the congestion control state described in \cref{design}.
SYN packets are used to create flow entries, and FIN packets, coupled with a coarse-grained garbage
collector, are used to remove flow entries. Other TCP packets, such as data and ACKs, trigger
updates to flow entries.
There are many more table lookup operations (to update flow state)
than table insertions or deletions (to add/remove flows). Thus, Read-Copy-Update (RCU)
hash tables~\cite{guniguntala2008read} are used to enable efficient lookups.
Additionally, individual {{\tt spinlocks}} are used on each flow entry in order to allow
for multiple flow entries to be updated simultaneously.
}
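The following sketch is a conceptual analog only, written in Java for compactness: the actual datapath is C inside the OVS kernel module, with RCU hash tables standing in for the concurrent map and per-entry spinlocks for the per-entry monitor below.
\begin{verbatim}
import java.util.concurrent.ConcurrentHashMap;

// Conceptual analog of the per-flow table: lookups dominate
// inserts/deletes, and updates synchronize on the entry itself.
final class FlowTable {
    static final class FiveTuple { /* IPs, ports, VLAN; equals/hashCode omitted */ }
    static final class FlowState { long sndUna, sndNxt; int cwnd; /* ~320 B of state */ }

    private final ConcurrentHashMap<FiveTuple, FlowState> flows =
            new ConcurrentHashMap<>();

    FlowState lookup(FiveTuple k) { return flows.get(k); }            // hot path
    void onSyn(FiveTuple k) { flows.putIfAbsent(k, new FlowState()); }
    void onFin(FiveTuple k) { flows.remove(k); }                      // plus coarse GC

    void update(FiveTuple k, long seq) {
        FlowState s = flows.get(k);
        if (s == null) return;
        synchronized (s) { s.sndNxt = Math.max(s.sndNxt, seq); }      // per-entry lock
    }
}
\end{verbatim}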
\crs{
Putting it together, the high-level operation on a data packet is as follows. An application on the sender generates a packet
that is pushed down the network stack to OVS. The packet is intercepted in {\tt ovs\_dp\_process\_packet}, where the
packet's flow entry is obtained from the hash table. Sequence number state is updated in the flow entry and ECN bits are set on
the packet if needed (see \cref{design}).
If the packet's header changes, the IP checksum is recalculated. Note TCP checksumming is offloaded to the NIC.
The packet is sent over the wire and received at the receiver's OVS. The receiver updates congestion-related state, strips
off ECN bits, recomputes the IP checksum, and pushes the packet up the stack. ACKs eventually triggered by the packet
are intercepted, where the congestion information is added. Once the ACK reaches the sender, the~\acdc{} module uses
the congestion information to compute a new congestion window. Then it modifies~\rwnd{} with a {{\tt memcpy}},
strips off ECN feedback and recomputes the IP checksum before pushing the packet up the stack.
Since TCP connections are bi-directional, two flow entries are maintained for each connection.
}
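The enforcement step itself reduces to simple arithmetic: the window written back into the ACK is the minimum of the advertised~\rwnd{} and the computed congestion window, rescaled by the connection's window-scale shift. A minimal sketch with illustrative names (checksum handling elided):
\begin{verbatim}
// Value for the 16-bit TCP window field of an intercepted ACK.
final class WindowClamp {
    static int enforcedWindowField(int rwndBytes, int cwndBytes, int wndShift) {
        int allowedBytes = Math.min(rwndBytes, cwndBytes);
        return allowedBytes >>> wndShift;
    }
}
\end{verbatim}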
\crs{
Our experiments in~\cref{micro} show the CPU overhead of~\acdc{} is small, and several implementation details
help keep it low. First, OVS sits
above NIC offloading features (\ie{}, TSO and GRO/LRO) in the networking stack. Briefly, NIC offloads allow
the host to pass large data segments along the TCP/IP stack and only deal with MTU-sized packets in the NIC. Thus,~\acdc{}
operates on a per-segment, rather than per-packet, basis. Second,
congestion control is a relatively simple algorithm, and thus the computational burden is not high. Finally,
while~\acdc{} is implemented in software, it may be possible to further reduce the
overhead with a NIC implementation. Today, ``smart-NICs''
implement OVS-offload functionality~\cite{cavium-nic,netronome-nic},
naturally providing a mechanism to reduce overhead and support hypervisor bypass (\eg{}, SR-IOV).
}
% $Id: phystat2011_adye.tex 289 2011-05-05 19:21:23Z T.J.Adye $
\documentclass{cernrep}
\newcommand{\babar}{\mbox{\slshape B\kern-0.1em{\small A}\kern-0.1em B\kern-0.1em{\small A\kern-0.2em R}}}
\newcommand{\E}{\mathrm{E}}
\newcommand{\C}{\mathrm{C}}
\newcommand{\dd}[2]{\frac{\partial{#1}}{\partial{#2}}}
\title{Unfolding algorithms and tests using RooUnfold}
\author{Tim Adye}
\institute{Particle Physics Department, Rutherford Appleton Laboratory, Didcot, United Kingdom.}
\begin{document}
\maketitle
%========================================================================
\begin{abstract}
The RooUnfold package provides a common framework to evaluate and use
different unfolding algorithms, side-by-side. It currently provides
implementations or interfaces for the Iterative Bayes, Singular Value Decomposition, and
TUnfold methods, as well as bin-by-bin and matrix inversion reference
methods. Common tools provide covariance matrix evaluation and
multi-dimensional unfolding. A test suite allows comparisons of the
performance of the algorithms under different truth and measurement models.
Here I outline the package, the unfolding methods, and some experience of their use.
\end{abstract}
%========================================================================
\section{RooUnfold package aims and features}
The RooUnfold package~\cite{RooUnfold-web} was designed to provide a framework for different unfolding algorithms.
This approach simplifies the comparison between algorithms and has allowed
common utilities to be written.
Currently RooUnfold implements or interfaces to the Iterative Bayes~\cite{D'Agostini:1994zf,Bierwagen:PHYSTAT2011},
Singular Value Decomposition (SVD)~\cite{Hocker:1995kb,Kartvelishvili:PHYSTAT2011,Tackmann:PHYSTAT2011},
TUnfold~\cite{Schmitt-web}, bin-by-bin correction factors, and unregularized matrix inversion methods.
The package is designed around a simple object-oriented approach, implemented in
C++, and using existing ROOT~\cite{Brun:1997pa} classes. RooUnfold defines classes for the different
unfolding algorithms, which inherit from a common base class, and a class for
the response matrix. The response matrix object is independent of the unfolding,
so can be filled in a separate `training' program.
RooUnfold can be linked into a stand-alone program, run from a ROOT/CINT script, or
executed interactively from the ROOT prompt.
The response matrix can be initialized using existing histograms or matrices, or
filled with built-in methods (these can take care of the normalization when inefficiencies are to be considered).
The results can be returned as a histogram with errors, or a vector with full covariance matrix.
The framework also takes care of handling multi-dimensional distributions
(with ROOT support for 1-, 2-, and 3-dimensional (1D, 2D, 3D) histograms),
different binning for measured and truth distributions,
variable binning, and the option to include or exclude under- and over-flows.
It also supports different methods for calculating the errors that can
be selected with a simple switch: bin-by-bin errors with no correlations,
the full covariance matrix from the propagation of measurement errors in the unfolding, or
the covariance matrix calculated using Monte Carlo (MC) toys.
All these details are handled by the framework, so they do not have to be
implemented for each algorithm. However, different bin layouts may not produce good results for
algorithms that rely on the global shape of the distribution (such as SVD).
A toy MC test framework is provided, allowing
selection of different MC probability density functions (PDF) and parameters,
comparing different binning, and performing the unfolding with the different
algorithms and varying the unfolding regularization parameters.
Tests can be performed with 1D, 2D, and 3D distributions.
The results of a few such tests are presented in section~\ref{sec:adye:examples}.
\section{C++ classes}
Figure~\ref{Fig:adye:classes} summarizes how the ROOT and RooUnfold classes are used
together. The RooUnfoldResponse object can be constructed using a 2D response histogram (TH2D)
and 1D truth and measured projections (these are required to determine the effect of inefficiencies).
Alternatively, RooUnfoldResponse can be filled directly with the
\texttt{Fill($x_{\rm measured}$, $x_{\rm true}$)}
and
\texttt{Miss($x_{\rm true}$)}
methods, where the \texttt{Miss} method is used to count an event that was not measured
and should be counted towards the inefficiency.%
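
To make the workflow concrete, a minimal sketch of a training-plus-unfolding macro is shown below. The binning, the event loop, and the \texttt{generateTruth}/\texttt{detectorSimulation} helpers are illustrative assumptions, and \texttt{hMeasuredData} is assumed to be the measured-data histogram; the exact interfaces should be checked against the package documentation~\cite{RooUnfold-web}.
\begin{verbatim}
// Train the response matrix, then unfold a measured histogram.
RooUnfoldResponse response(40, -10.0, 10.0);    // truth/measured binning
for (int i = 0; i < 100000; ++i) {              // MC training loop
  double xTrue = generateTruth();               // user-supplied generator
  double xMeas;
  if (detectorSimulation(xTrue, xMeas))         // user-supplied smearing
    response.Fill(xMeas, xTrue);                // reconstructed event
  else
    response.Miss(xTrue);                       // lost to inefficiency
}
RooUnfoldBayes unfold(&response, hMeasuredData, 4);  // 4 iterations
TH1D* hUnfolded = (TH1D*) unfold.Hreco();       // result with errors
\end{verbatim}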
\begin{figure}
\centerline{\includegraphics[width=0.8\textwidth]{phystat2011_adye_classes.eps}}
\caption
[The RooUnfold classes.]%
{The RooUnfold classes. The training truth, training measured, measured data, and unfolded distributions
can also be given as TH2D or TH3D histograms.}%
\label{Fig:adye:classes}%
\end{figure}
The RooUnfoldResponse object can be saved to disk using the usual ROOT input/output
streamers. This allows the MC training and the unfolding step to be
easily separated into different programs.
A RooUnfold object is constructed using a RooUnfoldResponse object and the measured
data. It can be constructed as a RooUnfoldBayes, RooUnfoldSvd, RooUnfoldTUnfold, (etc)
object, depending on the algorithm required.
The results of the unfolding can be obtained as ROOT histograms (TH1D, TH2D, or TH3D)
or as a ROOT vector (TVectorD) and covariance matrix (TMatrixD). The histogram will
include just the diagonal elements of the error matrix. This should be used with care,
given the significant correlations that can occur if there is much bin-to-bin migration.
\section{Unfolding algorithms}
\subsection{Iterative Bayes' theorem}
The RooUnfoldBayes algorithm uses the method described by D'Agostini in~\cite{D'Agostini:1994zf}.
Repeated application of Bayes' theorem is used to invert the response matrix.
Regularization is achieved by stopping iterations before reaching the `true'
(but wildly fluctuating) inverse.
The regularization parameter is just the number of iterations.
In principle, this has to be tuned according to the sample statistics and binning.
In practice, the results are fairly insensitive to the precise setting used
and four iterations are usually sufficient.
RooUnfoldBayes takes the training truth as its initial prior, rather than a flat distribution,
as described by D'Agostini.
This should not bias the result once we have iterated, but it could reach an optimum after fewer iterations.
This implementation takes account of errors on the data sample but not,
by default, uncertainties in the response matrix due to finite MC statistics.
That calculation can be very slow, and usually the training sample is much larger
than the data sample.
RooUnfoldBayes does not normally do smoothing, since this has not been found to be necessary
and can, in principle, bias the distribution. Smoothing can be enabled with an option.
\subsection{Singular Value Decomposition}
RooUnfoldSvd provides an interface to the
TSVDUnfold class implemented in ROOT by Tackmann~\cite{Tackmann:PHYSTAT2011}, which
uses the method of H\"ocker and Kartvelishvili~\cite{Hocker:1995kb}.
The response matrix is inverted using singular value decomposition,
which allows for a linear implementation of the unfolding algorithm.
The normalization to the number of events is retained in order to minimize
uncertainties due to the size of the training sample.
Regularization is performed using a smooth cut-off on small singular value contributions
($s_i^2 \rightarrow s_i^2 / (s_i^2 + s_k^2)$, where the $k$th singular value defines the cut-off),
which correspond to high-frequency fluctuations.
The regularization needs to be tuned according to the distribution, binning, and sample statistics
in order to minimize the bias due to the choice of the training sample (which dominates at small $k$)
while retaining small statistical fluctuations in the unfolding result (which grow at large $k$).
The unfolded error matrix includes the contribution of uncertainties on the
response matrix due to finite MC training statistics.
\subsection{TUnfold}
RooUnfoldTUnfold provides an interface to the TUnfold method implemented in ROOT by Schmitt~\cite{Schmitt-web}.
TUnfold performs a matrix inversion with 0-, 1-, or 2-order polynomial regularization of neighbouring bins.
RooUnfold automatically takes care of packing 2D and 3D distributions
and creating the appropriate regularization matrix required by TUnfold.
TUnfold can automatically determine an optimal regularization parameter ($\tau$) by scanning the
`L-curve' of $\log_{10} \chi^2$ vs $\log_{10} \tau$.
\subsection{Unregularized algorithms}
Two simple algorithms,
RooUnfoldBinByBin, which applies MC correction factors with no inter-bin migration,
and RooUnfoldInvert, which performs unregularized matrix inversion with singular value removal (TDecompSVD)
are included for reference.
These methods are not generally recommended: the former risks biases from the MC model,
while the latter can give large bin-bin correlations and magnify statistical fluctuations.
\section{Examples\label{sec:adye:examples}}
Examples of toy MC tests generated by RooUnfoldTest
are shown in Figs.~\ref{Fig:adye:bayes-example}--\ref{Fig:adye:tunfold-example}.
These provide a challenging test of the procedure.
Completely different training and test MC models are used:
a single wide Gaussian PDF for training and a
double Breit-Wigner for testing. In both cases
these are smeared, shifted, and a variable inefficiency
applied to produce the `measured' distributions.%
\begin{figure}
\makebox[\textwidth]{\includegraphics[angle=-90,width=.640\textwidth,clip]{phystat2011_adye_bayes1.eps}\hfill
\includegraphics[angle=-90,width=.338\textwidth,clip]{phystat2011_adye_bayes2.eps}}%
\caption
[Unfolding with the Bayes algorithm.]%
{Unfolding with the Bayes algorithm.
On the left, a double Breit-Wigner PDF on a flat background (green curve) is used to generate
a test `truth' sample (upper histogram in blue).
This is then smeared, shifted, and a variable inefficiency applied to produce
the `measured' distribution (lower histogram in red).
Applying the Bayes algorithm with 4~iterations on this latter gave the unfolded result
(black points), shown with errors from the diagonal elements of the error matrix.
The bin-to-bin correlations from the error matrix are shown on the right.}%
\label{Fig:adye:bayes-example}%
\end{figure}%
\begin{figure}
\makebox[\textwidth]{\includegraphics[angle=-90,width=.640\textwidth,clip]{phystat2011_adye_svd1.eps}\hfill
\includegraphics[angle=-90,width=.338\textwidth,clip]{phystat2011_adye_svd2.eps}}%
\caption
[Unfolding with the SVD algorithm]%
{Unfolding with the SVD algorithm ($k=30$) on the same training and test
samples as described in Fig.~\ref{Fig:adye:bayes-example}.}%
\label{Fig:adye:svd-example}%
\end{figure}%
\begin{figure}
\makebox[\textwidth]{\includegraphics[angle=-90,width=.640\textwidth,clip]{phystat2011_adye_tunfold1.eps}\hfill
\includegraphics[angle=-90,width=.338\textwidth,clip]{phystat2011_adye_tunfold2.eps}}%
\caption
[Unfolding with the TUnfold algorithm]%
{Unfolding with the TUnfold algorithm ($\tau=0.004$) on the same training and test
samples as described in Fig.~\ref{Fig:adye:bayes-example}.
Here we use two measurement bins for each truth bin.}%
\label{Fig:adye:tunfold-example}
\end{figure}
\section{Unfolding errors}
Regularization introduces inevitable correlations between bins in the unfolded distribution.
To calculate a correct $\chi^2$, one has to invert the covariance matrix:
\begin{equation}
\chi^2 = (\mathbf{x}_{\mathrm{measured}} - \mathbf{x}_{\mathrm{true}})^{\mathrm{T}} \mathbf{V}^{-1}
(\mathbf{x}_{\mathrm{measured}} - \mathbf{x}_{\mathrm{true}})
\end{equation}
However, in many cases, the covariance matrix is poorly conditioned,
which makes calculating the inverse problematic.
Inverting a poorly conditioned matrix involves subtracting large, but
very similar numbers, leading to significant effects due to the
machine precision.
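One practical mitigation (a sketch of ours using ROOT's linear-algebra classes, not a prescription from RooUnfold) is to avoid forming $\mathbf{V}^{-1}$ explicitly and instead solve the corresponding linear system with a singular value decomposition:
\begin{verbatim}
// chi^2 = d^T V^{-1} d, computed by solving V s = d via an SVD
// rather than explicitly inverting the poorly conditioned matrix V.
#include "TDecompSVD.h"
#include "TMatrixD.h"
#include "TVectorD.h"

double chiSquared(const TVectorD& measured, const TVectorD& truth,
                  const TMatrixD& covariance) {
  TVectorD d = measured - truth;
  TDecompSVD svd(covariance);
  Bool_t ok = kFALSE;
  TVectorD s = svd.Solve(d, ok);   // solves covariance * s = d
  return ok ? d * s : -1.0;        // TVectorD operator* is a dot product
}
\end{verbatim}
Solving for $\mathbf{V}^{-1}\mathbf{d}$ directly is numerically gentler than forming the inverse, and the decomposition exposes the small singular values that cause the trouble.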
\subsection{Unfolding errors with the Bayes method}
As shown on the left-hand side of Fig.~\ref{fig:bayes_errors},
the uncertainties calculated by propagation of errors in the Bayes method
were found to be significantly underestimated compared to those given by the toy MC.
This was found to be due to an omission in the original method
outlined by D'Agostini (\cite{D'Agostini:1994zf}~section~4).%
\begin{figure}
\makebox[\textwidth]{\includegraphics[width=.47\textwidth,clip]{phystat2011_adye_bayes_errors_old.eps}\hfill
\includegraphics[width=.47\textwidth,clip]{phystat2011_adye_bayes_errors_new.eps}}%
\caption
[Bayesian unfolding errors compared to toy MC]%
{Bayesian unfolding errors (lines) compared to toy MC RMS (points) for 1, 2, 3, and 9 iterations
on the Fig.~\ref{Fig:adye:bayes-example} test.
The left-hand plot shows the errors using D'Agostini's original method,
ignoring any dependence on previous iterations (only the $M_{ij}$ term in Eq.~(\ref{eq:dnCidnEj})).
The right-hand plot shows the full error propagation.}%
\label{fig:bayes_errors}%
\end{figure}
The Bayes method gives the unfolded distribution (`estimated causes'), $\hat{n}(\C_i)$,
as the result of applying the unfolding matrix, $M_{ij}$, to the measurements (`effects'), $n(\E_j)$:
\begin{equation}
\hat{n}(\C_i) = \sum_{j=1}^{n_{\E}} M_{ij} n(\E_j)
\quad\mathrm{where}\quad
M_{ij} = \frac{P(\E_j|\C_i) n_0(\C_i)}{\epsilon_i \sum_{l=1}^{n_{\C}} P(\E_j|\C_l) n_0(\C_l)}
\label{eq:nCi}
\end{equation}
\noindent $P(\E_j|\C_i)$ is the $n_{\E} \times n_{\C}$ response matrix,
$\epsilon_i \equiv \sum_{j=1}^{n_{\E}} P(\E_j|\C_i)$ are efficiencies, and
$n_0(\C_l)$ is the prior distribution --- initially arbitrary (e.g.\ flat or the MC model), but updated on
subsequent iterations.
The covariance matrix, which here we call $V(\hat{n}(\C_k),\hat{n}(\C_l))$,
is calculated by error propagation from $n(\E_j)$,
but $M_{ij}$ is assumed to be itself independent of $n(\E_j)$. That is only true for the first iteration.
For subsequent iterations, $n_0(\C_i)$ is replaced by $\hat{n}(\C_i)$ from the
previous iteration, and $\hat{n}(\C_i)$ depends on $n(\E_j)$ (Eq.~(\ref{eq:nCi})).
To take this into account, we compute the error propagation matrix
\begin{equation}
\dd{\hat{n}(\C_i)}{n(\E_j)} = M_{ij} + \sum_{k=1}^{n_{\E}} M_{ik} n(\E_k)
\left( \frac{1}{n_0(\C_i)} \dd{n_0(\C_i)}{n(\E_j)} - \sum_{l=1}^{n_{\C}} \frac{\epsilon_l}{n_0(\C_l)} \dd{n_0(\C_l)}{n(\E_j)} M_{lk} \right)
\label{eq:dnCidnEj}
\end{equation}
This depends upon the matrix $\dd{n_0(\C_i)}{n(\E_j)}$, which is $\dd{\hat{n}(\C_i)}{n(\E_j)}$ from the previous iteration.
In the first iteration, the second term vanishes ($\dd{n_0(\C_i)}{n(\E_j)}=0$) and we get $\dd{\hat{n}(\C_i)}{n(\E_j)} = M_{ij}$.
The error propagation matrix can be used to obtain the covariance matrix on the unfolded distribution
\begin{equation}
V(\hat{n}(\C_k),\hat{n}(\C_l)) = \sum_{i,j=1}^{n_{\E}} \dd{\hat{n}(\C_k)}{n(\E_i)} V(n(\E_i),n(\E_j)) \dd{\hat{n}(\C_l)}{n(\E_j)}
\label{eq:Vij}
\end{equation}
\noindent from the covariance matrix of the measurements, $V(n(\E_i),n(\E_j))$.
Without the new second term in Eq.~(\ref{eq:dnCidnEj}),
the error is underestimated if more than one iteration
is used, but agrees well with toy MC tests if the full error propagation is used,
as shown in Fig.~\ref{fig:bayes_errors}.%
\section{Status and plans}
RooUnfold was first developed in the \babar\ software environment and
released stand-alone in 2007.
Since then, it has been used by physicists from many
different particle physics, particle-astrophysics, and nuclear physics groups.
Questions, suggestions, and bug reports from users
have prompted new versions with fixes and improvements.
Last year I started working with a small group hosted by the
Helmholtz Alliance, the Unfolding Framework Project~\cite{unfolding-project}.
The project is developing unfolding experience, software, algorithms, and performance tests.
It has adopted RooUnfold as a framework for development.
Development and improvement of RooUnfold is continuing.
In particular, determination of the systematic errors due to uncertainties on
the response matrix and due to correlated measurement bins will be added.
The RooUnfold package will be incorporated into the ROOT distribution,
alongside the existing TUnfold and TSVDUnfold classes.
%========================================================================
\begin{thebibliography}{99}
\bibitem{RooUnfold-web}
The RooUnfold package and documentation are available from\\
\verb=http://hepunx.rl.ac.uk/~adye/software/unfold/RooUnfold.html=
\bibitem{D'Agostini:1994zf}
G.~D'Agostini,
``A Multidimensional unfolding method based on Bayes' theorem'',
Nucl.\ Instrum.\ Meth.\ A {\bf 362} (1995) 487.
%%CITATION = NUIMA,A362,487;%%
\bibitem{Bierwagen:PHYSTAT2011}
K.~Bierwagen,
``Bayesian Unfolding'',
% these proceedings.
presented at PHYSTAT 2011 (CERN, Geneva, January 2011), to be published in a CERN Yellow Report.
\bibitem{Hocker:1995kb}
A.~Hocker and V.~Kartvelishvili,
``SVD Approach to Data Unfolding'',
Nucl.\ Instrum.\ Meth.\ A {\bf 372} (1996) 469.
% [arXiv:hep-ph/9509307].
%%CITATION = NUIMA,A372,469;%%
\bibitem{Kartvelishvili:PHYSTAT2011}
V.~Kartvelishvili,
``Unfolding with SVD'',
% these proceedings.
presented at PHYSTAT 2011 (CERN, Geneva, January 2011), to be published in a CERN Yellow Report.
\bibitem{Tackmann:PHYSTAT2011}
K.~Tackmann,
``SVD-based unfolding: implementation and experience'',
% these proceedings.
presented at PHYSTAT 2011 (CERN, Geneva, January 2011), to be published in a CERN Yellow Report.
\bibitem{Schmitt-web}
The TUnfold package is available in ROOT~\cite{Brun:1997pa} and documented in\\
\verb=http://www.desy.de/~sschmitt/tunfold.html=
\bibitem{Brun:1997pa}
R.~Brun and F.~Rademakers,
``ROOT: An object oriented data analysis framework'',
Nucl.\ Instrum.\ Meth.\ A {\bf 389} (1997) 81.
%%CITATION = NUIMA,A389,81;%%
See also \verb=http://root.cern.ch/=.
\bibitem{unfolding-project}
For details of the Unfolding Framework Project, see\\
\verb=https://www.wiki.terascale.de/index.php/Unfolding_Framework_Project=
\end{thebibliography}
\end{document}
\section[The Real and Complex Number Systems]{\hyperlink{toc}{The Real and Complex Number Systems}}
\subsection{The Naturals, Integers, and Rationals}
We begin by a review of number systems which are already familiar.
\begin{ndef}{: The Natural Numbers}
The \textbf{Naturals}, denoted by $\NN$, is the set $\set{1, 2, 3, \ldots}$.
\end{ndef}
\noindent For $x, y \in \NN$, we have that $x + y \in \NN$ and $xy \in \NN$, so the naturals are closed under addition and multiplication. However, we note that it is not closed under subtraction; take for example $2 - 4 = -2 \notin \NN$.
\begin{ndef}{: The Integers}
The \textbf{Integers}, denoted by $\ZZ$, is the set $\set{\ldots, -3, -2, -1, 0, 1, 2, 3, \ldots}$.
\end{ndef}
\noindent The integers are closed under addition, multiplication, and subtraction. However, it is not closed under division; for example, $1/2 \notin \ZZ$.
\begin{ndef}{: The Rationals (informal)}
The \textbf{Rationals}, denoted by $\QQ$, can be defined as $\set{\frac{m}{n}: m \in \ZZ, n \in \NN}$, where $\frac{m_1}{n_1}$ and $\frac{m_2}{n_2}$ are identified if $m_1n_2 = m_2n_1$.
\end{ndef}
\noindent We note that unlike the naturals/integers, the rationals do not have as obvious an enumeration. The definition above is a good one if we already have the same rigorous idea of what a rational number is in our mind; i.e.\ it works because we have a shared preconceived understanding of a rational number.
If this is not the case, it may help to define the rational numbers more rigorously/formally (even if the definition may be slightly harder to parse). As a second attempt at a definition, we can say that $\QQ$ is the set of ordered pairs $\set{(m, n): m \in \ZZ, n \in \NN}$. However, this is not quite enough as we need a notion of equivalence between two rational numbers (e.g. $(1, 2) = (2, 4)$). Hence, a complete and rigorous definition would be:
\begin{ndef}{: The Rationals (formal)}
The \textbf{Rationals}, denoted by $\QQ$, is the set $\set{(m, n): m \in \ZZ, n \in \NN}/\sim$ where $(m_1, n_1) \sim (m_2, n_2)$ if $m_1n_2 = m_2n_1$.
\end{ndef}
\noindent Under the formal definition, the rationals are a set of equivalence classes of ordered pairs, under the equivalence relation $\sim$. We note that the rationals are closed under addition, subtraction, multiplication, and division.
This formal definition might be slightly harder to parse, so it might be useful to consider an example with a similar flavour. Consider the set $X = \set{m \in \ZZ}/\sim$ such that $m_1 \sim m_2$ if $m_1 - m_2$ is divisible by 12. This is ``clock arithmetic'', with equivalence classes $[0], [1], [2], \ldots$, one for each hour on an analog clock. A fun side note: if instead of 12 we had picked a prime number, we would get a field (we will discuss what this is in a later lecture)!
Note that under this definition, $(1, 2)$ and $(2, 4)$ are different representations of the same rational number. With this definition, we would define addition such that $(m_1, n_1) + (m_2, n_2) = (m_1n_2 + m_2n_1, n_1n_2)$. Note that $(2m_1, 2n_1) + (m_2, n_2) = (2m_1n_2 + 2m_2n_1, 2n_1n_2)$, and we can identify $(m_1n_2 + m_2n_1, n_1n_2)$ with $(2m_1n_2 + 2m_2n_1, 2n_1n_2)$. If we choose different representations when we do addition, we might get a different representation in our result, but it will represent the same rational number regardless of the representations we originally chose.
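For a concrete instance (our own numerical check, not part of the definition): $(1, 2) + (1, 3) = (1\cdot 3 + 1\cdot 2, 2\cdot 3) = (5, 6)$, while using the representative $(2, 4)$ for the first summand gives $(2, 4) + (1, 3) = (2\cdot 3 + 1\cdot 4, 4\cdot 3) = (10, 12)$; and indeed $(10, 12) \sim (5, 6)$, since $10 \cdot 6 = 5 \cdot 12$.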
A natural question then becomes whether the rationals are sufficient for doing all of real analysis. Certainly, it seems that we have a number system that is closed under all our basic arithmetic operations; but is this enough? For example, are we able to take limits using just the rationals? The answer turns out to be no (they are insufficient!) and the following example will serve as one illustration of this fact.
\begin{example}{Incompleteness of the Rationals}{1.1a}
There exists no $p \in \QQ$ such that $p^2 = 2$.
\end{example}
\noindent We proceed via proof by contradiction. Recall that in these types of proof, we start with a certain wrong assumption, follow a correct/true line of reasoning, reach an eventual absurdity, and therefore conclude that the original assumption was mistaken.
\begin{nproof}
Let us then suppose for the contradiction that there exists $p = \frac{m}{n}$ with $p^2 = 2$. We then have that not both $m, n$ are even, and hence at least one is odd. Then, we have that $2 = p^2 = \frac{m^2}{n^2}$ and hence $m^2 = 2n^2$, so $m^2$ is even, implying $m$ is even. So, let us write $m = 2k$ for $k \in \ZZ$. Then, $(2k)^2 = 4k^2 = 2n^2$, and hence $2k^2 = n^2$. Therefore, $n^2$ is even and hence $n$ is even. $m$ and $n$ are therefore both even, a contradiction. We conclude that no such $p$ exists. \qed
\end{nproof}
\noindent Why can we conclude that not both $m, n$ are even in the above proof? This is the case as, if $m, n$ were both even, then we could write $m = 2m'$, $n = 2n'$ for some $m', n'$, and then $p = \frac{m}{n} = \frac{2m'}{2n'} = \frac{m'}{n'}$, which we can continue until either the numerator or denominator is odd. A natural question to consider is how to prove that this process of reducing fractions will eventually conclude. The resolution is to invoke the fundamental theorem of arithmetic, and write $m, n$ in terms of their unique prime factorizations. We are then able to cancel out factors of 2 from the numerator/denominator until at least one is odd.
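As a small computational aside (our illustration, not part of Rudin's argument), the reduction terminates in a single step if one divides by the greatest common divisor, whose existence also follows from the fundamental theorem of arithmetic:
\begin{verbatim}
#include <numeric>   // std::gcd (C++17)
#include <utility>

// Reduce m/n (with n > 0) to lowest terms in one step: after dividing
// by g = gcd(m, n), the numerator and denominator share no common
// factor, so in particular they are not both even.
std::pair<int, int> reduce(int m, int n) {
    int g = std::gcd(m, n);
    return {m / g, n / g};
}
\end{verbatim}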
We note that this example leads us to conclude that the rationals have certain ``holes'' in them. This is concerning, as there are sequences of rational numbers that tend to $\sqrt{2}$. Conversely, it is not as concerning that there is no rational number $x$ such that $x^2 = -1$, as there is no sequence of rational numbers that is ``close to'' $i$ (note that both $\sqrt{2}$ and $i$ have not yet been defined, but this will come shortly).
\setcounter{rudin}{0}
\begin{example}{Incompleteness of the Rationals}{1.1b}
Let $A = \set{p \in \QQ: p > 0, p^2 < 2}$, and $B = \set{p \in \QQ: p > 0, p^2 > 2}$. Then, $\forall p \in A, \exists q \in A$ such that $p < q$, and $\forall p \in B, \exists q \in B$ such that $q < p$.
\end{example}
\begin{figure}[htbp]
\centering
\begin{tikzpicture}
\draw[latex-latex, very thick] (-6, 0) -- (6,0) node[anchor=south] {$\QQ$};
\draw[] (0, 0) -- (0, -0.25) node[anchor=north] {0};
\foreach \i in {-5.7,-5.6,...,5.7}{
\draw[] (\i,0) -- (\i,-0.1);
}
\draw[] (1,0.1) node[anchor=south] {$\sqrt{2}$};
\draw[] (0.96,0) node[] {$)$};
\draw[] (1.04,0) node[] {$($};
\draw[] (-2.5, 0) node[anchor=south] {$A$};
\draw[] (3.5, 0) node[anchor=south] {$B$};
\end{tikzpicture}
\caption{Visualization of sets $A$ and $B$. We note that $\sqrt{2}$ has not been defined in our formalism yet, but from our prior mathematical intuition it would be what goes in the ``hole'' of the rationals.}
\label{fig1}
\end{figure}
\noindent For the proof of this statement, we consider playing a two-person game. One player is $\forall$, the other is $\exists$, and we consider whether one of them has a winning strategy. $\forall$ goes first, and then $\exists$ goes next, having seen the choice that $\forall$ has made. Then, we check if indeed $p < q$. If $p < q$, then $\exists$ wins. If $p \not< q$, then $\forall$ wins.
\begin{nproof}
Let $p \in A$. Then, let $q = \frac{2p + 2}{2 + p}$. Since $p \in \QQ$, it follows that $2p + 2 \in \QQ$ and $2 + p \in \QQ$ so $q \in \QQ$. Furthermore, we have that $2p + 2 > 0$ and $2 + p > 0$, so $q > 0$. We also have that:
\[q^2 = \frac{(2p+2)^2}{(2+p)^2} = 2 + \frac{2(p^2 - 2)}{(p+2)^2} < 2\]
Where the inequality follows from the fact that $p^2 < 2$ and hence $(p^2 - 2) < 0$. It therefore follows that $q \in A$. Finally, we have that:
\[q = p + \frac{2-p^2}{2+p} > p\]
so $q > p$, completing the proof of the first part of the claim. The second part is left as an exercise (we note that the same $q$ can be used). \qed
\end{nproof}
\noindent The number $q = \frac{2p+ 2}{2 + p}$ seems to be pulled out of a hat, but actually comes from a fairly geometric picture (the secant method of approximating roots). Discussion on this topic can be found here: \url{https://math.stackexchange.com/questions/141774/choice-of-q-in-baby-rudins-example-1-1}.
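To sketch that picture (our own derivation, following the secant idea referenced above): let $f(t) = t^2 - 2$, and draw the secant line through the points $(p, f(p))$ and $(2, f(2)) = (2, 2)$. Its root is
\[q = \frac{p\,f(2) - 2f(p)}{f(2) - f(p)} = \frac{2p - 2(p^2 - 2)}{4 - p^2} = \frac{2(2-p)(p+1)}{(2-p)(2+p)} = \frac{2p + 2}{2 + p},\]
which is exactly the $q$ used in the proof.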
\subsection{Ordered Sets}
Over the next couple of sections, we will discuss certain properties of sets that will give us a better understanding of the real numbers and allow us to construct them.
\setcounter{rudin}{4}
\begin{definition}{Order}{1.5}
An \textbf{order} $<$ on a set $S$ is a relation with the following properties:
\begin{enumerate}[(i)]
\item For every pair $x, y \in S$, exactly one of $x < y$, $x = y$, or $y < x$ is true.
\item For $x, y, z \in S$, if $x < y$ and $y < z$, then $x < z$.
\end{enumerate}
A point on notation: we note that $x > y$ means $y < x$, and $x \leq y$ means $x < y$ or $x = y$.
\end{definition}
\begin{definition}{Ordered Sets}{1.6}
An \textbf{ordered set} is a pair $(S, <)$. We may write just $S$ if the order can be inferred by the context.
\end{definition}
\noindent A familiar (and useful) set of examples is $S = \NN$, $S = \ZZ$, or $S = \QQ$. For these three sets, we have that $x < y$ if $y-x$ is positive. For another example, consider the set $S$ of English words; then the order $<$ can be the dictionary/lexicographic order.
\begin{definition}{Upper \& Lower Bounds}{1.7}
Let $S$ be an ordered set and $E \subset S$ (for the duration of these notes, we will follow Rudin's notation, with $E \subset S$ as a non-strict subset, and $E \subsetneq S$ as a strict subset). $E$ is \textbf{bounded above} if there exists an element $\beta \in S$ such that $\forall x \in E$, $x \leq \beta$. Any such $\beta$ is an \textbf{upper bound} of $E$. Similarly, we say that $E$ is \textbf{bounded below} if there exists an element $\alpha \in S$ such that $\forall x \in E$, $\alpha \leq x$. In this case, $\alpha$ is a \textbf{lower bound} of $E$.
\end{definition}
\noindent As an example, one can take $S = \QQ$, $E = A = \set{p \in \QQ: p > 0, p^2 < 2}$ (as in Example \ref{exam:1.1b}). Here, $E$ is bounded above, with $\beta = 2$ as one possible upper bound. To see this is the case, consider that if $p \in E$:
\[2 - p = \frac{4 - p^2}{2+p} > \frac{4-2}{2+p} > 0\]
\noindent However, if we take $S = A$, $E = A$, then $E$ is not bounded above, as we saw in the example: there is no upper bound of $A$ in $A$. In general, this example reveals the subtle point that ``the upper bound of a set'' is ill-defined; we need to specify both $E$ and the ambient set $S$, i.e.\ $E \subset S$.
\subsection{The Least Upper Bound Property}
\begin{definition}{Least Upper Bound \& Greatest Lower Bound}{1.8}
Let $S$ be an ordered set, and let $E \subset S$ with $E$ bounded above. If $\exists \alpha \in S$ such that:
\begin{enumerate}[(i)]
\item $\alpha$ is an upper bound for $E$
\item If $\gamma < \alpha$, then $\gamma$ is not an upper bound for $E$
\end{enumerate}
Then $\alpha$ is the \textbf{least upper bound}, or \textbf{supremum}, of $E$. This can be denoted as $\alpha = \sup(E)$. Analogously, the \textbf{greatest lower bound}, or \textbf{infimum}, of $E$ (denoted $\alpha = \inf(E)$) is an element $\alpha \in S$ (if it exists) such that:
\begin{enumerate}[(i)]
\item $\alpha$ is a lower bound for $E$
\item If $\gamma > \alpha$, then $\gamma$ is not a lower bound of $E$.
\end{enumerate}
\end{definition}
\begin{ntheorem}{}
If the supremum/infimum of $E \subset S$ exist, they are unique.
\end{ntheorem}
\begin{nproof}
Let $E \subset S$. Suppose that there exist $\alpha_1, \alpha_2$ such that $\alpha_1 = \sup(E)$ and $\alpha_2 = \sup(E)$. If $\alpha_1 < \alpha_2$, as $\alpha_1$ is an upper bound of $E$, this contradicts the fact that $\alpha_2$ is the least upper bound of $E$. We reach an identical contradiction if $\alpha_2 < \alpha_1$. Therefore we conclude that $\alpha_1 = \alpha_2$ and the supremum of $E$ is unique (if it exists). The proof for the infimum is analogous. \qed
\end{nproof}
\begin{ntheorem}{}
If $E \subset S$ has a maximum element $\alpha$ (that is, an element $\alpha \in E$ such that $x \leq \alpha$ for all $x \in E$), then $\alpha = \sup(E)$. Similarly, if $E$ has a minimum element $\alpha$, then $\alpha = \inf(E)$.
\end{ntheorem}
\begin{nproof}
Let $E \subset S$ and $\alpha = \max(E)$. By definition, $\alpha$ is an upper bound of $E$. Moreover, if $\gamma < \alpha$, then $\gamma$ is not an upper bound of $E$, as it is not greater than or equal to the element $\alpha \in E$. The claim follows (with an identical proof for the minimum). \qed
\end{nproof}
\begin{example}{}{1.9}
\begin{enumerate}[(a)]
\item Consider again the sets $A, B \subset \QQ$ from example \ref{exam:1.1b}. $A$ is bounded above by any element in $B$, and the upper bounds of $A$ are exactly the elements of $B$. Since $B$ has no smallest member, $A$ does not have a least upper bound in $\QQ$.
\item Let $E_1, E_2 \subset \QQ$ such that $E_1 = \set{r \in \QQ: r < 0}$ and $E_2 = \set{r \in \QQ: r \leq 0}$. Then $\sup(E_1) = \sup(E_2) = 0$. Note that this example shows that the supremum can either be contained or not contained in the set; $0 \notin E_1$ but $0 \in E_2$.
\item Let $E \subset \QQ$ such that $E = \set{\frac{1}{n}: n \in \NN}$. Then $\sup(E) = 1$ and $\inf(E) = 0$. This is proven below.
\end{enumerate}
\end{example}
\begin{nproof}
$\sup(E) = 1$ immediately follows from the equivalence of the maximum and supremum as proven above. To see that $\inf(E) = 0$, first note that $0$ is a lower bound for $E$, as all of the elements of $E$ are positive. To see that it is the greatest lower bound, take any $x > 0$. Then, for any $n > \frac{1}{x}$, we have $\frac{1}{n} < x$, and hence $x$ is not a lower bound of $E$. This proves the claim. \qed
\end{nproof}
\begin{definition}{The LUB/GLB Property}{1.10}
An ordered set $S$ has the \textbf{least upper bound property} if for every $E \subset S$ with $E \neq \emptyset$ and $E$ bounded above, $E$ has a least upper bound (that is, $\sup(E)$ exists in $S$). Similarly, an ordered set $S$ has the \textbf{greatest lower bound property} if for every $E \subset S$ with $E \neq \emptyset$ and $E$ bounded below, $E$ has a greatest lower bound.
\end{definition}
\noindent We will show in the next theorem that these properties are actually equivalent; before then, we briefly consider two examples.
\begin{nexample}{}
$\ZZ$ has the least upper bound property, while $\QQ$ does not.
\end{nexample}
\begin{nproof}
For the first claim, consider any nonempty $E \subset \ZZ$ that is bounded above. Choose any $x \in E$. Since $E$ is bounded above, the collection of elements of $E$ that are greater than or equal to $x$ is finite, and it is nonempty (it contains $x$). Take the maximum of these finitely many elements. This maximum is also the maximum of $E$, so it is the supremum of $E$. Therefore $\ZZ$ has the LUB property as claimed.
The second claim immediately follows from Example \ref{exam:1.9}(a). \qed
\end{nproof}
\begin{theorem}{}{1.11}
Let $S$ be an ordered set. Then $S$ has the LUB property if and only if it has the GLB property.
\end{theorem}
\begin{nproof}
$\boxed{\implies}$ Let $S$ be an ordered set with the LUB property. Let $E \subset S$ with $E \neq \emptyset$ and $E$ bounded below. Let $L = \set{x \in S: x \text{ is a lower bound of } E}$. $L \neq \emptyset$ as $E$ is bounded below (and hence has at least one lower bound). If $y \in E$, then $y$ is an upper bound for $L$. Since $E$ is nonempty, $L$ is therefore bounded above. Since $S$ has the LUB property, $\sup(L)$ must exist; let us call it $\alpha$. Then, $\alpha \leq x\ \forall x \in E$ (as if $\gamma < \alpha$, then $\gamma$ is not an upper bound of $L$, and hence $\gamma \notin E$). Hence, $\alpha$ is a lower bound for $E$, and so $\alpha \in L$. Since $\alpha = \sup(L)$, $\alpha$ is an upper bound for $L$, so $\alpha \geq \gamma\ \forall \gamma \in L$. Thus, $\alpha = \inf(E)$.
$\boxed{\impliedby}$ Left as an exercise. \qed
\end{nproof}
\subsection{Fields and Ordered Fields}
\begin{definition}{Fields}{1.12}
A \textbf{field} $F$ is a set with two binary operations, $+$ and $\cdot$ (addition and multiplication) such that the following axioms are satisfied:
\begin{enumerate}[start=1, label={(A\arabic*):}]
\item If $x, y \in F$, then $x + y \in F$. (Closure under addition)
\item $x + y = y + x$ for all $x, y \in F$. (Commutativity of addition)
\item $(x+y) + z = x + (y + z)$ for all $x, y, z \in F$. (Associativity of addition)
\item $\exists 0 \in F$ such that $\forall x \in F$, $0 + x = x$. (Additive identity)
\item $\forall x \in F$, $\exists y \in F$ such that $x + y = 0$. We can denote $y = -x$. (Additive inverse)
\end{enumerate}
\begin{enumerate}[start=1, label={(M\arabic*):}]
\item If $x, y \in F$, then $x\cdot y\in F$. (Closure under multiplication)
\item $x \cdot y = y \cdot x$ for all $x, y \in F$. (Commutativity of multiplication)
\item $(x\cdot y)\cdot z = x \cdot (y \cdot z)$ for all $x, y, z \in F$. (Associativity of multiplication)
\item $\exists 1 \in F$ such that $1 \neq 0$ and $\forall x \in F$, $1 \cdot x = x$. (Multiplicative identity)
\item $\forall x \in F$ with $x \neq 0$, $\exists y \in F$ such that $x \cdot y = 1$. We can denote $y = \frac{1}{x}$. (Multiplicative inverse)
\end{enumerate}
(D): $x \cdot (y + z) = x \cdot y + x \cdot z$, $\forall x, y, z \in F$. (Distributive law)
\end{definition}
\noindent Note that A3/M3 show that $x + y + z$ and $x\cdot y\cdot z$ are well defined in a mathematical sense; however, associativity may not hold for computers that do math with finite precision!
\begin{ntheorem}{}
The additive/multiplicative identities given by (A4)/(M4) and the additive/multiplicative inverses given by (A5)/(M5) are unique.
\end{ntheorem}
\begin{nproof}
Let $F$ be a field. Suppose that there exist $0_1, 0_2 \in F$ such that $0_1 + x = x$ and $0_2 + x = x$ for all $x \in F$. We then have that:
\begin{align*}
0_2 &= 0_1 + 0_2 & \text{($0_1$ is an additive identity)}
\\ &= 0_2 + 0_1 & \text{(A2)}
\\ &= 0_1 & \text{($0_2$ is an additive identity)}
\end{align*}
which shows that the additive identity is unique. The remaining proofs are left as an exercise. \qed
\end{nproof}
\noindent Some easy (and familiar) consequences of the field axioms can be found in Rudin 1.14-1.16. Instead of repeating those here, we will discuss some examples.
The rationals form a field (under the usual notions of addition/multiplication), but the integers do not, as there are no multiplicative inverses (e.g.\ there exists no integer $x \in \ZZ$ such that $2\cdot x = 1$). The simplest example of a field is $F = \set{0, 1}$, with the relations:
\begin{align*}
0 + 0 = 0\quad 0\cdot0 = 0
\\ 0 + 1 = 1 \quad 0 \cdot 1 = 0
\\ 1 + 1 = 0 \quad 1 \cdot 1 = 1
\end{align*}
This field is often called $\mathbb{F}_2$ or $F_2$, and is useful in computer science (where bits can take on two states, 0 or 1). As a slight tangent, a byte (8 bits) can be considered an element of an 8-dimensional vector space over the field $\mathbb{F}_2$, where $+$ would be the XOR operator and $\cdot$ would be the AND operation.
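A minimal sketch of this identification (our illustration; the specific byte values are arbitrary):
\begin{verbatim}
#include <cassert>
#include <cstdint>

int main() {
    // A byte viewed as a vector in F_2^8: componentwise addition is
    // XOR, componentwise multiplication is AND.
    std::uint8_t a = 0xB2, b = 0x71;
    std::uint8_t sum  = a ^ b;   // addition in F_2, bit by bit
    std::uint8_t prod = a & b;   // multiplication in F_2, bit by bit
    assert((sum ^ b) == a);      // adding b again recovers a: x + x = 0
    assert((prod & a) == prod);  // (ab)a = ab, using x*x = x in F_2
    return 0;
}
\end{verbatim}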
A generalization of the above example is $\mathbb{F}_p$ or $F_p$, for a prime number $p$. This field consists of the elements $0, 1, \ldots, p-1$, with addition and multiplication carried out mod $p$. An interesting result is that, in general, a finite field must have cardinality equal to a prime power.
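As a quick illustration (our sketch; the choice $p = 7$ is arbitrary), this arithmetic is easy to realize directly:
\begin{verbatim}
#include <cassert>

// Arithmetic in F_p for p = 7: the elements are 0, ..., 6, with
// addition and multiplication carried out mod p.
constexpr int P = 7;
int add(int a, int b) { return (a + b) % P; }
int mul(int a, int b) { return (a * b) % P; }

// Find the multiplicative inverse of a nonzero element by search;
// it exists precisely because p is prime.
int inv(int a) {
    for (int y = 1; y < P; ++y)
        if (mul(a, y) == 1) return y;
    return -1;  // unreachable for nonzero a when P is prime
}

int main() {
    assert(add(5, 4) == 2);       // 9 mod 7
    assert(mul(3, inv(3)) == 1);  // inv(3) = 5, since 15 = 2*7 + 1
    return 0;
}
\end{verbatim}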
Note that a field cannot have a single element; the field axioms (A4) and (M4) require the existence of distinct additive and multiplicative identities, which a singleton set cannot satisfy.
Although algebra is not the focus of this course, it may be interesting to briefly think about sets with less structure than a field. We start by considering a group.
\phantom{i}
\noindent A \textbf{group} $G$ is a set with a binary operation $(a,b) \mapsto a\cdot b$ such that the following axioms are satisfied:
\begin{enumerate}[start=1, label={(M\arabic*):}]
\item If $a, b \in G$, then $a\cdot b \in G$ (Closure)
\stepcounter{enumi}
\item For $a, b, c \in G$, $(a\cdot b)\cdot c = a\cdot(b\cdot c)$ (Associativity)
\item There exists $1 \in G$ such that $\forall x \in G$, $1 \cdot x = x$. (Identity)
\item $\forall x \in G$, there exists $y \in G$ such that $x \cdot y = 1$. (Inverse)
\end{enumerate}
We note that $\ZZ$ is a group under addition, but not under multiplication (due to the lack of multiplicative inverses). We can also consider the set of $2\times 2$ matrices with integer entries:
\[G = \set{\m{a & b \\ c & d}: a, b, c, d \in \ZZ}\]
$G$ is again a group under matrix addition, but not under matrix multiplication (as not every matrix in $G$ has an inverse in $G$). If we instead took the matrices in $G$ with determinant $\pm 1$ (whose inverses again have integer entries), we would obtain a group under matrix multiplication. A set with slightly more structure than a group (though not quite as structured as a field) is a ring:
\newpage
\noindent A \textbf{ring} $R$ is a set with two binary operations $(a,b) \mapsto a + b$ and $(a, b) \mapsto a \cdot b$ such that the following axioms are satisfied:
\begin{enumerate}[start=1, label={(A\arabic*):}]
\item If $x, y \in R$, then $x + y \in R$. (Closure under addition)
\item $x + y = y + x$ for all $x, y \in R$. (Commutativity of addition)
\item $(x+y) + z = x + (y + z)$ for all $x, y, z \in R$. (Associativity of addition)
\item $\exists 0 \in R$ such that $\forall x \in R$, $0 + x = x$. (Additive identity)
\item $\forall x \in R$, $\exists y \in R$ such that $x + y = 0$. We can denote $y = -x$. (Additive inverse)
\end{enumerate}
\begin{enumerate}[start=1, label={(M\arabic*):}]
\item If $x, y \in R$, then $x\cdot y\in R$. (Closure under multiplication)
\stepcounter{enumi}
\item $(x\cdot y)\cdot z = x \cdot (y \cdot z)$ for all $x, y, z \in R$. (Associativity under multiplication)
\item $\exists 1 \in R$ such that $1 \neq 0$ and $\forall x \in R$, $1 \cdot x = x$. (Multiplicative identity)
\end{enumerate}
\begin{enumerate}[start=1, label={(D\arabic*):}]
\item $x \cdot (y + z) = x \cdot y + x \cdot z$, $\forall x, y, z \in R$. (Left distributivity)
\item $(y + z) \cdot x = y \cdot x + z \cdot x$, $\forall x, y, z \in R$. (Right distributivity)
\end{enumerate}
\noindent Rings have the same axioms as fields under addition, but multiplication is not necessarily commutative (this is why an additional distributivity axiom is added), and multiplicative inverses are not required. We note that $\ZZ$ and $G$ are both rings under their respective operations of addition and multiplication.
For the remainder of this course, we will really only be discussing fields; however, groups and rings are central objects of interest in abstract algebra courses!
\setcounter{rudin}{16}
\begin{definition}{Ordered Field}{1.13}
An \textbf{Ordered field} is a field $F$ that is also an ordered set, such that the following axioms are satisfied:
\begin{enumerate}[(i)]
\item If $x, y, z \in F$ and $y < z$, then $x + y < x + z$.
\item If $x, y \in F$ and $x > 0, y > 0$, then $x\cdot y > 0$.
\end{enumerate}
\end{definition}
\noindent Some properties of ordered fields are discussed in Rudin 1.18. We will again refer the reader to the discussion in the textbook for these properties, and here consider some examples.
$\QQ$ is an ordered field, with the familiar order of $a > b$ if $a - b > 0$. A question may arise as to whether $\mathbb{F}_2$ is an ordered field. A priori, fields do not come with an order, but is it possible to impose an order on this set such that it becomes an ordered field? The answer turns out to be no.
\begin{proof}
It suffices to show that both possible orderings leads to a contradiction. Suppose $0 < 1$. Then, $1 = 0 + 1 < 1 + 1 = 0$ which is a contradiction. Suppose instead that $1 < 0$. Then, $0 = 1 + 1 < 1 + 0 = 1$ which again is a contradiction.
\end{proof}
\stepcounter{rudin}
\begin{theorem}{Existence of $\RR$}{1.19}
There exists an ordered field $\RR$ which has the LUB property and contains $\QQ$ as a subfield.
\end{theorem}
\noindent What does it mean for $\QQ$ to be a subfield? It means that there exists an injective function $\QQ \to \RR$ that respects the properties of an ordered field.

This field $\RR$ happens to be exactly the set of real numbers we are familiar with. However, a natural question is ``what does it mean that there exists such a field?'' It turns out that we can define the reals based on the definitions we have made already. One further question might be whether there could exist several fields with the above property; however, taking the appropriate view, we will find that there is a unique such field.
\subsection{Consequences of the LUB Property}
We will use the least upper bound property and the fact that $\RR$ has $\QQ$ as a subfield to derive its properties.
\begin{theorem}{Archimedean Property, Density of Rationals/Irrationals in $\RR$}{1.20}
\begin{enumerate}[(a)]
\item If $x, y \in \RR$ and $x > 0$, then $\exists n \in \NN$ such that $nx > y$.
\item If $x, y \in \RR$, and $x < y$, then $\exists p \in \QQ$ such that $x < p < y$. ($\QQ$ is dense in $\RR$)
\item If $x, y \in \RR$, and $x < y$, then $\exists \alpha \in \RR \setminus \QQ$ such that $x < \alpha < y$. ($\RR\setminus\QQ$ is dense in $\RR$)
\end{enumerate}
\end{theorem}
\begin{nproof}
(a) Let $A = \set{nx: n \in \NN}$. Suppose for the sake of contradiction that the conclusion was false; then $y$ is an upper bound of $A$. Then, $\alpha = \sup(A)$ exists by the LUB property of $\RR$. Since $x > 0$, we then have that $\alpha - x < \alpha$ by the ordered field properties. Hence, $\alpha - x$ is not an upper bound for $A$. Therefore, there exists some $m \in \NN$ such that $mx > \alpha - x$. It then follows that $(m+1)x > \alpha$. We have therefore found $k = m+1 \in \NN$ such that $kx > \alpha$, contradicting the fact that $\alpha$ is an upper bound of $A$. \qed
\end{nproof}
\noindent In order to prove (b) and (c), we first prove a stronger version of 1.20(a):
\begin{nlemma}{}
If $x, y \in \RR$ and $x > 0$, then there exists $n \in \ZZ$ such that $(n-1)x \leq y < nx$.
\end{nlemma}
\begin{nproof}
Suppose $y \geq 0$. Let $A = \set{m \in \NN: y < mx} \subset \NN$. By Theorem \ref{thm:1.20} (a), we have that $A \neq \emptyset$. Every non-empty subset of $\NN$ has a smallest element (to see this, let $x \in A$, and define $A' = \set{y \in A: y \leq x}$. This is finite and nonempty and so has a smallest element, and the minimum element of this set will also be a lower bound and hence the minimum element of all of $A$), so let $n = \min(A)$. The claim holds for this $n$.
The case for $y < 0$ is left as an exercise. \qed
\end{nproof}
\begin{nproof}
(b) Since $y - x > 0$, by (a), $\exists n \in \NN$ such that $1 < n(y-x)$. Furthermore, by the Lemma we have that $\exists m \in \ZZ$ such that $m - 1 \leq nx < m$ and hence $m \leq nx + 1$. From these inequalities we obtain that $nx < m \leq nx + 1 < ny$, and therefore $x < \frac{m}{n} < y$ for some $m \in \ZZ$, $n \in \NN$. \qed
\end{nproof}
\noindent For the proof of part (c), we will use the result of Theorem \ref{thm:1.21} from the next section, specifically that there exists $s \in \RR \setminus \QQ$ such that $s > 0$ and $s^2 = 2$. We will call this $\sqrt{2}$.
\begin{nproof}
(c) First, we have that $\sqrt{2} < 2$: if $\sqrt{2} = 2$, then $2 = (\sqrt{2})^2 = 2^2 = 4$, which is a contradiction, and if $\sqrt{2} > 2$, then $2 = \sqrt{2}\cdot \sqrt{2} > 2\cdot 2 = 4$ by Rudin 1.18, which is yet again a contradiction. Thus, $\frac{\sqrt{2}}{2} < 1$.
Let $x, y \in \RR$ such that $x < y$. By Theorem \ref{thm:1.20}(b), there exist $p, q \in \QQ$ such that $x < p < q < y$. Let $\alpha = p + \frac{\sqrt{2}}{2}(q - p)$. Then, we have that $p < \alpha < p + 1\cdot(q-p) = q$, and hence $x < p < \alpha < q < y$.
If $\alpha \in \QQ$, then $\sqrt{2} = 2\left(\frac{\alpha-p}{q-p}\right) \in \QQ$, which is a contradiction, so it follows that $\alpha \in \RR \setminus \QQ$. \qed
\end{nproof}
\subsection{Integer Roots of the Reals}
In this section, we will prove that $\sqrt{2}$ exists and is an irrational number, but we will not use the fact that $\RR \setminus \QQ$ is dense in $\RR$; this would of course be circular reasoning. The more general idea will be to prove that for any $x > 0$ and $n \in \NN$, there exists $y \in \RR$ such that $y^n = x$; that is, $y = x^{1/n}$ exists. Before this, we prove a lemma.
\begin{nlemma}{}
If $0 < a < b$ and $n \in \NN$, then $0 < b^n - a^n \leq nb^{n-1}(b-a)$
\end{nlemma}
\noindent Note that a ``Calculus proof'' of this Lemma would be to let $f(x) = x^n$, and then
\[f(b) - f(a) = f'(c)(b-a) = nc^{n-1}(b-a) \leq nb^{n-1}(b-a)\]
Where we invoke the mean value theorem. But this obviously doesn't work as we have neither defined a derivative nor proven the mean value theorem. A proper proof would be:
\begin{nproof}
Let $0 < a < b$. Then, we may factor $b^n - a^n$ such that:
\[b^n - a^n = (b-a)(b^{n-1} + ab^{n-2} + a^2b^{n-3} + \ldots + a^{n-2}b + a^{n-1})\]
The second factor is a sum of $n$ terms, each positive and at most $b^{n-1}$. Therefore:
\[b^n - a^n \leq nb^{n-1}(b-a)\]
which proves the claim. \qed
\end{nproof}
\noindent We will now state the theorem formally:
\begin{theorem}{Integer Roots of the Reals}{1.21}
Let $x \in \RR$, $x > 0$, and $n \in \NN$. Then, there exists a unique $y \in \RR$ such that $y > 0$ and $y^n = x$.
\end{theorem}
\noindent Note that somewhere in the proof, we will use the fact that $y \in \RR$; this statement doesn't hold for rationals (see Example \ref{exam:1.1a}) so some property of the reals must come into play somewhere.
\begin{nproof}
If $n = 1$, then the unique solution is $y = x$; we may therefore assume that $n \geq 2$.
\\ \textbf{Uniqueness:} Suppose there exist two distinct numbers $y_1, y_2$ with $y_1 > 0, y_2 > 0$, and $y_1^n = y_2^n = x$. WLOG, suppose $0 < y_1 < y_2$. We then have that $0 < y_1^n < y_2^n$, contradicting $y_1^n = y_2^n$.
\\ \textbf{Existence:} We prove existence in three steps.
\begin{enumerate}[1.]
\item Let $E = \set{t \in \RR: t > 0, t^n < x}$; we show that $E \neq \emptyset$. If $x < 1$, then $x^n < x$, so $x \in E$. If $x \geq 1$, then $\left(\frac{1}{2}\right)^n < \frac{1}{2} < x$, so $\frac{1}{2} \in E$. Therefore, $E \neq \emptyset$.
\item We show that $E$ is bounded above and has a supremum in $\RR$. If $t > 1 + x$, then it follows that $t^n > t > x$, so $t \notin E$. Hence, $1 + x$ is an upper bound of $E$. By Theorem \ref{thm:1.19} (the LUB property of $\RR$), it follows that $\sup(E)$ exists in $\RR$.
\item We show that $y = \sup(E)$ satisfies $y^n = x$. As $\RR$ is an ordered field, one of $y^n < x$, $y^n = x$, or $y^n > x$ must be true; we show that the first and third are impossible.
\begin{enumerate}
\item Suppose $y^n < x$. We will obtain a contradiction by finding $h > 0$ such that $(y+h)^n < x$. (Why is this a contradiction? $y + h > y$, so if $(y+h)^n < x$, then $y + h \in E$, contradicting the fact that $y$ is an upper bound of $E$.) WLOG, suppose that $h < 1$. By the above Lemma, we have that:
\[(y+h)^n - y^n \leq n(y+h)^{n-1}h \leq n(y+1)^{n-1}h\]
By choosing $h$ sufficiently small, that is:
\[h < \min\set{1, \frac{x-y^n}{n(y+1)^{n-1}}}\]
we have that $n(y+1)^{n-1}h < x - y^n$, from which it follows that $(y+h)^n - y^n < x - y^n$, and so $(y+h)^n < x$, which is the desired contradiction.
\item Suppose $y^n > x$. We will obtain a contradiction by finding $h > 0$ such that $(y-h)^n > x$. If this is true, then $y-h$ is an upper bound for $E$ (any $t \in E$ satisfies $t^n < x < (y-h)^n$, and hence $t < y - h$), contradicting the fact that $y$ is the least upper bound for $E$. Suppose that $0 < h < y$ (the choice of $h$ below guarantees this, as $\frac{y^n - x}{ny^{n-1}} < \frac{y^n}{ny^{n-1}} = \frac{y}{n} \leq y$). Again applying the Lemma, we have that:
\[y^n - (y-h)^n \leq ny^{n-1}h\]
By choosing $h$ sufficiently small, that is:
\[h < \min\set{1, \frac{y^n-x}{ny^{n-1}}}\]
It then follows that:
\[y^n - (y-h)^n \leq ny^{n-1}h < y^n - x\]
and hence $(y-h)^n > x$, which is the desired contradiction. \qed
\end{enumerate}
\end{enumerate}
\end{nproof}
\subsection{Construction of the Reals}
Theorem \ref{thm:1.19} says that there exists an ordered field with the LUB property that contains $\QQ$ as a subfield. We now go about proving this statement. The construction is fairly technical and hence will be carried out in multiple steps. Some of the steps are left as exercises (one can refer to Rudin for the fully complete construction).
\begin{nblank}{Step 1: Defining the elements of $\RR$}
The members of $\RR$ will be proper subsets of $\QQ$, called cuts. $\RR = \set{\text{all cuts}}$.
\begin{ndef}{: Cuts}
A \textbf{cut} is a proper subset $\alpha \subsetneq \QQ$ with the three properties:
\begin{enumerate}[(I)]
\item $\alpha \neq \emptyset$
\item If $p \in \alpha$, then $q \in \alpha \; \forall q < p$.
\item If $p \in \alpha$, then $\exists r \in \alpha$ such that $p < r$.
\end{enumerate}
\end{ndef}
\end{nblank}
\begin{figure}[htbp]
\centering
\begin{tikzpicture}
\draw[latex-latex, very thick] (-6, 0) -- (6,0) node[anchor=south] {$\QQ$};
\foreach \i in {-5.7,-5.6,...,0.9}{
\draw[] (\i,0) -- (\i,-0.1);
}
\draw[] (1,0.1) node[anchor=south] {$\downarrow$};
\draw[] (0.96,0) node[] {$)$};
\draw[] (-2.5, 0) node[anchor=south] {$\alpha$};
\end{tikzpicture}
\caption{Visualization of a cut $\alpha$. The real number described by this cut can be thought of as the number sitting at its right boundary (the arrow).}
\label{fig2}
\end{figure}
\noindent In a sense, a cut gives us a way of discussing the real numbers (in the way we are familiar with them already) without referring to them directly; much like we could formally define/refer to rationals as equivalence classes of ordered pairs.
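For concreteness (an illustration of ours, consistent with the definition above): each rational $r \in \QQ$ gives rise to the cut $r^* = \set{q \in \QQ: q < r}$, while the ``hole'' at $\sqrt{2}$ from Example \ref{exam:1.1a} corresponds to the cut
\[\alpha = \set{q \in \QQ: q \leq 0} \cup \set{q \in \QQ: q > 0,\ q^2 < 2},\]
which is a perfectly good subset of $\QQ$ (properties (I)--(III) can be checked using Example \ref{exam:1.1b}) even though no rational number sits at its right boundary.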
\noindent As a note, we could very well define cuts to be bounded below rather than above, and the following construction would still work out.
\begin{nblank}{Step 2: $\RR$ is an ordered set}
We define $\alpha < \beta$ to mean $\alpha \subsetneq \beta$. We show that this makes $\RR$ into an ordered set. First checking transitivity, we have that if $\alpha < \beta$ and $\beta < \gamma$, then $\alpha < \gamma$, by the fact that strict set inclusion is transitive. Furthermore, at most one of $\alpha < \beta$, $\alpha = \beta$, and $\beta < \alpha$ can hold, since strict inclusion is irreflexive and asymmetric. To see that at least one holds, suppose the first two fail. Then, $\alpha \nsubseteq \beta$, and hence $\exists p \in \alpha$ with $p \notin \beta$. If $q \in \beta$, then $q < p$ (otherwise $p \in \beta$ by (II)), and hence $q \in \alpha$ by (II), so $\beta \subset \alpha$; since $\beta \neq \alpha$, it follows that $\beta \subsetneq \alpha$.
\end{nblank}
\begin{nblank}{Step 3: $\RR$ has the LUB property}
We show that $\RR$ has the LUB property. To see this is the case, let $A \subset \RR$ with $A \neq \emptyset$, and suppose that there exists $\beta \in \RR$ that is an upper bound for $A$. We will now define $\gamma = \bigcup_{\alpha \in A}\alpha$ and prove that $\gamma \in \RR$ and $\gamma = \sup A$ (hence $A$ has a supremum and $\RR$ has the LUB property).
Since $A \neq \emptyset$, $\exists \alpha_0 \in A$, and since $\alpha_0 \neq \emptyset$ (as it is a cut) and $\alpha_0 \subset \gamma$, it follows that $\gamma \neq \emptyset$. Next, we have that $\gamma \subset \beta$, since $\alpha \subset \beta$ for every $\alpha \in A$, and hence $\gamma \neq \QQ$; that is, $\gamma \subsetneq \QQ$. Hence $\gamma$ satisfies property (I) of a cut.
Take $p \in \gamma$. Then $p \in \alpha_1$ for some $\alpha_1 \in A$. If $q < p$, then $q \in \alpha_1$ (as $\alpha_1$ is a cut), so $q \in \gamma$, satisfying property (II).
Next, choose $r \in \alpha_1$ such that $r > p$, then $r \in \gamma$ (as $\alpha_1 \subset \gamma$) and hence $\gamma$ satisfies property (III). Hence $\gamma$ is a cut, and $\gamma \in \RR$.
Finally, we show that $\gamma = \sup A$. Clearly, $\alpha \leq \gamma$ for all $\alpha \in A$, as $\gamma = \bigcup_{\alpha \in A}\alpha$, so $\gamma$ is an upper bound of $A$. To show that it is the least upper bound, let $\delta < \gamma$ be a cut. Then, $\exists s \in \gamma$ such that $s \notin \delta$. Therefore, $\exists \alpha_2 \in A$ such that $s \in \alpha_2$; hence $\delta < \alpha_2$ (by trichotomy: $\alpha_2 \leq \delta$ would force $s \in \delta$), so $\delta$ is not an upper bound for $A$, giving the desired result.
\end{nblank}
\begin{nblank}{Step 4: Addition on $\RR$}
\begin{ndef}{: Addition}
If $\alpha, \beta \in \RR$, we define $\alpha + \beta = \set{s + t: s \in \alpha, t \in \beta}$. Showing that this is a cut is left as an exercise.
\end{ndef}
\begin{ndef}{: Zero}
$0^* = \set{s \in \QQ: s < 0}$. Showing that this is a cut is left as an exercise.
\end{ndef}
We leave it as an exercise to show that the addition axioms (A1)-(A5) of a field are satisfied under this definition of addition on $\RR$, with the 0 element as $0^*$ defined above.
\end{nblank}
\begin{nblank}{Step 5: $\RR$ satisfies the Ordered Field Property (i)}
We verify that if $\alpha, \beta, \gamma \in \RR$ and $\beta < \gamma$, then $\alpha + \beta < \alpha + \gamma$.
For every $s \in \alpha, t \in \beta$, we have that $t \in \gamma$ as $\beta$ is a subset of $\gamma$ by the definition of order on $\RR$. Hence, $s + t \in \alpha + \beta$ implies $s + t \in \alpha + \gamma$. Therefore, $\alpha + \beta \subset \alpha + \gamma$ and hence $\alpha + \beta \leq \alpha + \gamma$.
We are then left to check that $\alpha + \beta \neq \alpha + \gamma$. To see that this is the case, if $\alpha + \beta = \alpha + \gamma$, then $\beta = \alpha + \beta - \alpha = \alpha + \gamma - \alpha = \gamma$ by the field axioms for addition. Therefore we obtain that $\beta = \gamma$, contradicting that $\beta < \gamma$. Hence the claim is proven.
As a remark, note that $0^* < \alpha \iff -\alpha < 0^*$.
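Here $-\alpha$ denotes the additive inverse of $\alpha$ guaranteed by (A5); one explicit description (part of the exercise in Step 4) is
\begin{align*}
-\alpha = \set{p \in \QQ: \exists r \in \QQ,\ r > 0, \text{ such that } -p - r \notin \alpha},
\end{align*}
i.e. $-p$ exceeds, by some positive rational margin $r$, an upper bound $-p-r$ of $\alpha$ (note that $-p - r \notin \alpha$ forces it to be an upper bound for $\alpha$, by property (II)).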
\end{nblank}
\noindent Next we will define multiplication on $\RR$. A first attempt would be $\alpha \cdot \beta = \set{s \cdot t: s\in \alpha, t \in \beta}$. However, this definition is inconsistent with what we require multiplication to accomplish for negative numbers: $-1 \cdot -1$ would fail to be a cut (it would not contain any negative numbers and hence fail property (II)) and $-1 \cdot 1$ would yield the entirety of the rationals (again not a cut!).
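\noindent Concretely, writing $(-1)^*$ for the cut $\set{q \in \QQ: q < -1}$ (in the notation of Step 8 below), the naive definition would give
\begin{align*}
\set{s \cdot t : s, t \in (-1)^*} = \set{s \cdot t: s < -1,\ t < -1} = \set{q \in \QQ: q > 1},
\end{align*}
a set which is not closed downward and hence violates property (II).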
\begin{nblank}{Step 6: Positive Multiplication on $\RR$}
\begin{ndef}{: Positive Reals}
We define $\RR^+ = \set{\alpha \in \RR: \alpha > 0^*}$
\end{ndef}
\begin{ndef}{Multiplication of Positive Reals}
If $\alpha, \beta \in \RR^+$, we define $\alpha \cdot \beta = \set{r \cdot s: r \in \alpha, r> 0, s \in \beta, s > 0} \cup \set{t \in \QQ: t \leq 0}$. Equivalently, $\alpha \cdot \beta = \set{p \in \QQ: p \leq r \cdot s \text{ for some } r \in \alpha, r > 0, s \in \beta, s > 0}$. We leave it as an exercise to show that $\alpha \cdot \beta \in \RR$, and moreover, $\alpha \cdot \beta \in \RR^+$. Showing this second fact proves ordered field property (ii).
\end{ndef}
\begin{ndef}{: One}
$1^* = \set{r \in \QQ: r < 1}$. We again leave showing $1^* \in \RR^+$ as an exercise.
\end{ndef}
\end{nblank}
\begin{nblank}{Step 7: Multiplication on all of $\RR$}
\begin{ndef}{: Multiplication by zero}
$\alpha \cdot 0^* = 0^* = 0^* \cdot \alpha$
\end{ndef}
\begin{ndef}{: Multiplication}
We define general multiplication as below, where the $\cdot$ on the RHS represents the multiplication of positive reals as outlined in Step 6 (the case $\alpha, \beta > 0^*$ is covered by Step 6 itself, and products involving $0^*$ by the previous definition).
\begin{align*}
\alpha \cdot \beta =
\begin{cases}
(-\alpha)\cdot(-\beta) & \text{if $\alpha < 0^*$ and $\beta < 0^*$}
\\ -\left((-\alpha)\cdot\beta\right) & \text{if $\alpha < 0^*$ and $\beta > 0^*$}
\\ -\left(\alpha \cdot (-\beta)\right) & \text{if $\alpha > 0^*$ and $\beta < 0^*$}
\end{cases}
\end{align*}
\end{ndef}
We leave it as an exercise to show that the multiplicative axioms (M1)-(M5), as well as the distributive law (D) of a field are satisfied under this definition of multiplication on $\RR$.
\end{nblank}
\noindent Up until this point, we have shown $\RR$ is an ordered field with the LUB property; it remains to check that it contains $\QQ$ as a subfield. Note that we do have to be a bit careful with what we mean here; $\RR$ does not literally contain $\QQ$, as $\RR$ is a set of proper subsets of $\QQ$. What we really mean is to associate every element of $\QQ$ to an element of $\RR$ such that the field structure is preserved.
\begin{nblank}{Step 8: $\RR$ contains $\QQ$ as a subfield}
For each $r \in \QQ$, associate the cut $r^* = \set{p \in \QQ: p < r}$. We then leave as an easy exercise to verify that $r^* < s^* \iff r < s$, $r^* + s^* = (r + s)^*$, and $r^*\cdot s^* = (r\cdot s)^*$. This concludes the construction of the reals. \qed
\end{nblank}
\noindent Note that later on in the course, we will construct the real numbers in a different fashion: by considering Cauchy sequences of rationals modulo an equivalence relation. Also note that from here on out, it will suffice to have the standard/traditional picture of a ``real number'' in mind (i.e. infinite decimal expansions) and we will not have to really think about the real numbers as cuts; this was just necessary for the formal construction.
\subsection{The Complex Field}
\setcounter{rudin}{23}
\begin{definition}{The Complex Numbers}{1.24}
We define the set of \textbf{complex numbers} to be $\set{(a, b): a, b \in \RR}$, denoted by $\CC$. For $x = (a, b) \in \CC$ and $y = (c, d) \in \CC$, we write $x = y$ if and only if $a = c$ and $b = d$ (note that this is a very different notion of equality compared to the rationals). We define the zero element to be $(0, 0)$ and the one element to be $(1, 0)$. We define addition of complex numbers such that:
\begin{align*}
x + y = (a, b) + (c, d) = (a + c, b + d)
\end{align*}
And multiplication of complex numbers such that:
\begin{align*}
x\cdot y = (a, b)\cdot (c, d) = (ac - bd, ad + bc)
\end{align*}
\end{definition}
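\noindent As a quick sanity check of the multiplication rule: with $x = (1, 2)$ and $y = (3, 4)$, we get $x \cdot y = (1\cdot 3 - 2 \cdot 4,\ 1 \cdot 4 + 2\cdot 3) = (-5, 10)$; anticipating Theorem 1.29 below, this is just the familiar $(1 + 2i)(3 + 4i) = -5 + 10i$.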
\begin{theorem}{}{1.25}
The operations of $+$ and $\cdot$, as well as the zero/one elements defined above turn $\CC$ into a field.
\end{theorem}
\begin{nproof}
It suffices to verify the field axioms (A1)-(A5), (M1)-(M5), and (D) as discussed in \ref{def:1.12}. We will here show (M3), (M4), and (M5) and leave the rest as exercises.
\begin{enumerate}[start=3, label={(M\arabic*):}]
\item Let $x, y, z \in \CC$. We show that $(x\cdot y)\cdot z = x \cdot (y \cdot z)$. Let $x = (a, b), y = (c, d)$, and $z = (e, f)$. We then have that:
\begin{align*}
(x\cdot y) \cdot z &= (ac - bd, ad + bc) \cdot (e, f)
\\ &= ((ac-bd)e - (ad+bc)f, (ac-bd)f + (ad+bc)e)
\end{align*}
We also have that:
\begin{align*}
x \cdot(y\cdot z) &= (a, b)\cdot(ce - df, cf + de)
\\ &= (a(ce-df) - b(cf+de), a(cf+de) + b(ce-df))
\\ &= (ace - adf - bcf - bde, acf + ade + bce - bdf)
\\ &= ((ac-bd)e - (ad+bc)f, (ac-bd)f + (ad+bc)e)
\end{align*}
So the claim is proven.
\item $(a, b)(1, 0) = (a \cdot 1 - b \cdot 0, a \cdot 0 + b \cdot 1) = (a, b)$
\item Let $x \in \CC$ such that $x \neq 0$. Then, $x = (a, b)$ where either $a \neq 0$ or $b \neq 0$ or both. Hence, $a^2 + b^2 > 0$. Then, let $\frac{1}{x} = (\frac{a}{a^2 + b^2}, -\frac{b}{a^2+b^2})$. We then have that:
\begin{align*}
x\frac{1}{x} &= (a, b)\left(\frac{a}{a^2 + b^2}, -\frac{b}{a^2+b^2}\right)
\\ &= \left(a\frac{a}{a^2 + b^2} - b\left(-\frac{b}{a^2+b^2}\right), a\left(-\frac{b}{a^2+b^2}\right) + b\left(\frac{a}{a^2+b^2}\right)\right)
\\ &= \left(\frac{a^2 +b^2}{a^2 + b^2}, -\frac{ab}{a^2+b^2} + \frac{ab}{a^2+b^2}\right)
\\ &= (1, 0)
\end{align*}
This proves the claim. \qed
\end{enumerate}
\end{nproof}
\noindent Much like $\QQ$ was a subfield of $\RR$, $\RR$ is a subfield of $\CC$, and there exists a map $\phi$ from $\RR$ to $\CC$ that respects the field axioms, namely:
\begin{align*}
\fullfunction{\phi}{\RR}{\CC}{x}{(x, 0)}
\end{align*}
The theorem below shows that $\phi$ preserves the field structure:
\begin{theorem}{}{1.26}
For $a, b \in \RR$ we have that $(a, 0) + (b, 0) = (a + b, 0)$ and $(a, 0)(b, 0) = (ab, 0)$.
\end{theorem}
\begin{definition}{i}{1.27}
$i = (0, 1)$.
\end{definition}
\begin{theorem}{}{1.28}
$i^2 = -1$.
\end{theorem}
\begin{theorem}{}{1.29}
If $a, b \in \RR$, then $(a, b) = a + bi$.
\end{theorem}
\begin{nproof}
Below are the trivial proofs for the above three theorems.
\begin{align*}
(a, 0) + (b, 0) = (a + b, 0 + 0) = (a + b, 0)
\\ (a, 0)\cdot(b, 0) = (a\cdot b - 0 \cdot 0, a \cdot 0 + 0 \cdot b) = (ab, 0)
\\ i^2 = i\cdot i = (0, 1) \cdot (0, 1) = (-1, 0) = -1
\\ a + bi = (a, 0) + b(0, 1) = (a, 0) + (0, b) = (a, b)
\end{align*}
\end{nproof}
\noindent A slightly odd question may be to ask whether $\CC$ is a subfield of $\RR$, i.e. whether there exists an injective $\psi: \CC \mapsto \RR$ such that $\psi(a + b) = \psi(a) + \psi(b)$ and $\psi(a\cdot b) = \psi(a) \cdot \psi(b)$. As we will prove in Chapter 2, we do have that $\abs{\CC} = \abs{\RR^2} = \abs{\RR}$ (where $\abs{\cdot}$ denotes cardinality of the set, to be defined shortly), so there does exist a bijection (i.e. a function that is both injective/one-to-one and surjective/onto; we will define these terms precisely in the next chapter) between the two sets.
As a Lemma, we have that the only injective function $f: \QQ \mapsto \RR$ that satisfies $f(a+b) = f(a) + f(b)$ and $f(a\cdot b) = f(a)\cdot f(b)$ is $f(x) = x$. The proof of this is left as a homework problem (HW2). Therefore, it follows that the only injective function $g: \QQ \times \set{0} \mapsto \RR$ (where $\times$ denotes the Cartesian product) satisfying the analogous properties is given by $g((x, 0)) = x$. We now give a proof that $\CC$ is not a subfield of $\RR$.
\begin{proof}
Suppose then for the sake of contradiction that there exists an injective function $\psi: \CC \mapsto \RR$ satisfying $\psi(a + b) = \psi(a) + \psi(b)$ and $\psi(a \cdot b) = \psi(a)\cdot\psi(b)$. By the Lemma above, $\psi$ must act as the identity on the copy of $\QQ$ inside $\CC$; in particular, $\psi(-1) = -1$. But then $\psi(i)^2 = \psi(i)\cdot\psi(i) = \psi(i \cdot i) = \psi(-1) = -1$, which is impossible since the square of any real number is nonnegative. Hence no such injection exists from $\CC$ ($\RR^2$) to $\RR$, and $\CC$ is not a subfield of $\RR$.
\end{proof}
\begin{definition}{Real/Imaginary Parts and Complex Conjugates}{1.30}
Let $z = a + bi \in \CC$. Then, $\Re(z) = a$ is the \textbf{real part} of $z$ and $\Im(z) = b$ is the \textbf{imaginary part} of $z$. The \textbf{complex conjugate} of $z$, denoted by $\bar{z}$, is defined as $\bar{z} = a - bi$.
\end{definition}
\begin{theorem}{}{1.31}
Let $z, w \in \CC$. It then follows that:
\begin{enumerate}
\item $\overline{z + w} = \bar{z} + \bar{w}$.
\item $\overline{zw} = \bar{z} \cdot \bar{w}$.
\item $z + \bar{z} = 2\Re(z)$, $z - \bar{z} = 2i\Im(z)$.
\item $z\bar{z}$ is real and positive (except when $z = 0$).
\end{enumerate}
\end{theorem}
\begin{nproof}
We prove (d). We have that:
\begin{align*}
z\bar{z} = (a + bi)(a-bi) = a^2 + b^2
\end{align*}
$a^2 + b^2 \geq 0$, and $a^2 + b^2 = 0 \iff a = 0, b = 0$ which proves the claim. \qed
\end{nproof}
\begin{definition}{Absolute Value}{1.32}
We define the \textbf{absolute value} $\abs{z}$ of a complex number $z$ as $\abs{z} = \sqrt{z\bar{z}}$. Note that if $a \in \RR$ and $z = (a, 0)$, then
\begin{align*}
\abs{z} = \sqrt{a^2} =
\begin{cases}
a & \text{if $a \geq 0$}
\\ -a & \text{if $a < 0$}
\end{cases}
\end{align*}
Hence if $a \in \RR$, we can define $\abs{a} = \abs{(a, 0)}$.
\end{definition}
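\noindent For example, if $z = 3 + 4i$, then $z\bar{z} = (3 + 4i)(3 - 4i) = 9 + 16 = 25$, so $\abs{z} = 5$.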
\begin{theorem}{}{1.33}
Let $z, w \in \CC$.
\begin{enumerate}
\item $\abs{z} \geq 0$, $\abs{z} = 0 \iff z = 0$.
\item $\abs{\bar{z}} = \abs{z}$.
\item $\abs{z}\abs{w} = \abs{zw}$.
\item $\abs{\Re(z)} \leq \abs{z}$, $\abs{\Im(z)} \leq \abs{z}$.
\item $\abs{z + w} \leq \abs{z} + \abs{w}$.
\end{enumerate}
\end{theorem}
\begin{nproof}
We prove (d) and (e). Let $z, w \in \CC$, with $z = a + bi$. For (d) we have that $\Re(z) = a$, so \begin{align*}
\abs{\Re(z)} = \abs{a} = \sqrt{a^2} \leq \sqrt{a^2 + b^2} = \abs{z}
\end{align*}
And an equivalent proof follows for $\Im(z)$. For (e), we have that:
\begin{align*}
\abs{z + w}^2 &= (z + w)(\overline{z + w})
\\ &= z\bar{z} + z\bar{w} + w\bar{z} + w\bar{w}
\\ &= \abs{z}^2 + 2\Re(z\bar{w}) + \abs{w}^2
\\ &\leq \abs{z}^2 + 2\abs{\Re(z\bar{w})} + \abs{w}^2 & \text{($x \leq \abs{x}$ for real $x$)}
\\ &= \abs{z}^2 + 2\abs{z\bar{w}} + \abs{w}^2 & \text{(1.33(d))}
\\ &= \abs{z}^2 + 2\abs{z}\abs{\bar{w}} + \abs{w}^2 & \text{(1.33(c))}
\\ &= \abs{z}^2 + 2\abs{z}\abs{w} + \abs{w}^2 & \text{(1.33(b))}
\\ &= (\abs{z} + \abs{w})^2
\end{align*}
The claim follows by taking square roots on both sides. \qed
\end{nproof}
\subsection{The Cauchy-Schwarz Inequality}
Recall the summation notation:
\begin{align*}
x_1 + x_2 + \ldots + x_n = \sum_{j=1}^{n}x_j
\end{align*}
\stepcounter{rudin}
\begin{theorem}{Cauchy-Schwarz Inequality}{1.35}
Let $a_1, \ldots, a_n, b_1, \ldots, b_n \in \CC$. We then have that:
\begin{align*}
\abs{\sum_{j=1}^na_j\bar{b}_j}^2 \leq \left(\sum_{j=1}^n\abs{a_j}^2\right)\left(\sum_{j=1}^n\abs{b_j}^2\right)
\end{align*}
\end{theorem}
\noindent Note that in the above theorem, both the RHS and the LHS are real numbers (check!) so the inequality makes sense (recall that there is no ordering on $\CC$ compatible with its field structure; in fact, it is impossible to define one).
A geometric interpretation of the above inequality is as follows. Let $\v{a}, \v{b}$ be vectors in $\CC^n$. Then, $\avg{\v{a}, \v{b}} = \sum_{j=1}^na_j\bar{b}_j$ is the inner product of $\v{a}$ and $\v{b}$. Then, the inequality says that $\abs{\avg{\v{a}, \v{b}}}^2 \leq \avg{\v{a}, \v{a}}\cdot\avg{\v{b}, \v{b}}$.
\begin{nproof}
Define $A = \sum_{j=1}^n\abs{a_j}^2$, $B = \sum_{j=1}^n\abs{b_j}^2$, and $C = \sum_{j=1}^na_j\bar{b}_j$. If $B = 0$ (that is, all of the $b_j$s are zero) then the LHS/RHS are both zero and we are done. So, let us assume that $B > 0$. Let $\lambda \in \CC$, and we then have that:
\begin{align*}
0 &\leq \sum_{j=1}^n\abs{a_j + \lambda b_j}^2 \\ &= \sum_{j=1}^n(a_j + \lambda b_j)(\bar{a}_j + \bar{\lambda}\bar{b}_j)
\\ &= \sum_{j=1}^n\abs{a_j}^2 + \bar{\lambda}\sum_{j=1}^na_j\bar{b}_j + \lambda\sum_{j=1}^n\bar{a}_jb_j + \abs{\lambda}^2\sum_{j=1}^n\abs{b_j}^2
\\ &= A + \bar{\lambda}C + \lambda\bar{C} + \abs{\lambda}^2B
\end{align*}
This inequality holds for any $\lambda$; it therefore holds for $\lambda = -\frac{C}{B}$, so:
\begin{align*}
0 &\leq A - \frac{\bar{C}}{B}C - \frac{C}{B}\bar{C} + \frac{C\bar{C}}{B^2}B
\\ &= A - \frac{\abs{C}^2}{B}
\end{align*}
So we therefore obtain that $\abs{C}^2 \leq AB$ which is the desired inequality. \qed
\end{nproof}
\noindent A natural question given any inequality is when equality holds; the answer turns out to be when the vectors are linearly dependent, that is, at least one of $\v{a} = \alpha \v{b}$ and $\v{b} = \beta \v{a}$ ($\alpha, \beta \in \CC$) holds. Note that we only require one of the two relations to hold; in the case that exactly one of $\v{a}, \v{b}$ is $\v{0}$ (the vector of all zeros), both equalities cannot be true. It is left as a homework problem to verify that equality holds in the Cauchy-Schwarz inequality if and only if at least one of the two conditions holds (HW3).
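\noindent For a concrete instance of the equality case: take $n = 2$, $\v{a} = (1, 2)$ and $\v{b} = (2, 4) = 2\v{a}$. Then $\abs{\sum_{j} a_j \bar{b}_j}^2 = (1\cdot 2 + 2 \cdot 4)^2 = 100$ and $\left(\sum_j \abs{a_j}^2\right)\left(\sum_j \abs{b_j}^2\right) = 5 \cdot 20 = 100$, so equality indeed holds.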
\subsection{Euclidean Space}
\begin{definition}{Euclidean k-space}{1.36}
If $k \in \NN$, define $\RR^k$ as the set of $k$-tuples of real numbers:
\begin{align*}
\RR^k = \set{\v{x} = (x_1, x_2, \ldots, x_k): x_1, x_2, \ldots, x_k \in \RR}
\end{align*}
We can then define vector addition as:
\begin{align*}
\v{x} + \v{y} = (x_1 + y_1, x_2 + y_2, \ldots, x_k + y_k)
\end{align*}
And scalar multiplication (for $\alpha \in \RR$) to be:
\begin{align*}
\alpha\v{x} = (\alpha x_1, \alpha x_2, \ldots, \alpha x_k)
\end{align*}
These operations make $\RR^k$ into a vector space over the real field. We can define the inner product over $\RR^k$ to be:
\begin{align*}
(\v{x}, \v{y}) = \v{x} \cdot \v{y} = \sum_{j=1}^k x_jy_j
\end{align*}
This allows us to define the norm of $\v{x}$ to be:
\begin{align*}
\abs{\v{x}} = \sqrt{\v{x} \cdot \v{x}} = \left(\sum_{j=1}^k x_j^2\right)^{1/2}
\end{align*}
$\RR^k$ with the above inner product and norm is called \textbf{Euclidean k-space}.
\end{definition}
\noindent We briefly remark that the above inner product agrees with the inner product we defined over $\CC^k$; we can identify $r \in \RR$ with $(r, 0) \in \CC$, and hence recognize $\RR^k \subset \CC^k$ as the set of vectors whose coordinates all have imaginary part zero. Then, the inner product gives the exact same result, as $\bar{b}_j = b_j$ for any complex number with imaginary part zero. From this we can conclude that the Cauchy-Schwarz inequality also holds in $\RR^k$.
Note that although the field $\CC$ is $\RR^2$ with multiplication defined as in Definition \ref{def:1.24}, in general there is no vector multiplication on $\RR^n$ that makes it a field. That is, we cannot make $\RR^n$ into a field in general; though we can make it into a vector space, which has slightly less structure.
One possibly familiar notion of vector multiplication in $\RR^3$ is the cross product. For $\v{x} = (x_1, x_2, x_3)$ and $\v{y} = (y_1, y_2, y_3)$, the cross product is defined as:
\begin{align*}
\v{x} \times \v{y} = (x_2y_3 - x_3y_2, x_3y_1 - x_1y_3, x_1y_2 - x_2y_1)
\end{align*}
However, the cross product does not satisfy properties that would be necessary to make $\RR^3$ a field. For one, it is not commutative, but anticommutative; $\v{x} \times \v{y} = -\v{y} \times \v{x}$. One might ask whether vectors in $\RR^3$ have well-defined inverses, but even before that, there does not exist an identity vector in $\RR^3$ under the cross product! In fact, $\RR^3$ under vector addition and cross product multiplication can be viewed as a noncommutative (and nonassociative) ring without an identity.
Note that there is a more general notion of a ``wedge product'' between vectors in $\RR^n$. We are in a sense very ``lucky'' that in $\RR^3$, the wedge product of two vectors returns another vector in $\RR^3$.
\begin{theorem}{}{1.37}
Let $\v{x}, \v{y}, \v{z} \in \RR^k$, and $\alpha \in \RR$. Then:
\begin{enumerate}
\item $\abs{\v{x}} \geq 0$
\item $\abs{\v{x}} = 0 \iff \v{x} = (0, \ldots, 0)$. This is often denoted as $\v{0}$, the ``zero vector''.
\item $\abs{\alpha\v{x}} = \abs{\alpha}\abs{\v{x}}$
\item $\abs{\v{x} \cdot \v{y}} \leq \abs{\v{x}}\abs{\v{y}}$
\item $\abs{\v{x} + \v{y}} \leq \abs{\v{x}} + \abs{\v{y}}$
\item $\abs{\v{x} - \v{z}} \leq \abs{\v{x} - \v{y}} + \abs{\v{y} - \v{z}}$
\end{enumerate}
\end{theorem}
\noindent (e) and (f) are often called ``triangle inequalities''; a visual intuition for these inequalities is given in the following figure:
\begin{figure}[htbp]
\centering
\begin{tikzpicture}
\draw[black, thick] (0, 0) node[anchor=north] {$\v{z}$} -- (2, 2) node[anchor=south] {$\v{y}$} -- (-2, 2) node[anchor=south] {$\v{x}$} -- (0, 0);
\draw[] (1.6, 1) node[anchor=north] {$\abs{\v{y} - \v{z}}$};
\draw[] (-1.6, 1) node[anchor=north] {$\abs{\v{x} - \v{z}}$};
\draw[] (0, 2) node[anchor=south] {$\abs{\v{x} - \v{y}}$};
\end{tikzpicture}
\caption{Visual picture for Theorem \ref{thm:1.37}(f), drawn in $\RR^2$. Suppose we started at $\v{x}$ and wanted the shortest path to $\v{z}$; we could try walking directly to $\v{z}$, or we could try walking somewhere else first ($\v{y}$) and then to $\v{z}$. However, the theorem tells us that the direct path is never longer than the detour in Euclidean space.}
\label{fig3}
\end{figure}
\noindent Note that equality in part (f) arises if and only if $\v{y}$ lies on the line segment between $\v{x}$ and $\v{z}$.
\begin{nproof}
(a)-(c) are immediate, and (d) immediately follows from Theorem \ref{thm:1.35} (Cauchy-Shwartz). For (e), we have that:
\begin{align*}
\abs{\v{x} + \v{y}}^2 &= (\v{x} + \v{y})\cdot(\v{x} + \v{y})
\\ &= \abs{\v{x}}^2 + 2\v{x}\cdot\v{y} + \abs{\v{y}}^2
\\ &\leq \abs{\v{x}}^2 + \abs{2\v{x}\cdot\v{y}} + \abs{\v{y}}^2
\\ &\leq \abs{\v{x}}^2 + 2\abs{\v{x}}\abs{\v{y}} + \abs{\v{y}}^2 \quad \text{(1.37(d))}
\\ &= (\abs{\v{x}} + \abs{\v{y}})^2
\end{align*}
And the claim follows by taking square roots on both sides. For (f), substitute $\v{x} \mapsto \v{x} - \v{y}$ and $\v{y} \mapsto \v{y} - \v{z}$ into (e). \qed
\end{nproof}
Though we discuss the Euclidean norm here, it may also be of interest to consider other norms. One example is the $L_1$ norm (cf.\ the norm discussed in Definition \ref{def:1.36}, which is the $L_2$ norm), which is the sum of the absolute values of the components. For $\v{x} = (x_1, x_2, \ldots, x_n)$ and $\v{y} = (y_1, y_2, \ldots, y_n)$ we have that:
\begin{align*}
\abs{\v{x}}_1 = \abs{x_1} + \abs{x_2} + \ldots + \abs{x_n}, \quad \abs{\v{x} - \v{y}}_{1} = \abs{x_1 - y_1} + \abs{x_2 - y_2} + \ldots + \abs{x_n - y_n}
\end{align*}
The $L_1$ norm is often called the ``Taxicab norm'' or the ``Manhattan norm'' as the way it quantifies distance is akin to walking in discrete NSEW chunks; much like a taxi running through a grid-like New York City!
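\noindent For instance, for $\v{x} = (3, 4) \in \RR^2$ we have $\abs{\v{x}}_2 = \sqrt{3^2 + 4^2} = 5$, while $\abs{\v{x}}_1 = 3 + 4 = 7$; the taxicab route is never shorter than the straight-line route.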
\begin{figure}[htbp]
\centering
\begin{tikzpicture}
\draw[black, thick] (-3, 0) node[anchor=north] {$(0, 0)$} -- (1, 0) node[anchor=north] {$(x_1, 0)$} -- (1, 2) node[anchor=south] {$\v{x} = (x_1, x_2)$} -- (-3, 0);
\draw[] (-1, 0) node[anchor=north] {$\abs{x_1}$};
\draw[] (1, 1) node[anchor=west] {$\abs{x_2}$};
\draw[] (-2, 1.2) node[anchor=south] {$\abs{\v{x}}_2 = \sqrt{x_1^2 + x_2^2}$};
\draw[] (3.5, 1) node {$\abs{\v{x}}_{1} = \abs{x_1} + \abs{x_2}$};
\end{tikzpicture}
\caption{Visual comparison of the $L_1$ and $L_2$ norms in $\RR^2$.}
\label{fig4}
\end{figure}
\noindent We are free to generalize this notion to the $L_p$ norm for any real $p \geq 1$, and we may also define the $L_{\infty}$ norm, which for $\v{x} \in \RR^n$ is defined as:
\begin{align*}
\abs{\v{x}}_\infty = \max_i \abs{x_i}
\end{align*}
In general for any $\v{x} \in \RR^n$, we have that $\abs{\v{x}}_1 \geq \abs{\v{x}}_2 \geq \abs{\v{x}}_3 \geq \ldots \geq \abs{\v{x}}_\infty$. We note that we can generalize these norms to the case of infinitely many components:
\begin{align*}
\norm{\v{x}}_p = \left(\sum_{i = 1}^\infty \abs{x_i}^p\right)^{1/p} < \infty \quad
\|f\|_{p} \equiv\left(\int_{S}|f|^{p} \mathrm{~d} \mu\right)^{1 / p}<\infty
\end{align*}
These allow us to define norms for sequence and function spaces. However, a detailed discussion of these is beyond the scope of this course (to be covered in a later course in functional analysis!). Moreover, we have not even defined what an infinite sum or an integral is yet; we will get to these in later chapters.
"alphanum_fraction": 0.6340807774,
"avg_line_length": 81.0460081191,
"ext": "tex",
"hexsha": "7bc8e55b492e3c2d367b0d3830c678bdf6cb5eb9",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "532c4bf12a8e4ea80a58a83508de05e1f121a79a",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "RioWeil/MATH320-321-notes",
"max_forks_repo_path": "Chapters/ch1.tex",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "532c4bf12a8e4ea80a58a83508de05e1f121a79a",
"max_issues_repo_issues_event_max_datetime": "2021-07-25T17:05:30.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-06-10T23:18:29.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "RioWeil/MATH320-321-Notes",
"max_issues_repo_path": "Chapters/ch1.tex",
"max_line_length": 826,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "532c4bf12a8e4ea80a58a83508de05e1f121a79a",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "RioWeil/MATH320-321-Notes",
"max_stars_repo_path": "Chapters/ch1.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 20318,
"size": 59893
} |
\begin{algorithm}[H]
\caption{Solve Sub model}\label{algo:solve-submodel}
\begin{algorithmic}[1]
\REQUIRE $submodel: \text{Submodel from \Cref{algo:submodel-build}},\ time: \text{Time limit}$
\STATE {$lin\_model = MIPSolver::NewLP(submodel,\ time);$}
\STATE {$lin\_solution,\ status = lin\_model.solve(); $}
\IF {$ status\ \textbf{is}\ OPTIMAL $}
\STATE $int\_model = MIPSolver::NewMIP(submodel,\ time);$
\STATE $int\_model.solution\_count(1);$
\STATE $int\_sol, status = lin\_model.solve();$
\IF {$ status\ \textbf{is}\ INFEASIBLE $}
\RETURN {$lin\_solution, LIN\_FEASIBLE$}
\ELSIF{$ status\ \textbf{is}\ TIMEOUT $}
\RETURN {$\emptyset, TIMEOUT $}
\ELSE
\RETURN {$int\_sol, INT\_FEASIBLE$}
\ENDIF
\ELSE
\RETURN {$\emptyset, INFEASIBLE$}
\ENDIF
\end{algorithmic}
\end{algorithm}
\paragraph{Description} \Cref{algo:solve-submodel} requires as input a valid submodel, built by \Cref{algo:submodel-build}, and the current time limit. The module $MIPSolver$
is an abstraction of a general-purpose MIP solver; it has two constructors:
\begin{itemize}
\item NewLP : create a \emph{Model} object from the submodel, with the specified time limit. All integer constraints are ignored.
\item NewMIP: create a \emph{Model} object from the submodel, with the specified time limit. All integer constraints are preserved.
\end{itemize}
This \emph{Model} object has a \emph{solve} method, which solves the problem according to the model configuration, for example the time limit or the solution count limit.
\emph{solve} returns two values: the first one is the model solution (i.e. a \href{https://en.wikipedia.org/wiki/Associative_array}{dictionary}),
the second is the optimization
status. The possible output\footnote{The actual MIP solver may support more statuses than these, but these are the ones strictly required to implement Feature Kernel.} statuses are:
\begin{enumerate}
\item OPTIMAL: the optimization ended with a proven optimal result,
\item INFEASIBLE: the model is infeasible,
\item TIMEOUT: the optimization took more than the given time limit,
\item SOLUTION\_COUNT: the optimization found the requested maximum number of feasible solutions, but it is not possible to prove that one of these solutions is optimal.
\end{enumerate}
Notice that the statuses from the MIP solver and the statuses returned by \Cref{algo:solve-submodel} are not the same: the statuses of \Cref{algo:solve-submodel} can be
considered internal constants.
The possible outcomes are straightforward, as described in \Cref{sec:submodel-result}: according to the optimization status, \Cref{algo:solve-submodel} returns the current
solution (if available) and the appropriate status.
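As an illustration, a minimal Python sketch of \Cref{algo:solve-submodel} is given below. Here \texttt{MIPSolver}, its constructors, and the status constants are hypothetical placeholders standing in for the actual MIP library used; they do not belong to any specific solver's API.
\begin{verbatim}
# Sketch of Algorithm "Solve Submodel". MIPSolver, the Model objects
# and the status constants are hypothetical placeholders.
def solve_submodel(submodel, time_limit):
    # Solve the linear relaxation first (integer constraints ignored).
    lin_model = MIPSolver.new_lp(submodel, time_limit)
    lin_solution, status = lin_model.solve()
    if status != OPTIMAL:
        return None, "INFEASIBLE"

    # Solve the integer model, stopping at the first feasible solution.
    int_model = MIPSolver.new_mip(submodel, time_limit)
    int_model.set_solution_count_limit(1)
    int_solution, status = int_model.solve()

    if status == INFEASIBLE:
        # No integer solution exists: fall back to the LP solution.
        return lin_solution, "LIN_FEASIBLE"
    if status == TIMEOUT:
        return None, "TIMEOUT"
    # OPTIMAL or SOLUTION_COUNT: an integer-feasible solution was found.
    return int_solution, "INT_FEASIBLE"
\end{verbatim}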
"alphanum_fraction": 0.7026370576,
"avg_line_length": 62.652173913,
"ext": "tex",
"hexsha": "34cda45bbb5098225acc3f7c42886f3b881ce5e8",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "157c3775c5ca239000557d18cf3d6bbb3b350e33",
"max_forks_repo_licenses": [
"CC0-1.0"
],
"max_forks_repo_name": "Optimization-Algorithms/Feature-Kernel",
"max_forks_repo_path": "algorithms/solve_sub_model.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "157c3775c5ca239000557d18cf3d6bbb3b350e33",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC0-1.0"
],
"max_issues_repo_name": "Optimization-Algorithms/Feature-Kernel",
"max_issues_repo_path": "algorithms/solve_sub_model.tex",
"max_line_length": 176,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "157c3775c5ca239000557d18cf3d6bbb3b350e33",
"max_stars_repo_licenses": [
"CC0-1.0"
],
"max_stars_repo_name": "Optimization-Algorithms/Feature-Kernel",
"max_stars_repo_path": "algorithms/solve_sub_model.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 734,
"size": 2882
} |
% !TEX program = xelatex
% general resume
% by EULIR
\documentclass{resume}
\begin{document}
\pagenumbering{gobble} % suppress displaying page number
\name{RUNMING LI}
\basicInfo{
\email{runmingli\[email protected]} \textperiodcentered\
\phone{(+86) 139-8001-6050} \textperiodcentered\
\github[EULIR]{https://github.com/EULIR} \textperiodcentered\
\linkedin[RUNMING LI]{www.linkedin.com/in/runming}
}
\section{\faGraduationCap\ Education}
\datedsubsection{\textbf{Chengdu No.7 High School Linyin Campus}, Sichuan, China}{Sep. 2017 -- Current}
\section{\faUsers\ Work Experience}
\datedsubsection{\textbf{Sobey Digital Technology Co., Ltd.}, Sichuan, China}{March. 2019 -- April. 2019}
\role{Intern}{Assistant Tester, working on EditMax nonlinear editing system development}
\section{\faGithubAlt\ Personal Projects}
\datedsubsection{\textbf{EULIR Extended Math Library}}{\url{https://github.com/EULIR/Extended-Math-Library}}
Math API for the JVM.
\begin{itemize}
\item Implements mathematical formulas and expressions natively.
\item Contains statistical formulas and calculations.
\item Unit-test-driven library covering 100 percent of classes; uses the JUnit framework.
\item JavaDoc page on {\url{https://eulir.github.io/eeml}}.
\end{itemize}
\datedsubsection{\textbf{Project Euler}}{\url{https://github.com/EULIR/PROJECT-EULER}}
Java solutions to Project Euler.
\begin{itemize}
\item Contains Java solutions to more than 100 project Euler problems.
\item Ranked top 1 percent across the site.
\end{itemize}
%\datedsubsection{\textbf{CAPTCHAs-Solver}}{\url{https://github.com/EULIR/CAPTCHAs-Solver}}
%Captcha slover written in Python.
%\begin{itemize}
% \item Light captcha solver, containing only four py files.
% \item Allows vague recognition.
% \item Able to recognize alphabets and figures.
% \item 4 characters recognition supported currently.
%\end{itemize}
\datedsubsection{\textbf{Brainf**k Interpreter}}{\url{https://github.com/EULIR/Brainfuck-interpreter}}
Interpreter for Brainf**k programming language written in Java.
\begin{itemize}
\item Interprets brainf**k syntax 100\% correct.
\item Contains syntactic errors detection mechanism.
\end{itemize}
\datedsubsection{\textbf{Form Drawer}}{\url{https://github.com/GreatChina-CSClub/form-drawer}}
Simple form drawer for data visualization.
\begin{itemize}
\item Supports histograms and line charts in both simple and layered forms.
\item Write-to-file mode enabled, generating images in various formats.
\item Pictures can be dynamically resized.
\end{itemize}
\section{\faCogs\ Skills}
\begin{itemize}[parsep=0.5ex]
\item \textbf{Programming Languages}:
skilled at Java; experienced in Kotlin/Python/C/Pascal; four years of programming experience, with at least one year in each language.
\item \textbf{Website Design}:
experienced in HTML/CSS, personal sites including {\url{http://www.neofuture.tech}} and {\url{http://www.cmstation.cn}}.
\item \textbf{Developing Tools}:
can adapt to any editors/OSs, normally use JetBrains IDEs and Notepad++ under Windows and CentOS; have experience with team tools like git, GitHub, Coding.net.
\item \textbf{Compiler}:
understand compilers at a basic level, including principles, techniques, and tools.
\item \textbf{Statistics}:
one year of college level statistics experience.
\end{itemize}
\section{\faHeartO\ Achievements}
\datedline{\textbf{Top 1 percent} on \href{https://projecteuler.net} {Project Euler} across the whole site}{Mar. 2018}
\section{\faInfo\ Miscellaneous}
\begin{itemize}[parsep=0.5ex]
%\item Blog: \url{https://eulir.github.io/}
\item Opensource contributions: more than 700 contributions in the past year on GitHub \url{https://github.com/EULIR}
\item Languages: English - professional working proficiency (TOEFL 111), Mandarin Chinese - native speaker
\item Get the latest version of this resume: \url{https://tinyurl.com/eulir-resume}
\end{itemize}
\end{document}
\chapter{Nuclear solutions}
\begin{abox}
Practice set 1 solutions
\end{abox}
\begin{enumerate}
\item The radius of a ${ }_{29}^{64} \mathrm{Cu}$ nucleus is measured to be $4.8 \times 10^{-13} \mathrm{~cm}$.
{\exyear{NET JUNE 2011}}\\
(A) The radius of a ${ }_{12}^{27} \mathrm{Mg}$ nucleus can be estimated to be
\begin{tasks}(2)
\task[\textbf{A.}] $2.86 \times 10^{-13} \mathrm{~cm}$
\task[\textbf{B.}]$5.2 \times 10^{-13} \mathrm{~cm}$
\task[\textbf{C.}] $3.6 \times 10^{-13} \mathrm{~cm}$
\task[\textbf{D.}]$8.6 \times 10^{-13} \mathrm{~cm}$
\end{tasks}
\begin{answer}
\begin{align*}
R&=R_{0}(A)^{1 / 3} \Rightarrow \frac{R_{M g}}{R_{C u}}=\left(\frac{A_{M g}}{A_{C u}}\right)^{1 / 3}=\left(\frac{27}{64}\right)^{1 / 3}\\
\Rightarrow \frac{R_{M g}}{R_{C u}}&=\frac{3}{4} \Rightarrow R_{M g}=\frac{3}{4} \times 4.8 \times 10^{-13}=3.6 \times 10^{-13} \mathrm{~cm}
\end{align*}
The correct option is \textbf{(c)}
\end{answer}
(B) The root-mean-square (r.m.s) energy of a nucleon in a nucleus of atomic number $A$ in its ground state varies as:
\begin{tasks}(2)
\task[\textbf{A.}] $A^{4 / 3}$
\task[\textbf{B.}]$A^{1 / 3}$
\task[\textbf{C.}] $A^{-1 / 3}$
\task[\textbf{D.}] $A^{-2 / 3}$
\end{tasks}
\begin{answer}
The correct option is \textbf{(c)}
\end{answer}
\item According to the shell model the spin and parity of the two nuclei ${ }_{51}^{125} S b$ and ${ }_{38}^{89} \mathrm{Sr}$ are, respectively,
{\exyear{NET DEC 2011}}\\
\begin{tasks}(2)
\task[\textbf{A.}] $\left(\frac{5}{2}\right)^{+}$and $\left(\frac{5}{2}\right)^{+}$
\task[\textbf{B.}]$\left(\frac{5}{2}\right)^{+}$and $\left(\frac{7}{2}\right)^{+}$
\task[\textbf{C.}]$\left(\frac{7}{2}\right)^{+}$and $\left(\frac{5}{2}\right)^{+}$
\task[\textbf{D.}]$\left(\frac{7}{2}\right)^{+}$and $\left(\frac{7}{2}\right)^{+}$
\end{tasks}
\begin{answer}
${ }_{51}^{125} \mathrm{Sb} ; Z=51$ and $N=74$
$$
Z=51
$$
$$
\left(s_{1 / 2}\right)^{2}\left(p_{3 / 2}\right)^{4}\left(p_{1 / 2}\right)^{2}\left(d_{5 / 2}\right)^{6}\left(s_{1 / 2}\right)^{2}\left(d_{3 / 2}\right)^{4}\left(f_{7 / 2}\right)^{8}\left(p_{3 / 2}\right)^{4}\left(f_{5 / 2}\right)^{6}\left(p_{1 / 2}\right)^{2}\left(g_{9 / 2}\right)^{10}\left(g_{7 / 2}\right)^{1}
$$
$\Rightarrow j=\frac{7}{2}$ and $l=4$. Thus spin and parity $=\left(\frac{7}{2}\right)^{+}$\\
\begin{align*}
&{ }_{38}^{89} S r ; Z=38 \text { and } N=51 \\
&N=51: \\
&\left(s_{1 / 2}\right)^{2}\left(p_{3 / 2}\right)^{4}\left(p_{1 / 2}\right)^{2}\left(d_{5 / 2}\right)^{6}\left(s_{1 / 2}\right)^{2}\left(d_{3 / 2}\right)^{4}\left(f_{7 / 2}\right)^{8}\left(p_{3 / 2}\right)^{4}\left(f_{5 / 2}\right)^{6}\left(p_{1 / 2}\right)^{2}\left(g_{9 / 2}\right)^{10}\left(d_{5 / 2}\right)^{1} \\
&\Rightarrow j=\frac{5}{2} \text { and } l=2 \text {. Thus spin and parity }=\left(\frac{5}{2}\right)^{+}
\end{align*}
The correct option is \textbf{(c)}
\end{answer}
\item The difference in the Coulomb energy between the mirror nuclei ${ }_{24}^{49} \mathrm{Cr}$ and ${ }_{25}^{49} \mathrm{Mn}$ is 6.0 MeV. Assuming that the nuclei have a spherically symmetric charge distribution and that $e^{2}$ is approximately $1.0 \mathrm{MeV}-\mathrm{fm}$, the radius of the ${ }_{25}^{49} \mathrm{Mn}$ nucleus is
{\exyear{NET DEC 2011}}\\
\begin{tasks}(2)
\task[\textbf{A.}] $4.9 \times 10^{-13} \mathrm{~m}$
\task[\textbf{B.}]$4.9 \times 10^{-15} \mathrm{~m}$
\task[\textbf{C.}]$5.1 \times 10^{-13} \mathrm{~m}$
\task[\textbf{D.}]$5.1 \times 10^{-15} \mathrm{~m}$
\end{tasks}
\begin{answer}
$$
R=\frac{3 e^{2}}{5 \cdot \Delta W}\left(Z_{1}^{2}-Z_{2}^{2}\right)=\frac{3 \times 1 \times 10^{-15}}{5 \times 6}\left(25^{2}-24^{2}\right)=4.9 \times 10^{-15} \mathrm{~m}
$$
The correct option is \textbf{(b)}
\end{answer}
\item The ground state of the ${ }_{82}^{207} \mathrm{~Pb}$ nucleus has spin-parity $J^{p}=\frac{1^{-}}{2}$, while the first excited state has $J^{p}=\frac{5^{-}}{2}$. The electromagnetic radiations emitted when the nucleus makes a transition from the first excited state to the ground state are
{\exyear{NET JUNE 2012}}\\
\begin{tasks}(2)
\task[\textbf{A.}] E2 and E3
\task[\textbf{B.}] M2 or E3
\task[\textbf{C.}] E2 or M3
\task[\textbf{D.}] M2 or M3
\end{tasks}
\begin{answer}
No parity change; $\Delta J=2,3$\\
For $E_{l}$ type, $\Delta \pi=(-1)^{l}$, (for no parity change $l=2$ )\\
For $M_{l}$ type, $\Delta \pi=(-1)^{l+1}$, (for no parity change $l=3$ )\\
$\Delta J=2$, No parity change $\rightarrow E 2 ; \Delta J=3$, No parity change $\rightarrow M 3$\\
The correct option is \textbf{(c)}
\end{answer}
\item The binding energy of a light nucleus $(Z, A)$ in $\mathrm{MeV}$ is given by the approximate formula
$$
B(A, Z) \approx 16 A-20 A^{2 / 3}-\frac{3}{4} Z^{2} A^{-1 / 3}+30 \frac{(N-Z)^{2}}{A}
$$
where $N=A-Z$ is the neutron number. The value of $Z$ of the most stable isobar for a given $A$ is
{\exyear{NET JUNE 2013}}
\begin{tasks}(2)
\task[\textbf{A.}] $\frac{A}{2}\left(1-\frac{A^{2 / 3}}{160}\right)^{-1} \quad$
\task[\textbf{B.}]$\frac{A}{2}$
\task[\textbf{C.}]$\frac{A}{2}\left(1-\frac{A^{2 / 3}}{120}\right)^{-1}$
\task[\textbf{D.}]$\frac{A}{2}\left(1+\frac{A^{4 / 3}}{64}\right)^{-1}$
\end{tasks}
\begin{answer}
$$\left.\frac{\partial B}{\partial Z}\right|_{Z=Z^{\prime}}=0 \Rightarrow Z^{\prime}=\frac{A}{2}\left(1-\frac{A^{2 / 3}}{160}\right)^{-1}$$
The correct option is \textbf{(a)}
\end{answer}
\item The intrinsic electric dipole moment of a nucleus ${ }_{Z}^{A} X$
{\exyear{NET DEC 2013}}
\begin{tasks}(2)
\task[\textbf{A.}] increases with $Z$, but independent of $A$
\task[\textbf{B.}]decreases with $Z$, but independent of $A$
\task[\textbf{C.}]is always zero
\task[\textbf{D.}]increases with $Z$ and $A$
\end{tasks}
\begin{answer}
The static electric dipole moment of a nucleus vanishes, since nuclear states have definite parity and the dipole operator is parity-odd; it is therefore zero independent of $Z$ and $A$.\\
The correct option is \textbf{(c)}
\end{answer}
\item According to the shell model, the total angular momentum (in units of $\hbar$ ) and the parity of the ground state of the ${ }_{3}^{7} L i$ nucleus is
{\exyear{NET DEC 2013}}
\begin{tasks}(2)
\task[\textbf{A.}] $\frac{3}{2}$ with negative parity
\task[\textbf{B.}] $\frac{3}{2}$ with positive parity
\task[\textbf{C.}]$\frac{1}{2}$ with positive parity
\task[\textbf{D.}]$\frac{7}{2}$ with negative parity
\end{tasks}
\begin{answer}
$Z=3, N=4$\\
For odd $Z=3 ;\left(s_{1 / 2}^{2}\right)\left(p_{3 / 2}^{1}\right) \Rightarrow j=3 / 2, l=1$ and parity $=(-1)^{1}=-1$.\\
The correct option is \textbf{(a)}
\end{answer}
\item If the binding energy $B$ of a nucleus (mass number $A$ and charge $Z$ ) is given by
$$
B=a_{V} A-a_{S} A^{2 / 3}-a_{s y m} \frac{(2 Z-A)^{2}}{A}-\frac{a_{C} Z^{2}}{A^{1 / 3}}
$$
where $a_{V}=16 \mathrm{MeV}, a_{S}=16 \mathrm{MeV}, a_{s y m}=24 \mathrm{MeV}$ and $a_{C}=0.75 \mathrm{MeV}$, then for the most stable isobar for a nucleus with $A=216$ is
{\exyear{NET DEC 2014}}
\begin{tasks}(2)
\task[\textbf{A.}] 68
\task[\textbf{B.}]72
\task[\textbf{C.}]84
\task[\textbf{D.}]92
\end{tasks}
\begin{answer}
\begin{align*}
&\text { For the most stable isobar for a nucleus }\\
\frac{d B}{d Z}&=0 \Rightarrow-a_{s y m} \frac{2(2 Z-A) \times 2}{A}-\frac{2 a_{C} Z}{A^{1 / 3}}=0\\
&\Rightarrow 24 \frac{2(2 Z-216) \times 2}{216}+0.75 \frac{2 Z}{(216)^{1 / 3}}=0 \Rightarrow \frac{4(2 Z-216)}{9}+\frac{3}{4} \frac{2 Z}{6}=0\\
&\Rightarrow \frac{4(2 Z-216)}{9}+\frac{Z}{4}=0 \Rightarrow 16(2 Z-216)+9 Z=0 \Rightarrow 41 Z=216 \times 16=3456 \Rightarrow Z \approx 84.3
\end{align*}
The correct option is \textbf{(c)}
\end{answer}
\item Let us approximate the nuclear potential in the shell model by a three dimensional isotropic harmonic oscillator. Since the lowest two energy levels have angular momenta $l=0$ and $l=1$ respectively, which of the following two nuclei have magic numbers of protons and neutrons?
{\exyear{NET JUNE 2015}}
\begin{tasks}(2)
\task[\textbf{A.}] ${ }_{2}^{4} \mathrm{He}$ and ${ }_{8}^{16} \mathrm{O}$
\task[\textbf{B.}]${ }_{1}^{2} D$ and ${ }_{4}^{8} B e$
\task[\textbf{C.}]${ }_{2}^{4} \mathrm{He}$ and ${ }_{4}^{8} \mathrm{Be}$
\task[\textbf{D.}]${ }_{2}^{4} \mathrm{He}$ and ${ }_{6}^{12} \mathrm{C}$
\end{tasks}
\begin{answer}
${ }_{2} H e^{4}$ has $Z=2, N=2$\\
${ }_{8} O^{16}$ has $Z=8, N=8$ magic numbers $(2,8,20,28,50,82,126)$\\
The correct option is \textbf{(a)}
\end{answer}
\item Of the nuclei of mass number $A=125$, the binding energy calculated from the liquid drop model (given that the coefficients for the Coulomb and the asymmetry energy are $a_{c}=0.7 \mathrm{MeV}$ and $a_{s y m}=22.5 \mathrm{MeV}$ respectively) is a maximum for
{\exyear{NET DEC 2015}}
\begin{tasks}(2)
\task[\textbf{A.}] ${ }_{54}^{125} \mathrm{Xe}$
\task[\textbf{B.}] ${ }_{53}^{124} I$
\task[\textbf{C.}]${ }_{52}^{125} \mathrm{Te}$
\task[\textbf{D.}]${ }_{51}^{125} \mathrm{Sb}$
\end{tasks}
\begin{answer}
\begin{align*}
Z_{0}&=\frac{4 a_{a}+a_{c} A^{-1 / 3}}{2 a_{c} A^{-1 / 3}+8 a_{a} A^{-1}}=\frac{4 a_{a} A+a_{c} A^{2 / 3}}{8 a_{a}+2 a_{c} A^{2 / 3}}\\
Z_{0}&=\frac{4 \times 22.5 \times 125+0.7\left(5^{3}\right)^{2 / 3}}{8 \times 22.5+2 \times 0.7\left(5^{3}\right)^{2 / 3}}\\
&=\frac{11250+17.5}{180+35}=\frac{11267.5}{215}=52.4 \Rightarrow Z_{0} \approx 52
\end{align*}
The correct option is \textbf{(c)}
\end{answer}
\item A radioactive element $X$ decays to $Y$, which in turn decays to a stable element $Z$. The decay constant from $X$ to $Y$ is $\lambda_{1}$, and that from $Y$ to $Z$ is $\lambda_{2}$. If, to begin with, there are only $N_{0}$ atoms of $X$, at short times $\left(t \ll \frac{1}{\lambda_{1}}\right.$ as well as $\left.\frac{1}{\lambda_{2}}\right)$ the number of atoms of $Z$ will be
{\exyear{NET JUNE 2016}}
\begin{tasks}(2)
\task[\textbf{A.}] $\frac{1}{2} \lambda_{1} \lambda_{2} N_{0} t^{2}$
\task[\textbf{B.}]$\frac{\lambda_{1} \lambda_{2}}{2\left(\lambda_{1}+\lambda_{2}\right)} N_{0} t$
\task[\textbf{C.}]$\left(\lambda_{1}+\lambda_{2}\right)^{2} N_{0} t^{2}$
\task[\textbf{D.}] $\left(\lambda_{1}+\lambda_{2}\right) N_{0} t$
\end{tasks}
\begin{answer}
\begin{align*}
X \stackrel{\lambda_{1}}{\longrightarrow} Y \stackrel{\lambda_{2}}{\longrightarrow} Z\\
\begin{array}{cccc}
t=0 & N_{0} & 0 & 0 \\
t & N_{1} & N_{2} & N_{3}
\end{array}
\end{align*}
\begin{align*}
&\text { Rate equations }\\
N_{1}&=N_{0} e^{-\lambda_{1} t}, \frac{d N_{2}}{d t}=\lambda_{1} N_{1}-\lambda_{2} N_{2}, \frac{d N_{3}}{d t}=\lambda_{2} N_{2}\\
N_{3}&=N_{0}\left[1+\frac{\lambda_{1} e^{-\lambda_{2} t}}{\left(\lambda_{2}-\lambda_{1}\right)}-\frac{\lambda_{2} e^{-\lambda_{1} t}}{\left(\lambda_{2}-\lambda_{1}\right)}\right]\\
&=N_{0}\left[1+\frac{\lambda_{1}}{\left(\lambda_{2}-\lambda_{1}\right)}\left(1-\lambda_{2} t+\frac{\lambda_{2}^{2} t^{2}}{2}\right)-\frac{\lambda_{2}}{\left(\lambda_{2}-\lambda_{1}\right)}\left(1-\lambda_{1} t+\frac{\lambda_{1}^{2} t^{2}}{2}\right)\right]\\
&=N_{0}\left[1+\frac{\lambda_{1}}{\left(\lambda_{2}-\lambda_{1}\right)}-\frac{\lambda_{1} \lambda_{2} t}{\left(\lambda_{2}-\lambda_{1}\right)}+\frac{\lambda_{1}}{\left(\lambda_{2}-\lambda_{1}\right)} \frac{\lambda_{2}^{2} t^{2}}{2}\right.\\
&\left.-\frac{\lambda_{2}}{\left(\lambda_{2}-\lambda_{1}\right)}+\frac{\lambda_{2} \lambda_{1} t}{\left(\lambda_{2}-\lambda_{1}\right)}-\frac{\lambda_{2}}{\left(\lambda_{2}-\lambda_{1}\right)} \frac{\lambda_{1}^{2} t^{2}}{2}\right]\\
&=N_{0}\left[\frac{\lambda_{1}}{\left(\lambda_{2}-\lambda_{1}\right)} \times \frac{\lambda_{2}^{2} t^{2}}{2}-\frac{\lambda_{2}}{\left(\lambda_{2}-\lambda_{1}\right)} \times \frac{\lambda_{1}^{2} t^{2}}{2}\right]\\
&=\frac{\lambda_{1} \lambda_{2} t^{2}}{2} N_{0}\left[\frac{\lambda_{2}}{\lambda_{2}-\lambda_{1}}-\frac{\lambda_{1}}{\lambda_{2}-\lambda_{1}}\right]=\frac{1}{2} \lambda_{1} \lambda_{2} N_{0} t^{2}
\end{align*}
The correct option is \textbf{(a)}
\end{answer}
\item Let $E_{S}$ denotes the contribution of the surface energy per nucleon in the liquid drop model. The ratio $E_{S}\left({ }_{13}^{27} \mathrm{Al}\right): E_{S}\left({ }_{30}^{64} \mathrm{Zn}\right)$ is
{\exyear{NET JUNE 2016}}
\begin{tasks}(2)
\task[\textbf{A.}] $2: 3$
\task[\textbf{B.}] $4: 3$
\task[\textbf{C.}] $5: 3$
\task[\textbf{D.}] $3: 2$
\end{tasks}
\begin{answer}
$$E_{S}=\frac{B}{A}=\frac{A^{\frac{2}{3}}}{A} \propto A^{-\frac{1}{3}} \Rightarrow \frac{E_{S}(A l)}{E_{S}\left(Z_{n}\right)}=\frac{(27)^{-\frac{1}{3}}}{(64)^{-\frac{1}{3}}}=\frac{(64)^{\frac{1}{3}}}{(27)^{\frac{1}{3}}}=\frac{4}{3}$$
The correct option is \textbf{(b)}
\end{answer}
\item According to the shell model, the nuclear magnetic moment of the ${ }_{13}^{27} \mathrm{Al}$ nucleus is (Given that for a proton $g_{l}=1, g_{s}=5.586$, and for a neutron $g_{l}=0, g_{s}=-3.826$ )
{\exyear{NET JUNE 2016}}
\begin{tasks}(2)
\task[\textbf{A.}] $-1.913 \mu_{N}$
\task[\textbf{B.}]$14.414 \mu_{N}$
\task[\textbf{C.}]$4.793 \mu_{N}$
\task[\textbf{D.}]0
\end{tasks}
\begin{answer}
$${ }_{13} A l^{27}: Z=13, N=14 \text { for } Z=13, S_{1 / 2}^{2}, P_{3 / 2}^{4}, P_{1 / 2}^{2}, d_{5 / 2}^{5} \Rightarrow j=\frac{5}{2}, l=2$$
$$
\text { Magnetic moment, } \mu=\frac{1}{2}\left[2 j-1+g_{S}\right] \mu_{N}=\frac{1}{2}\left[2 \times \frac{5}{2}-1+5.586\right] \mu_{N} \Rightarrow \mu=4.793 \mu_{N}
$$
The correct option is \textbf{(c)}
\end{answer}
\item The spin-parity assignments for the ground and first excited states of the isotope ${ }_{28}^{57} N i$, in the single particle shell model, are
{\exyear{NET DEC 2017}}
\begin{tasks}(2)
\task[\textbf{A.}] $\left(\frac{1}{2}\right)^{-}$and $\left(\frac{3}{2}\right)^{-}$
\task[\textbf{B.}]$\left(\frac{5}{2}\right)^{+}$and $\left(\frac{7}{2}\right)^{+}$
\task[\textbf{C.}]$\left(\frac{3}{2}\right)^{+}$and $\left(\frac{5}{2}\right)^{+}$
\task[\textbf{D.}]$\left(\frac{3}{2}\right)^{-}$and $\left(\frac{5}{2}\right)^{-}$
\end{tasks}
\begin{answer}
For ${ }_{28} N i^{57}: \quad Z=28, N=29 \rightarrow$ will decide the $j^{P}$\\
So, for $N=29$, ground state configuration,\\\\
$1 s_{1 / 2}^{2} 1 p_{3 / 2}^{4} 1 p_{1 / 2}^{2} 1 d_{5 / 2}^{6} 2 s_{1 / 2}^{2} 1 d_{3 / 2}^{4} 1 f_{7 / 2}^{8} 2 p_{3 / 2}^{1}$\\\\
So, $j=\frac{3}{2}, l=1$\\
Spin parity for ground state of ${ }_{28} N i^{57} \rightarrow\left(\frac{3}{2}\right)^{-}$\\
For first excited state,\\
\begin{align*}
&1 s_{1 / 2}^{2} 1 p_{3 / 2}^{4} 1 p_{1 / 2}^{2} 1 d_{5 / 2}^{6} 2 s_{1 / 2}^{2} 1 d_{3 / 2}^{4} 1 f_{7 / 2}^{8} 2 p_{3 / 2}^{1} \rightarrow 1 f_{5 / 2} \\
&j=\frac{5}{2}, l=3 \Rightarrow \text { spin parity } \rightarrow\left(\frac{5}{2}\right)^{-}
\end{align*}
The correct option is \textbf{(d)}
\end{answer}
\end{enumerate}
\newpage
\begin{abox}
Practice set 2 solutions
\end{abox}
\begin{enumerate}
\item In the nuclear shell model the spin parity of ${ }_{7}^{15} N$ is given by
{\exyear{GATE 2010}}
\begin{tasks}(2)
\task[\textbf{A.}] $\frac{1^{-}}{2}$
\task[\textbf{B.}]$\frac{1^{+}}{2}$
\task[\textbf{C.}]$\frac{3^{-}}{2}$
\task[\textbf{D.}]$\frac{3^{+}}{2}$
\end{tasks}
\begin{answer}
$$
Z=7 ;\left(s_{1 / 2}\right)^{2}\left(p_{3 / 2}\right)^{4}\left(p_{1 / 2}\right)^{1} \text { and } N=8
$$
$$
l=1, J=\frac{1}{2} \Rightarrow \text { parity }=(-1)^{1}=-1, \quad \text { spin }-\text { parity }=\left(\frac{1}{2}\right)^{-}
$$
The correct option is \textbf{(a)}
\end{answer}
\item The semi-empirical mass formula for the binding energy of nucleus contains a surface correction term. This term depends on the mass number $A$ of the nucleus as
{\exyear{GATE 2011}}
\begin{tasks}(2)
\task[\textbf{A.}] $A^{-1 / 3}$
\task[\textbf{B.}]$A^{1 / 3}$
\task[\textbf{C.}]$A^{2 / 3}$
\task[\textbf{D.}]$A$
\end{tasks}
\begin{answer}
The correct option is \textbf{(c)}
\end{answer}
\item According to the single particles nuclear shell model, the spin-parity of the ground state of ${ }_{8}^{17} O$ is
{\exyear{GATE 2011}}
\begin{tasks}(2)
\task[\textbf{A.}] $\frac{1}{2}$
\task[\textbf{B.}]$\frac{3}{2}$
\task[\textbf{C.}]$\frac{3}{2}^{+}$
\task[\textbf{D.}] $\frac{5^{+}}{2}$
\end{tasks}
\begin{answer}
$$Z=8 \text { and } N=9 ;\left(s_{1 / 2}\right)^{2}\left(p_{3 / 2}\right)^{4}\left(p_{1 / 2}\right)^{2}\left(d_{5 / 2}\right)^{1}$$
$$
l=2, J=\frac{5}{2} \Rightarrow \text { parity }=(-1)^{2}=+1, \quad \text { spin - parity }=\left(\frac{5}{2}\right)^{+}
$$
The correct option is \textbf{(d)}
\end{answer}
\item In the nuclear shell model, the potential is modeled as $V(r)=\frac{1}{2} m \omega^{2} r^{2}-\lambda \vec{L} \cdot \vec{S}, \lambda>0$ The correct spin-parity and isospin assignments for the ground state of ${ }_{6}^{13} \mathrm{C}$ is
{\exyear{GATE 2015}}
\begin{tasks}(2)
\task[\textbf{A.}] $\frac{1^{-}}{2} ; \frac{-1}{2}$
\task[\textbf{B.}]$\frac{1^{+}}{2} ; \frac{-1}{2}$
\task[\textbf{C.}] $\frac{3^{+}}{2} ; \frac{1}{2}$
\task[\textbf{D.}]$\frac{3^{-}}{2} ; \frac{-1}{2}$
\end{tasks}
\begin{answer}
$$
{ }^{13} C_{6}, \quad N=7, Z=6, \text { for } N=7 ; \quad\left(1 S_{\frac{1}{2}}\right)^{2}\left(1 P_{\frac{3}{2}}\right)^{4}\left(P_{\frac{1}{2}}\right)^{1} \Rightarrow j=\frac{1}{2} \text { and } l=1
$$
$$
\text { Thus spin-parity is }\left(\frac{1}{2}\right)^{-}, \text { and the isospin projection is } T_{z}=\frac{Z-N}{2}=\frac{6-7}{2}=-\frac{1}{2} \text {. }
$$
The correct option is \textbf{(a)}
\end{answer}
\item According to the nuclear shell model, the respective ground state spin-parity values of ${ }_{8}^{15} O$ and ${ }_{8}^{17} O$ nuclei are
{\exyear{GATE 2016}}
\begin{tasks}(2)
\task[\textbf{A.}] $\frac{1^{+}}{2}, \frac{1^{-}}{2}$
\task[\textbf{B.}]$\frac{1}{2}^{-}, \frac{5^{+}}{2}$
\task[\textbf{C.}]$\frac{3^{-}}{2}, \frac{5^{+}}{2}$
\task[\textbf{D.}]$\frac{3^{-}}{2}, \frac{1^{-}}{2}$
\end{tasks}
\begin{answer}
${ }_{8}^{15} O ; Z=8$ and $N=7 ; \quad N=7:\left(s_{1 / 2}\right)^{2}\left(p_{3 / 2}\right)^{4}\left(p_{1 / 2}\right)^{1}$\\
$\Rightarrow j=\frac{1}{2}$ and $l=1$. Thus spin and parity $=\left(\frac{1}{2}\right)^{-}$\\
${ }_{8}^{17} O ; Z=8$ and $N=9 ; \quad N=9:\left(s_{1 / 2}\right)^{2}\left(p_{3 / 2}\right)^{4}\left(p_{1 / 2}\right)^{2}\left(d_{5 / 2}\right)^{1}$\\
$\Rightarrow j=\frac{5}{2}$ and $l=2$. Thus spin and parity $=\left(\frac{5}{2}\right)^{+}$\\
The correct option is \textbf{(b)}
\end{answer}
\item An $\alpha$ particle is emitted by ${ }_{90}^{230} T h$ nucleus. Assuming the potential to be purely Coulombic beyond the point of separation, the height of the Coulomb barrier is $\mathrm{MeV}$ (up to two decimal places).
$$
\left(\frac{e^{2}}{4 \pi \epsilon_{0}}=1.44 \mathrm{MeV}-\mathrm{fm}, r_{0}=1.30 \mathrm{fm}\right)
$$
{\exyear{GATE 2018}}
\begin{answer}
The height of the Coulomb barrier for the $\alpha$ particle emitted in\\
${ }_{90} T h^{230} \rightarrow{ }_{88} X^{226}+{ }_{2} \mathrm{He}^{4}$ ($\alpha$-particle):
$$
V_{C}=\frac{1}{4 \pi \epsilon_{0}}\left(\frac{2 Z e^{2}}{R}\right), \quad Z=88 \text { (daughter nucleus) }
$$
Here, $R_{0}=1.3 \mathrm{~fm}$ and $\frac{e^{2}}{4 \pi \epsilon_{0}}=1.44 \mathrm{MeV} \mathrm{fm}$,
and the separation distance is $R=R_{0}\left(A_{X}^{1 / 3}+A_{\alpha}^{1 / 3}\right)$.\\
We consider a purely Coulombic interaction beyond this point of separation:
\begin{align*}
&A_{X}^{1 / 3}+A_{\alpha}^{1 / 3}=(226)^{1 / 3}+(4)^{1 / 3}=(6.09+1.58)=7.67 \\
&R=R_{0}\left(A_{X}^{1 / 3}+A_{\alpha}^{1 / 3}\right)=1.3(7.67) \mathrm{~fm}
\end{align*}
Hence, $V_{C}=\left(\frac{e^{2}}{4 \pi \epsilon_{0}}\right) \frac{2 \times 88}{1.3(7.67) \mathrm{~fm}}=\frac{176 \times 1.44}{1.3 \times 7.67} \mathrm{MeV}$
$$
V_{C} \approx 25.42 \mathrm{MeV}
$$
\end{answer}
\item The nuclear spin and parity of ${ }_{20}^{40} \mathrm{Ca}$ in its ground state is
{\exyear{GATE 2019}}
\begin{tasks}(2)
\task[\textbf{A.}] $0^{+}$
\task[\textbf{B.}] $0^{-}$
\task[\textbf{C.}] $1^{+}$
\task[\textbf{D.}] $1^{-}$
\end{tasks}
\begin{answer}
${ }_{20}^{40} \mathrm{Ca}$ is an even-even nuclei, therefore $I=0, P=+v e$\\
Spin-parity $=0^{+}$\\
The correct option is \textbf{(a)}
\end{answer}
\item A radioactive element $X$ has a half-life of 30 hours. It decays via alpha, beta and gamma emissions with the branching ratio for beta decay being $0.75$. The partial half-life for beta decay in unit of hours is
{\exyear{GATE 2019}}
\begin{answer}
Branching ratio is the fraction of particles (here $\beta$ ) which decays by an individual decay mode with respect to the total number of particles which decays
$$B R=\frac{\left(\frac{d N}{d t}\right)_{\beta}}{\left(\frac{d N}{d t}\right)_{X}}=\frac{\lambda_{\beta}}{\lambda_{X}}=\frac{\left(T_{1 / 2}\right)_{X}}{\left(T_{1 / 2}\right)_{\beta}} \Rightarrow\left(T_{1 / 2}\right)_{\beta}=\frac{\left(T_{1 / 2}\right)_{X}}{B R}=\frac{30}{0.75}=40 \text { hours }$$
\end{answer}
\end{enumerate}
\section*{Run \#4}
\begin{figure}[H]
\centering
\includegraphics[width=0.5\textwidth]{images/4/stream_image}
\end{figure}
\begin{figure}[H]
\centering
\includegraphics[width=\textwidth]{images/4/trajectories}
\end{figure}
\documentclass[a4paper,twocolumn]{article}
\usepackage[utf8]{inputenc}
\usepackage[english]{babel}
\usepackage[T1]{fontenc}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{dsfont}
\usepackage{graphicx}
\usepackage{color}
\newcommand{\N}{\mathbf{N}}
\newcommand{\Z}{\mathbf{Z}}
\newcommand{\Q}{\mathbf{Q}}
\newcommand{\R}{\mathbf{R}}
\newcommand{\C}{\mathbf{C}}
\author{Florian Fontan}
\title{Advanced Models and Methods in Operations Research \\ Project: Batch scheduling}
\date{2021--2022}
\begin{document}
\maketitle
For each problem considered, instances and a code skeleton containing an instance parser and a solution checker are provided in the \texttt{data/} and \texttt{python/} folders of the project.
The algorithms must be implemented in the provided files between the tags \texttt{TODO START} and \texttt{TODO END}.
They must be tested on all the provided instances with the command:
\texttt{python3 problem.py -i instance.json -c certificate.json}
And each solution file must be validated by the provided checker:
\texttt{python3 problem.py -a checker -i instance.json -c certificate.json}
The results must be reproducible.
\bigskip
The deliverable must contain:
\begin{itemize}
\item A \emph{short} report describing and justifying the proposed algorithms
\item The code implementing the algorithms
\item The solution files obtained on the provided instances
\end{itemize}
\section{Dynamic Programming}
We consider the Knapsack problem with width:
\begin{itemize}
\item Input:
\begin{itemize}
\item $n$ items; for each item $j = 1, \dots, n$
\begin{itemize}
\item a weight $w_j \in \N^+$
\item a width $l_j \in \N^+$
\item a profit $p_j \in \N^+$
\end{itemize}
\item a capacity $C \in \N^+$
\end{itemize}
\item Problem: select a subset of items such that
\begin{itemize}
\item the total weight of the selected items does not exceed the knapsack capacity
\end{itemize}
\item Objective: maximize the total profit of the selected items minus the maximum width among the selected items
\end{itemize}
Propose and implement an algorithm based on Dynamic Programming for this problem.
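As a (non-prescriptive) hint, the width term can be handled by first sorting the items by nonincreasing width: once the widest selected item is fixed, the $-\max$ term of the objective becomes a constant and what remains is a standard knapsack over the narrower items. Below is a short Python sketch under these assumptions; the function and variable names are illustrative only.
\begin{verbatim}
# items: list of (weight, width, profit); C: knapsack capacity.
def solve(items, C):
    items = sorted(items, key=lambda it: -it[1])  # nonincreasing width
    n = len(items)
    # K[j][c]: best profit using items j..n-1 within capacity c.
    K = [[0] * (C + 1) for _ in range(n + 1)]
    for j in range(n - 1, -1, -1):
        w, _, p = items[j]
        for c in range(C + 1):
            K[j][c] = K[j + 1][c]
            if w <= c:
                K[j][c] = max(K[j][c], p + K[j + 1][c - w])
    best = 0  # convention: the empty selection has objective 0
    for j, (w, l, p) in enumerate(items):
        if w <= C:  # item j plays the role of the widest selected item
            best = max(best, p - l + K[j + 1][C - w])
    return best
\end{verbatim}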
\section{Heuristic Tree Search}
We consider the Knapsack problem with width and conflicts:
\begin{itemize}
\item Input:
\begin{itemize}
\item $n$ items; for each item $j = 1, \dots, n$
\begin{itemize}
\item a weight $w_j \in \N^+$
\item a width $l_j \in \N^+$
\item a profit $p_j \in \N^+$
\end{itemize}
\item a capacity $C \in \N^+$
\item a graph $G$ such that each node corresponds to an item
\end{itemize}
\item Problem: select a subset of items such that
\begin{itemize}
\item the total weight of the selected items does not exceed the knapsack capacity
\item if there exists an edge between vertices $j_1$ and $j_2$ in $G$, then items $j_1$ and $j_2$ must not both be selected
\end{itemize}
\item Objective: maximize the total profit of the selected items minus the maximum width among the selected items
\end{itemize}
Propose and implement an algorithm based on Heuristic Tree Search with Dynamic Programming for this problem.
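As a rough illustration of one possible shape of such an algorithm (a minimal beam-search sketch; the dynamic-programming ingredient, such as dominance pruning between partial solutions, is omitted, and all names are chosen freely):
\begin{verbatim}
def beam_search(items, capacity, conflicts, beam_width=100):
    # items: list of (weight, width, profit) tuples
    # conflicts: set of frozenset({j1, j2}) pairs of clashing items
    best_value = 0  # assumes the empty selection has value 0
    # a node: (total profit, max width, used weight, next index, chosen)
    level = [(0, 0, 0, 0, frozenset())]
    while level:
        children = []
        for profit, width, weight, nxt, chosen in level:
            for k in range(nxt, len(items)):
                w, l, p = items[k]
                if weight + w > capacity:
                    continue
                if any(frozenset((k, j)) in conflicts for j in chosen):
                    continue
                child = (profit + p, max(width, l), weight + w,
                         k + 1, chosen | {k})
                best_value = max(best_value, child[0] - child[1])
                children.append(child)
        # heuristic pruning: keep only the most promising nodes
        children.sort(key=lambda s: s[0] - s[1], reverse=True)
        level = children[:beam_width]
    return best_value
\end{verbatim}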
\section{Column Generation \\ + Dynamic Programming}
We consider the Single machine batch scheduling problem with Makespan objective:
\begin{itemize}
\item Input:
\begin{itemize}
\item $n$ jobs; for each job $j = 1, \dots, n$, a processing time $p_j \in \N^+$ and a size $s_j \in \N^+$
\item a batch capacity $Q \in \N^+$
\end{itemize}
\item Problem: partition the jobs into batches and sequence the batches such that:
\begin{itemize}
\item each job must be in exactly one of the batches
\item the processing time of a batch is equal to the longest processing time among all jobs it contains
\item the total size of the jobs in a batch does not exceed its capacity
\end{itemize}
\item Objective: minimize the makespan of the schedule
\end{itemize}
Propose an exponential formulation and implement an algorithm based on a Column Generation heuristic for this problem.
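For orientation, one natural exponential formulation (a sketch; other formulations are possible) is a set-covering model with one binary variable $x_B$ per feasible batch $B$:
\begin{align*}
\min \quad & \sum_{B \in \mathcal{B}} p_B \, x_B \\
\text{s.t.} \quad & \sum_{B \in \mathcal{B} \,:\, j \in B} x_B \ge 1 && j = 1, \dots, n \\
& x_B \in \{0, 1\} && B \in \mathcal{B}
\end{align*}
where $\mathcal{B}$ is the set of feasible batches (total job size at most $Q$) and $p_B = \max_{j \in B} p_j$ is the processing time of batch $B$. The pricing problem of the column generation procedure then searches for a feasible batch with negative reduced cost.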
\section{Column Generation \\ + Heuristic Tree Search}
We consider the Single machine batch scheduling problem with conflicts and Makespan objective:
\begin{itemize}
\item Input:
\begin{itemize}
\item $n$ jobs; for each job $j = 1, \dots, n$, a processing time $p_j \in \N^+$ and a size $s_j \in \N^+$
\item a batch capacity $Q \in \N^+$
\item a graph $G$ such that each node corresponds to a job
\end{itemize}
\item Problem: partition the jobs into batches and sequence the batches such that:
\begin{itemize}
\item each job must be in exactly one of the batches
\item the processing time of a batch is equal to the longest processing time among all jobs it contains
\item the total size of the jobs in a batch does not exceed its capacity
\item if there exists an edge between vertices $j_1$ and $j_2$ in $G$, then jobs $j_1$ and $j_2$ must not be in the same batch
\end{itemize}
\item Objective: minimize the makespan of the schedule
\end{itemize}
Propose an exponential formulation and implement an algorithm based on a Column Generation heuristic for this problem.
\end{document}
\section{Lineage Tracing}
\label{sec:lic}
As a foundation of lineage-based reuse, we first describe efficient means of lineage tracing and its key operations. Lineage graphs may get very large though, especially for mini-batch training. For this reason, we introduce the idea of lineage deduplication for loops and functions. Finally, we discuss design decisions and limitations.
\subsection{Basic Lineage Tracing}
\label{sec:tracing}
During runtime of a linear algebra program, LIMA maintains---in a thread- and function-local manner---lineage DAGs for all live variables of this execution context. Figure~\ref{fig:lineage} shows the lifecycle of such lineage information and key operations.
\begin{definition2}[Lineage DAGs] A lineage DAG $\mathcal{L}$ is a directed, acyclic graph, whose nodes (or lineage items) represent operations and their outputs, and whose edges represent data dependencies. Lineage items consist of an ID, an opcode, an ordered list of input lineage items, an optional data string and hash, and a visited flag for memoization of processed subgraphs. Leaf nodes are literals or matrix creation operations (e.g., \texttt{read} or \texttt{rand}), and multiple inner nodes might refer to the same inputs. Thus, the lineage DAG is a data flow graph that encodes the exact creation process of intermediate results, without the computation that determined the control flow path.
\end{definition2}
\textbf{Lineage Tracing:} The immutable lineage DAG for live variables is then incrementally built by lineage tracing as we execute runtime instructions (Figure~\ref{fig:lineage}, \emph{trace}). Every execution context maintains a \texttt{LineageMap} that maps live variable names to lineage items (Figure~\ref{fig:lineage}, red root nodes), and caches literal lineage items. As a lean runtime integration, individual instructions---in a class hierarchy of instructions for local and distributed operations---implement a dedicated interface \texttt{LineageTraceable} for obtaining lineage items. Before\footnote{Lineage tracing \emph{before} instruction execution facilitates reuse as described in Section~\ref{sec:reuse}.} executing an instruction (integrated in \texttt{preprocessInstruction}), we obtain the lineage items for the instruction output(s) and update the lineage map. Special instructions like \texttt{mvvar} and \texttt{rmvar}---for renaming and removing variables---only modify the mapping of live variables to lineage items. For capturing non-determinism, we also modified selected runtime instructions, like \texttt{rand} or \texttt{sample}, to create system-generated seeds on \texttt{preprocessInstruction} for inclusion in the lineage items.
\textbf{Comparisons:} When working with multiple, potentially overlapping lineage DAGs, a key operation is the comparison of two lineage DAGs for equivalence or containment (Figure~\ref{fig:lineage}, \emph{compare}). For this purpose, lineage items implement \texttt{hashCode()} and \texttt{equals()}, whose semantics are recursively defined. First, the hash code of a lineage item is computed as a hash over the hashes of the opcode, data item, and all inputs. As lineage DAGs are immutable, we cache the computed hash for every lineage item. Second, the equals check returns true if the opcode, data item, and all inputs are equivalent. In order to handle large DAGs, we use memoization to avoid redundant processing of sub-DAGs reachable over multiple paths, and non-recursive, queue-based function implementations.
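For illustration, the following is a minimal sketch of this memoized hashing in Python (the names are illustrative, and unlike this recursive sketch, the actual implementation uses non-recursive, queue-based traversals):
\begin{verbatim}
class LineageItem:
    def __init__(self, opcode, inputs=(), data=None):
        self.opcode = opcode          # operation code
        self.inputs = tuple(inputs)   # ordered input lineage items
        self.data = data              # optional data string
        self._hash = None             # cached; lineage DAGs are immutable

    def __hash__(self):
        if self._hash is None:        # compute once, then reuse
            self._hash = hash((self.opcode, self.data,
                               tuple(hash(i) for i in self.inputs)))
        return self._hash

    def __eq__(self, other):
        return (self.opcode == other.opcode and self.data == other.data
                and self.inputs == other.inputs)
\end{verbatim}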
\textbf{Serialization and Deserialization:} Users may obtain the lineage in two forms. First, a new \texttt{lineage(X)} built-in function returns the lineage DAG of variable $\mat{X}$ as a string. Second, for every write to a file \texttt{write(X,'f.bin')}, we also write the lineage DAG to a text file \texttt{'f.bin.lineage'}. Both cases require the serialization of the lineage DAGs (Figure~\ref{fig:lineage}, \emph{serialize}). This serialization unrolls the lineage DAG in a depth-first manner, creating a text line per lineage item. Inputs are represented via IDs and memoization ensures that every item is serialized once in the lineage log. To handle large DAGs, we again use a non-recursive implementation with stack-based data structures. The lineage log can be deserialized back into a lineage DAG (Figure~\ref{fig:lineage}, \emph{deserialize}) by processing the lineage log line-by-line. For every line, we obtain input items from a lookup table, create the lineage item, and store it in the lookup table.
\textbf{Re-computation from Lineage:} Additionally, we provide a utility for generating a runtime program from a lineage DAG (Figure~\ref{fig:lineage}, \emph{reconstruct}) that computes---given the same input---exactly the same intermediate. In contrast to the original program, the reconstructed program does not contain control flow but only the operations for computing the output. The entire lifecycle from tracing, over serialization and deserialization, to the re-computation by lineage is very valuable as it simplifies testing, debugging, and reproducibility as illustrated by the following example.
\begin{example}[Debugging with Lineage] Let us share a debugging story from practice, which motivated the lineage support in SystemDS. Users deployed a sentence classification pipeline in production, noticed differences in results compared to the development setup, and reported this as a blocking issue. We reproduced the setup, spent nights debugging it up to round-off errors of different degrees of parallelism, and yet, still could not reproduce it. Finally, we found that the modified deployment infrastructure passed arguments incorrectly, making the pipeline use default parameters. With lineage support, such multi-person debugging efforts become much simpler: lineage logs can be exchanged, compared, and used to reproduce results.
\end{example}
\begin{figure}[!t]
\centering
\includegraphics[scale=0.32]{figures/lineage}
\vspace{-0.25cm}
\caption{\label{fig:lineage}Lineage Tracing Lifecycle and Operations.}
\end{figure}
\subsection{Lineage Deduplication}
\label{sec:deduplication}
A new challenge of fine-grained lineage tracing is the potentially very large lineage DAGs in use cases like mini-batch training. Consider an average lineage item size of $64\bb$, and training 200 epochs on a dataset of $10\text{M}$ rows, batch-size 32, and $\num{1000}$ instructions per iteration. The resulting lineage DAG would grow to $4\tb$ ($200 \cdot 10\text{M}/32 = 62.5\text{M}$ iterations, each adding $\num{1000}$ lineage items of $64\bb$). We address this issue inside ML systems with a new concept of \emph{lineage deduplication}, reducing the size to $4\gb$ in this example (a single lineage item per iteration). Additionally, deduplication can remove the overhead of repetitive tracing.
\textbf{Basic Idea:} Large lineage DAGs originate from the repeated execution of code paths in loops and functions, which create repeated patterns in the lineage graph. The basic idea of lineage deduplication is to eliminate these repeated patterns in the lineage DAG as a form of compression. Conceptually, we extract lineage sub-DAGs called patches, store them once, and refer to these patches via a single lineage item. Since reactive deduplication (after lineage tracing)---similar to function outlining---is a hard problem and brittle, we perform proactive deduplication on entering last-level loops and functions. However, as the number of lineage patches is exponential in the number of branches, we use a \emph{hybrid} design with proactive setup, and minimal runtime tracing.
\textbf{Loop Deduplication Setup:} On entering a last-level \texttt{for}, \texttt{parfor}, or \texttt{while} loop, we analyze the distinct control paths to aid deduplication. The distinct control paths are all possible execution paths (e.g., $2^3$ paths for a sequence of three \texttt{if-else}-blocks), each with its own lineage patch. During setup, we count these paths in a single pass through the program, replicating the current set of traced paths at every branch. In this process, we also assign branch positions (IDs) and materialize these IDs in the \texttt{if-else} program blocks. For nested branches, the IDs are assigned in a depth-first order of the entire subprogram. Additionally, we obtain the inputs and outputs of the loop body from live variable analysis, and prepare an empty map of lineage patches but do not materialize these patches to avoid unnecessary setup for paths that are never taken.
\textbf{Loop Deduplication Tracing:} During iteration runtime, we trace temporary lineage DAGs. We first construct ordered placeholder items for the loop inputs and indexes. Additionally, we initialize a bitvector \mat{b} for tracing the taken path, where bit $\mat{b}_i$ is set to the evaluated condition of branch $i$. We then execute the loop body, while performing basic lineage tracing and updating \mat{b}. At the end of an iteration, we maintain the map of lineage patches and the global lineage DAG. The bitvector \mat{b} represents the key of the lineage patch, and we keep the collected lineage DAG as a new patch if it does not yet exist. Finally, a single dedup lineage item---pointing to the lineage patch---is added onto the global lineage DAG. Once lineage patches are created for all distinct paths, we stop this on-demand lineage tracing, and only trace the taken control paths.
\begin{figure}[!t]
\centering
\includegraphics[scale=0.32]{figures/dedup}
\vspace{-0.25cm}
\caption{\label{fig:dedup}Example Lineage Deduplication for PageRank.}
\end{figure}
\begin{example}[PageRank Loop Deduplication] Figure~\ref{fig:dedup} illustrates this concept of loop deduplication for a classical PageRank graph algorithm. On the left, we see the original script, where $\mat{G}$ is a sparse graph representing the linked websites, and $\mat{p}$ is the iteratively updated page rank of individual sites. When executing three iterations without deduplication, we get the lineage graph in the center with repeated substructures. In contrast, with loop deduplication, we have extracted one lineage patch with four inputs and one output, and add a single lineage item per iteration to the lineage graph.
\end{example}
\textbf{Function Deduplication:} Similar to loop deduplication, we apply the same concept for functions that do not contain loops or other function calls. We again count the distinct control paths upfront, use the bitvector approach to trace the taken path, and add a single lineage item per function call to the lineage graph. Additional support for nested loops and function calls is interesting future work. We focused on last-level loops and functions, which offers a good tradeoff between simplicity and benefit of deduplication.
\textbf{Handling of Non-Determinism:} Coming back to our example of mini-batch training, many DNN architectures contain \texttt{dropout} layers for regularization, a non-deterministic operation that generates new dropout masks in every iteration. Our approach to handling such non-determinism in the context of deduplication is to model the seeds as input placeholders of the lineage patch, trace these seeds like the control path bitvector, and add them as literal inputs to the single dedup item. Similarly, all functions are tagged as deterministic or non-deterministic during compilation.
\textbf{Operations on Deduplicated Graphs:} All basic lineage operations apply to deduplicated lineage graphs too. However, na\"ively decompressing the lineage graph---by lookup of lineage patches and expansion---would defeat the purpose of deduplication. We alleviate this problem by two extensions. First, we \emph{serialize} and \emph{deserialize} the dictionary of lineage patches to preserve the deduplication for storage and transfer. We further extended the \emph{compare} functionality to match normal and deduplicated sub-DAGs, by enforcing equal hashes for regular and dedup items, and resolving dedup items if needed. Second, program reconstruction would also cause expansion. Hence, on \emph{reconstruct}, we compile the lineage patches into functions, and sequences of equivalent dedup items into loops.
\subsection{Lineage Tracing for Advanced Features}
\label{sec:advanced}
Modern ML systems further provide advanced features such as (1) operator fusion, and (2) task-parallel for loops, which are both widely used and thus, important to integrate with lineage tracing.
\textbf{Operator Fusion:} Operator fusion via code generation is crucial for performance because it can avoid materialized intermediates \cite{CrottyGDKBCZ15,BoehmRHSEP18,PalkarTNTPNSSPA18}, allow scan sharing and sparsity exploitation \cite{BoehmRHSEP18,HuangB013}, and kernel specialization for accelerators \cite{ChenMJZYSCWHCGK18,XLA,AshariTBRCKS15}. However, fusion loses the operator semantics and thus, does not allow lineage tracing. This limitation is problematic because it cuts the lineage trace into unusable pieces. Our approach is simple, yet effective. We construct the lineage patches of fused operators (with ordered placeholders) during compilation, and store them in a dictionary. During runtime, we expand the lineage graph by these lineage patches. Lineage now also enables new techniques such as de-optimizing fused operators and reuse-aware fusion.
\textbf{Task-parallel Loops:} Numerical computing frameworks like MATLAB~\cite{SharmaM09}, R~\cite{Rdopar}, or Julia~\cite{BezansonEKS17}, and ML systems like TensorFlow~\cite{AbadiBCCDDDGIIK16} or SystemML~\cite{BoehmDEEMPRRSST16,BoehmTRSTBV14} provide means of task-parallel loops (e.g., for hyper-parameter tuning). Implementation details vary, but often multi-threaded and/or distributed workers are used. For ensuring isolation, we trace lineage in a worker-local manner, but individual lineage graphs share their common input lineage. Distributed operations leverage the \emph{serialize} and \emph{deserialize} operations to transfer lineage. The worker results are merged by taking their lineage roots and constructing a linearized lineage graph.
\subsection{Limitations}
\label{sec:limits1}
The LIMA lineage tracing makes several tradeoffs. In the following, we discuss these design decisions and related limitations.
\begin{itemize}
\item \emph{Immutable Files/RDDs:} We assume input files and RDDs are read-only (i.e., deterministic reads), which is a reasonable assumption and eliminates the need for data summarization.
\item \emph{No Capturing of Control Flow:} The lineage DAG represents the computation of an intermediate without the control path decisions. We made this choice because the original script is a more concise representation of the actual program.
\item \emph{Result Differences:} Despite handling non-determinism, reconstructed programs might produce slightly different results. Reasons include multi-threaded or distributed operations (aggregation orders), different environments (memory budgets, cluster size), and different artifacts (SW versions).
\end{itemize}
Our design focuses primarily on simplicity, efficiency, and robustness, which are key for leveraging lineage in many use cases like versioning, debugging, auto differentiation, and lineage-based reuse.
\section{Introduction}\label{section:introduction}
This work presents a novel approach to learning the quality and performance of
an algorithm through the use of evolution. When an algorithm is developed to
solve a given problem, the designer is presented with questions about the
performance of their proposed method, and its relative performance against
existing methods. This is an inherently difficult task. However, under the
current paradigm, the standard response to this situation is to use a known
fixed set of datasets \-- or simulate new datasets themselves \-- and a common
metric amongst the proposed method and its competitors. The algorithm is then
assessed based on this metric, often with minimal consideration of the
appropriateness and reliability of the datasets being used, or of the robustness
of the method in question.
These issues are less readily apparent when travelling in the opposite direction.
Suppose that, instead, the benchmark was a dataset of particular interest and a
preferable algorithm was to be determined for some task. There exist a number of
methods employed across disciplines to complete this task that take into account
the characteristics of the data and the context of the research problem. These
methods include the use of diagnostic tests. For instance, in the case of
clustering, if the data displayed an indeterminate number of non-convex blobs,
then one could recommend that an appropriate clustering algorithm would be
DBSCAN~\cite{Ester1996}. Otherwise, for scalability, \(k\)-means may be
chosen~\cite{Wu2009}.
The approach presented in this work aims to flip the paradigm described here by
allowing the data itself to be unfixed. This fluidity in the data is achieved by
generating data for which the algorithm performs well (or better than some
other) through the use of an evolutionary algorithm. The purpose of doing so is
not to simply create a bank of useful datasets but rather to allow for the
subsequent studying of these datasets. In doing so, the attributes and
characteristics which lead to the success (or failure) of the algorithm may be
described, giving a broader understanding of the algorithm on the whole. Our
framework is described in Figure~\ref{fig:paradigm}.
\inputtikz{paradigm}{%
On the right: the current path for selecting some algorithm(s) based on
their validity and performance for a given dataset. On the left: the
proposed flip to better understand the space in which `good' datasets exist
for an algorithm.
}
This proposed flip has a number of motivations, and below is a non-exhaustive
list of some of the problems that are presented by the established evaluation
paradigm:
\begin{enumerate}
\item How are these benchmark examples selected? There is no true measure of
their reliability other than their frequent use. In some domains and
disciplines there are well-established benchmarks, so those found in the
literature may well be reliable; in others, less so.
\item Sometimes, when there is a lack of benchmark examples, a `new' dataset
is simulated to assess the algorithm. This raises the question of how
and why that simulation is created. Moreover, the origins of
existing benchmarks are often a matter of convenience rather than
merit.
\item In disciplines where there are established benchmarks, there may still
be underlying problems around the true performance of an algorithm:
\begin{enumerate}[(i)]
\item As an example, work by Torralba and Efros~\cite{Torralba2011}
showed that image classifiers trained and evaluated on a
particular dataset, or datasets, did not perform reliably when
evaluated using other benchmark datasets that were determined
to be similar, leading to models which lack robustness.
\item The amount of learning one can gain as to the characteristics
of data which lead to good (or bad) performance of an algorithm
is constrained to the finite set of attributes present in the
benchmark data chosen in the first place.
\end{enumerate}
\end{enumerate}
Evolutionary algorithms (EAs) have been applied successfully to solve a wide
array of problems \-- particularly where the complexity of the problem or its
domain are significant. These methods are highly adaptive and
their population-based construction (displayed in Figure~\ref{fig:flowchart})
allows for the efficient solving of problems that are otherwise beyond the scope
of traditional search and optimisation methods.
\inputtikz{flowchart}{%
A general schematic for an evolutionary algorithm.
}
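To make this schematic concrete, the following is a minimal, generic sketch of such a loop in Python (the operators and parameters are placeholders, not those used later in this work):
\begin{verbatim}
import random

def evolve(create, fitness, crossover, mutate,
           pop_size=100, generations=50, elite=0.2):
    # initialise a random population of candidate individuals
    population = [create() for _ in range(pop_size)]
    for _ in range(generations):
        # selection: rank by fitness and keep the top fraction
        parents = sorted(population, key=fitness,
                         reverse=True)[:max(2, int(elite * pop_size))]
        # variation: breed and perturb to refill the population
        population = [mutate(crossover(*random.sample(parents, 2)))
                      for _ in range(pop_size)]
    return max(population, key=fitness)
\end{verbatim}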
The use of EAs to generate artificial data is not a new concept. Its
applications in data generation have included developing methods for the
automated testing of software~\cite{Koleejan2015,Michael2001,Sharifipour2018}
and the synthesis of existing or confidential data~\cite{Chen2016}. Such methods
also have a long history in the parameter optimisation of algorithms, and
recently in the automated design of convolutional neural network (CNN)
architecture~\cite{Suganuma2017,Sun2018}.
Other methods for the generation or synthesis of artificial data include
simulated annealing~\cite{Matejka2017} and generative adversarial networks
(GANs)~\cite{Goodfellow2014}. The unconstrained learning style of methods such
as CNNs and GANs aligns with that proposed in this work. By allowing the EA to
explore and learn about the search space in an organic way, less-prejudiced
insight can be established that is not necessarily reliant on any particular
framework or agenda.
Note that the proposed methodology is not simply to use an EA to optimise an
algorithm over a search space with fixed dimension or datatype such as those set
out in~\cite{Chen2016}. The size and sample space themselves are considered
properties that can be traversed by the algorithm.
\clearpage
\subsection{C Memory Allocation Functions} % (fold)
\label{sub:c_memory_allocation}
C includes a number of memory allocation functions: \nameref{ssub:malloc}, \nameref{ssub:calloc}, \nameref{ssub:realloc}, \nameref{ssub:free}.
\subsubsection{malloc} % (fold)
\label{ssub:malloc}
\texttt{malloc} is the standard memory allocation function. You tell it how much space you want, and it allocates that many bytes on the heap, returning a pointer to the allocated space.
\begin{table}[h]
\centering
\begin{tabular}{|c|p{9.5cm}|}
\hline
\multicolumn{2}{|c|}{\textbf{Function Prototype}} \\
\hline
\multicolumn{2}{|c|}{} \\
\multicolumn{2}{|c|}{\texttt{void *malloc(size\_t size )}} \\
\multicolumn{2}{|c|}{} \\
\hline
\multicolumn{2}{|c|}{\textbf{Returns}} \\
\hline
\texttt{void *} & A pointer to the allocated space is returned. \\
\hline
\textbf{Parameter} & \textbf{Description} \\
\hline
\texttt{ size } & The number of bytes to allocate on the heap. \\
\hline
\end{tabular}
\caption{Details of the \texttt{malloc} function}
\label{tbl:malloc}
\end{table}
\csection{\ccode{clst:malloc}{Example calls to \texttt{malloc}}{code/c/dynamic-memory/malloc-example.c}}
\mynote{
\begin{itemize}
\item \texttt{malloc} is used for \emph{memory allocation}.
\item You need to include \textbf{stdlib.h} to use \texttt{malloc}.
\item \texttt{malloc} allows you to allocate space on the heap. It returns a pointer to this space.
\item \texttt{malloc} returns a \texttt{void} pointer, you need to type cast this to the kind of pointer you want, for example \texttt{(int *)} casts it to an integer pointer.
\item \texttt{malloc} returns \texttt{NULL} if it fails to allocate memory.
\end{itemize}
}
% subsubsection malloc (end)
\clearpage
\subsubsection{calloc} % (fold)
\label{ssub:calloc}
The difference between \texttt{calloc} and \texttt{malloc} is that \texttt{calloc} clears the memory allocation. When you call \texttt{calloc} you pass it a number and a size, and \texttt{calloc} returns you a pointer to a block of memory that is $number \times size$ bytes.
\begin{table}[h]
\centering
\begin{tabular}{|c|p{9.5cm}|}
\hline
\multicolumn{2}{|c|}{\textbf{Function Prototype}} \\
\hline
\multicolumn{2}{|c|}{} \\
\multicolumn{2}{|c|}{\texttt{void *calloc( size\_t num, size\_t size )}} \\
\multicolumn{2}{|c|}{} \\
\hline
\multicolumn{2}{|c|}{\textbf{Returns}} \\
\hline
\texttt{void *} & A pointer to the allocated space is returned. \\
\hline
\textbf{Parameter} & \textbf{Description} \\
\hline
\texttt{ num } & The number of elements to allocate to the array.\\
& \\
\texttt{ size } & The size of each element to be allocated on the heap. \\
\hline
\end{tabular}
\caption{Details of the \texttt{calloc} function}
\label{tbl:calloc}
\end{table}
\csection{\ccode{clst:calloc}{Example calls to \texttt{calloc}}{code/c/dynamic-memory/calloc-example.c}}
\mynote{
\begin{itemize}
\item \texttt{calloc} is used for getting a \emph{cleared memory allocation}.
\item You need to include \textbf{stdlib.h} to use \texttt{calloc}.
\item \texttt{calloc} performs a similar task to \nameref{ssub:malloc}, with the addition of clearing the space allocated.
\item After calling \texttt{calloc} the memory you are allocated will have all of its bytes set to 0, whereas with \nameref{ssub:malloc} the memory retains whatever value was there previously.
\item \texttt{calloc} returns \texttt{NULL} if it fails to allocate memory.
\end{itemize}
}
% subsubsection calloc (end)
\clearpage
\subsubsection{realloc} % (fold)
\label{ssub:realloc}
Like \texttt{malloc} and \texttt{calloc}, \texttt{realloc} allocates space on the heap. In addition, it allows you to change (\emph{reallocate}) the size of an existing allocation.
\begin{table}[h]
\centering
\begin{tabular}{|c|p{9.5cm}|}
\hline
\multicolumn{2}{|c|}{\textbf{Function Prototype}} \\
\hline
\multicolumn{2}{|c|}{} \\
\multicolumn{2}{|c|}{\texttt{void *realloc( void *ptr, size\_t size )}} \\
\multicolumn{2}{|c|}{} \\
\hline
\multicolumn{2}{|c|}{\textbf{Returns}} \\
\hline
\texttt{void *} & A pointer to the allocated space is returned. \\
\hline
\textbf{Parameter} & \textbf{Description} \\
\hline
\texttt{ ptr } & The pointer to \emph{reallocate} space for on the heap.\\
& \\
\texttt{ size } & The new total size of the memory block, in bytes. \\
\hline
\end{tabular}
\caption{Details of the \texttt{realloc} function}
\label{tbl:realloc}
\end{table}
\csection{\ccode{clst:realloc}{Example calls to \texttt{realloc}}{code/c/dynamic-memory/realloc-example.c}}
\mynote {
\begin{itemize}
\item \texttt{realloc} allows you to \emph{reallocate memory} for a pointer.
\item You need to include \textbf{stdlib.h} to use \texttt{realloc}.
\item \texttt{ptr} must be \texttt{NULL} or a pointer to a memory block on the heap, i.e. space previously allocated with \nameref{ssub:malloc}, \nameref{ssub:calloc}, or \nameref{ssub:realloc}.
\item \texttt{realloc} returns \texttt{NULL} if it fails to allocate memory.
\item \texttt{realloc} may need to move the memory allocation, so you need to assign the result to a pointer as it may differ from the value passed to the \texttt{ptr} parameter.
\end{itemize}
}
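As a minimal sketch (not one of the accompanying code listings), the usual pattern is to assign the result of \texttt{realloc} to a temporary pointer, so the original block is not lost if the call fails:
\begin{verbatim}
// grow an array of ints, guarding against allocation failure
int *tmp = (int *)realloc(data, new_count * sizeof(int));
if (tmp == NULL)
{
    // data still points to the old, valid block; handle the error
}
else
{
    data = tmp; // realloc may have moved the block
}
\end{verbatim}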
% subsubsection realloc (end)
\clearpage
\subsubsection{free} % (fold)
\label{ssub:free}
When you allocate memory you are responsible for freeing that memory when you no longer require it. The \texttt{free} function allows you to do this.
\begin{table}[h]
\centering
\begin{tabular}{|c|p{9.5cm}|}
\hline
\multicolumn{2}{|c|}{\textbf{Procedure Prototype}} \\
\hline
\multicolumn{2}{|c|}{} \\
\multicolumn{2}{|c|}{\texttt{void free( void *ptr )}} \\
\multicolumn{2}{|c|}{} \\
\hline
\textbf{Parameter} & \textbf{Description} \\
\hline
\texttt{ ptr } & The pointer to the space to free on the heap.\\
\hline
\end{tabular}
\caption{Details of the \texttt{free} function}
\label{tbl:free}
\end{table}
\mynote{
\begin{itemize}
\item \texttt{free} allows you to free the memory allocated to a pointer.
\item You need to include \textbf{stdlib.h} to use \texttt{free}.
\item \texttt{ptr} must be a pointer to a memory block on the heap, i.e. space previously allocated with \nameref{ssub:malloc}, \nameref{ssub:calloc}, or \nameref{ssub:realloc}.
\item You can also pass a \texttt{NULL} value as \texttt{ptr}, in which case nothing occurs.
\item It is good practice to assign a \texttt{NULL} value to the pointer after freeing it.
\end{itemize}
}
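As a minimal sketch of the recommended pattern:
\begin{verbatim}
free(data);  // return the block to the heap
data = NULL; // good practice: avoid a dangling pointer
\end{verbatim}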
% subsubsection free (end)
% subsection c_memory_allocation (end)
\documentclass[12pt]{article}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage{graphicx}
\usepackage{hyperref}
\usepackage{xcolor}
\usepackage{minted}
\usemintedstyle{vs}
\usepackage[latin1]{inputenc}
\usepackage[left=2.00cm, right=2.00cm, top=2.00cm, bottom=2.00cm]{geometry}
\newtheorem{theorem}{Theorem}[section]
\newtheorem*{problem}{Problem}
\newtheorem*{proposition}{Proposition}
\newtheorem*{corollary}{Corollary}
\newtheorem*{lemma}{Lemma}
\definecolor{light-gray}{gray}{0.95}
\title{\textbf{Running Hierarchical State Machines in Python with Asyncio}}
\author{}
\date{}
\begin{document}
\maketitle
This document describes the \texttt{async\_hsm} package for running Hierarchical State Machines (HSM) in Python using the single-threaded asyncio paradigm. The core algorithm is based on work by Miro Samek described in the book ``Practical UML Statecharts in C/C++'' and the code was forked from the \texttt{farc} project by Dean Hall.
Like a normal finite state machine (FSM), an HSM consists of states which are connected by transitions that are triggered by \emph{events}. The hierarchical aspect of HSMs allows states to be nested within one another in parent-child relationships. The machine is in only one state at a time. Each state may have an entry action and an exit action specified, which are performed when the state is entered or exited. Each state recognizes a set of events, and the arrival of such an event will cause an action to take place. The event may or may not specify a transition to a target state. If an event is not recognized by a state, the enclosing (ancestor) states in the hierarchy are examined until one is found that does handle the event. If the event does not specify a state transition, the action is performed and the machine remains in the \emph{original} (inner) state, even though the handler is defined in the outer state. On the other hand, if the event \emph{does} cause a transition to a target state, the action is performed
and all the exit actions associated with going from the original state to the outer state which handles the event are obeyed before making the transition to the target state. If none of the ancestors of a state handles an event, it is silently handled by an implicit top state, which is defined as the common ancestor of all the states. This causes no state transition, and so the event is effectively ignored.
Performing a transition between two states in an HSM involves exiting states up to the last common ancestor (LCA) followed by entering states to the target state. All the exit and entry actions along the path are carried out. Note that we distinguish between remaining in the same state and transitioning from a state to itself. When transitioning from a state to itself, the exit action for the state is performed followed by the entry action. When remaining in a state, neither entry nor exit actions are performed. Following a transition to the target state, any initialization action defined for that state is performed, which will result in further state transitions.
When an HSM handles an event, the transitions and actions that it causes run to completion. In other words, any events that occur while the original event is being handled are just placed on an event queue, whether they arise from external sources or are generated within the actions performed during the processing. The next event is not fetched from the event queue until after processing of the first event is complete.
\begin{figure}[h]
\centering
\includegraphics[width=0.7\linewidth]{SM_of_Example_1}
\caption[Example of state transitions in an HSM]{Example state transitions in an HSM}
\label{fig:smofexample1}
\end{figure}
Referring to Figure \ref{fig:smofexample1}, let us consider the behavior of the HSM in response to several events.
\begin{itemize}
\item Since the initial state is defined to be \texttt{state1}, \texttt{ENTRY action 1} will be executed as the state is entered. Since \texttt{INIT action 1} is defined, this will be performed next, followed by a transition to \texttt{state2}, which causes execution of \texttt{ENTRY action 2}.
\item Suppose that \texttt{EVENT1} is received. Since this is handled by \texttt{state2}, the action \texttt{EVENT1 action 2} is performed followed by a transition to \texttt{state1} which performs \texttt{EXIT action 2}. Note that we do \emph{not} exit \texttt{state1}. Since \texttt{INIT action 1} is defined, this will be performed next, followed by a transition to \texttt{state2}, which causes execution of \texttt{ENTRY action 2}.
\item Next, suppose that \texttt{EVENT2} is received. Since this is handled by \texttt{state2}, the action \texttt{EVENT2 action 2} is performed followed by a transition to \texttt{state3} which performs \texttt{EXIT action 2} followed by \texttt{ENTRY action 3}.
\item Next, suppose that \texttt{EVENT1} is received. Since this is not handled by \texttt{state3}, we examine the parent \texttt{state1} which does handle it. This involves performing \texttt{EVENT1 action 1} but \emph{no} state transition, which leaves the machine in \texttt{state3}.
\item Finally, suppose that \texttt{EVENT2} is received. Since this is not handled by \texttt{state3}, we examine the parent \texttt{state1} which does handle it. The action \texttt{EVENT 2 action 1} is performed, which is followed by a transition, so we exit \texttt{state3}, performing \texttt{EXIT action 3} to get up to \texttt{state1}. We then perform the transition from \texttt{state1} to itself. As discussed previously, this causes \texttt{EXIT action 1} followed by \texttt{ENTER action 1} to be performed. Since \texttt{INIT action 1} is defined, this will be performed next, followed by a transition to \texttt{state2}, which causes execution of \texttt{ENTRY action 2}.
\end{itemize}
The event handling portion of an HSM is coded in the class \texttt{Hsm}. Instances of this class have a method called \texttt{dispatch} which takes an \texttt{Event} and performs all the actions and state transitions caused by that event before returning. The class \texttt{Ahsm} (an Augmented Hierarchical State Machine) is a subclass of \texttt{Hsm} which adds an event queue together with methods to post events to the queue using the FIFO or the LIFO discipline. In order to run a single HSM, it would be possible to write a task which fetches from the event queue and calls the \texttt{dispatch} method to process that event to completion before looping to fetch the next event. In the \texttt{async\_hsm} package, a separate \texttt{Framework} class is provided which allows a collection of inter-communicating \texttt{Ahsm} instances to be run concurrently. The operation of the \texttt{Framework} will be described in more detail later.
In normal use, an HSM is specified by subclassing \texttt{Ahsm}. Its operation is defined by writing ``state methods,'' one for each state of the machine. State transitions take place in response to namedtuples of type \texttt{Event}. Each such \texttt{event} has two parts, the first \texttt{event.signal} indicates the type of the event, while the second \texttt{event.value} can be any payload associated with the event. The type of an event is a \texttt{Signal}, which effectively acts as an enumeration. In order to create a signal named \texttt{SIGUSER}, the name is registered with the class by calling \texttt{Signal.register("SIGUSER")}. After performing this registration, we may use the notation \texttt{Signal.SIGUSER} and construct an event such as \texttt{Event(Signal.SIGUSER, payload)} which has \texttt{event.signal = Signal.SIGUSER} and \texttt{event.value = payload}.
The following code listing shows how the HSM in Figure~\ref{fig:smofexample1} may be encoded as the methods of a class:
\begin{minted}
[
baselinestretch=1.0,
bgcolor=light-gray,
fontsize=\footnotesize,
linenos
]
{python}
from async_hsm import Ahsm, Event, Signal, state
class HsmExample1(Ahsm):
    @state
    def _initial(self, event):
        Signal.register("E1")
        Signal.register("E2")
        return self.tran(self.state1)

    @state
    def state1(self, e):
        sig = e.signal
        if sig == Signal.ENTRY:
            print("ENTRY action 1")
            return self.handled(e)
        elif sig == Signal.EXIT:
            print("EXIT action 1")
            return self.handled(e)
        elif sig == Signal.INIT:
            print("INIT action 1")
            return self.tran(self.state2)
        elif sig == Signal.E1:
            print("Event 1 action 1")
            return self.handled(e)
        elif sig == Signal.E2:
            print("Event 2 action 1")
            return self.tran(self.state1)
        return self.super(self.top)

    @state
    def state2(self, e):
        sig = e.signal
        if sig == Signal.ENTRY:
            print("ENTRY action 2")
            return self.handled(e)
        elif sig == Signal.EXIT:
            print("EXIT action 2")
            return self.handled(e)
        elif sig == Signal.E1:
            print("Event 1 action 2")
            return self.tran(self.state1)
        elif sig == Signal.E2:
            print("Event 2 action 2")
            return self.tran(self.state3)
        return self.super(self.state1)

    @state
    def state3(self, e):
        sig = e.signal
        if sig == Signal.ENTRY:
            print("ENTRY action 3")
            return self.handled(e)
        elif sig == Signal.EXIT:
            print("EXIT action 3")
            return self.handled(e)
        return self.super(self.state1)
\end{minted}
Each state method is decorated using \texttt{@state}. A state function is invoked with an argument \texttt{e} which is the event that it needs to handle. As mentioned previously, \texttt{e.signal} is a signal defining the type of the event and \texttt{e.value} is the payload. Every state function must return one of the following, depending on the type of the signal
\begin{itemize}
\item \texttt{self.handled(e)}. This indicates that the event has been handled and should not cause a state transition. Events of type \texttt{Signal.ENTRY} and \texttt{Signal.EXIT} should always return in this way if they are handled.
\item \texttt{self.tran(next\_state)}. This indicates that the machine should transition to \texttt{next\_state} (which is a state method) when an event of this type occurs. An event of type \texttt{Signal.INIT} should return with a transition to a substate of the current state if it is handled.
\item \texttt{self.super(parent\_state)}. This should be the default return value. The method gets here if the event is not explicitly handled within this state. Note that this default return value informs the code of the identity of the parent of this state. For states which do not have an explicit parent, the return value should be \texttt{self.super(self.top)} since \texttt{self.top} is an internally generated top level state which is the ancestor of all user-defined states.
\end{itemize}
In the listing, we see how each state method essentially goes through the possible signals in the event passed to it and handles each of them if it can. It should be evident how the code may be written down directly from the state chart of Figure \ref{fig:smofexample1}. The parent of each state is specified in the last return statement of the method, which is executed if the event is not explicitly handled otherwise.
Note that a special state method \texttt{\_initial} is required which is used to specify the transition to the actual initial state. This method is called once when the machine is entered, and so is also useful for performing any other initialization required, such as registering signal types.
\begin{minted}
[
baselinestretch=1.0,
bgcolor=light-gray,
fontsize=\footnotesize,
linenos
]
{python}
# additional imports needed by this script
import asyncio

from async_hsm import Framework

async def main():
    hsm = HsmExample1()
    hsm.start(0)
    while not hsm.terminated:
        sig_name = input('\tEvent --> ')
        try:
            sig = getattr(Signal, sig_name)
        except LookupError:
            print("\nInvalid signal name", end="")
            continue
        event = Event(sig, None)
        hsm.dispatch(event)
    await Framework.done()

if __name__ == "__main__":
    asyncio.run(main())
\end{minted}
In order to run the hierarchical state machine, we may call its \texttt{dispatch} method, passing in the event that we require it to handle. The above listing provides a simple interactive script which prompts the user for a signal to be handled by the HSM. The line \texttt{await Framework.done()} will be discussed in more detail later; it ensures that the \texttt{main} co-routine does not exit until all the state machines associated with the \texttt{Framework} have terminated. The output of the program is shown below for the sequence of events described in the example above.
\begin{minted}
[
baselinestretch=1.0,
bgcolor=light-gray,
fontsize=\footnotesize,
linenos
]
{text}
ENTRY action 1
INIT action 1
ENTRY action 2
Event --> E1
Event 1 action 2
EXIT action 2
INIT action 1
ENTRY action 2
Event --> E2
Event 2 action 2
EXIT action 2
ENTRY action 3
Event --> E1
Event 1 action 1
Event --> E2
Event 2 action 1
EXIT action 3
EXIT action 1
ENTRY action 1
INIT action 1
ENTRY action 2
Event -->
\end{minted}
The state machine is run within a coroutine \texttt{main} using the \texttt{asyncio.run} function. In this simple example, a sequence of \texttt{Event} messages is sent to the state machine one-by-one using the \texttt{dispatch} method. More generally, as a result of performing the actions associated with the state machine, new events may be generated. The methods \texttt{postFIFO} and \texttt{postLIFO} are defined on the \texttt{Ahsm} class, and these allow events to be enqueued for processing after the current event handler has run to completion.
The following code fragment shows how a sequence of events (specified in \texttt{seq}, followed by user-provided input) may be processed. Each event is placed on the queue (using \texttt{postFIFO}) and the \texttt{dispatch} method is called in a loop while the queue still has elements in it. In this way, events placed on the message queue during the running of the state machine are processed and run to completion before the next user event is enqueued.
\begin{minted}
[
baselinestretch=1.0,
bgcolor=light-gray,
fontsize=\footnotesize,
linenos
]
{python}
async def main():
    hsm = HsmExample1()
    seq = ['E1', 'E2', 'E1', 'E2']
    hsm.start(0)
    while not hsm.terminated:
        if seq:
            sig_name = seq.pop(0)
            print(f'\tEvent --> {sig_name}')
        else:
            sig_name = input('\tEvent --> ')
        try:
            sig = getattr(Signal, sig_name)
            hsm.postFIFO(Event(sig, None))
        except LookupError:
            print("\nInvalid signal name", end="")
            continue
        while hsm.has_msgs():
            event = hsm.pop_msg()
            hsm.dispatch(event)
    print("\nTerminated")
    await Framework.done()

if __name__ == "__main__":
    asyncio.run(main())
\end{minted}
Running this program gives the same output as given above. At the prompt, additional events can be given or the \texttt{Ctrl-C} key combination may be entered. The latter generates an \texttt{Event.TERMINATE} message that is posted to the message FIFOs of the state machine(s). Within the internally-generated \texttt{top} state handler method, the \texttt{Event.TERMINATE} message causes a transition to the \texttt{\_exit} state. In this state, the \texttt{terminated} attribute of the machine is set, causing the program to break out of the loop. The last line of the \texttt{main} function \texttt{await Framework.done()} waits until all state machines in the framework have set their \texttt{terminated} attributes.
\begin{figure}[h]
\centering
\includegraphics[width=0.7\linewidth]{internally_generated_states}
\caption[Internally generated HSM states]{Internally generated HSM states}
\label{fig:internallygeneratedstates}
\end{figure}
\subsection*{Error Handling}
If an exception is raised within any of the state-handler methods of an HSM class, the exception is caught and information about the exception is packaged into a special event whose signal type is \texttt{Signal.ERROR}. By default, this is posted to the message FIFO of the state machine in which the exception was raised. The payload of the event (in its \texttt{value} attribute) is a dictionary with the following keys:
\begin{description}
\item[exc] The Python exception object
\item[traceback] A string with the traceback for the exception
\item[location] The name of the class in which the exception occurred
\end{description}
Within the internally-generated \texttt{top} state handler method, the \texttt{ERROR} event causes the exception message and traceback to be printed on the console, without a change of state.
In order to demonstrate how exceptions are handled by default, add the line \texttt{1/0} immediately following the line \texttt{print("Event 1 action 2")} in the state handler method \texttt{state2}. The result of the run changes to:
\begin{minted}
[
baselinestretch=1.0,
bgcolor=light-gray,
fontsize=\footnotesize,
linenos
]
{text}
ENTRY action 1
INIT action 1
ENTRY action 2
Event --> E1
Event 1 action 2
Exception division by zero
Traceback (most recent call last):
File "c:\github\async_hsm\async_hsm\__init__.py", line 382, in dispatch
r = s(event) # invoke state handler
File "c:\github\async_hsm\async_hsm\__init__.py", line 155, in func_wrap
result = func(self, evt)
File "Hsm_example1.py", line 45, in state2
1 / 0
ZeroDivisionError: division by zero
Event --> E2
Event 2 action 2
EXIT action 2
ENTRY action 3
Event --> E1
Event 1 action 1
Event --> E2
Event 2 action 1
EXIT action 3
EXIT action 1
ENTRY action 1
INIT action 1
ENTRY action 2
Event --> <Ctrl-C>
EXIT action 2
EXIT action 1
Invalid signal name
Terminated
\end{minted}
\section*{The Framework class for systems of hierarchical state machines}
When there is more than one hierarchical state machine, it is useful to have a framework within which all the machines can run. Each HSM is provided with its own event queue and is assigned a unique priority level. The framework is responsible for calling the dispatch function of each machine, passing it messages from its event queue. An HSM can post messages not only to its own queue; it can also use the static method \texttt{publish} of the \texttt{Framework} class to post an event to the queues of all machines that have opted to subscribe to events of that type. In this way, machines can communicate with each other. The static method \texttt{Framework.subscribe} is passed a string with the name of the \texttt{Signal} to which the machine wishes to subscribe.
It is important to define the sequence in which all the HSMs receive their messages. The framework starts with the machine with the highest priority and calls its dispatch method with the event (if any) at the head of its queue. It proceeds to the machine with the next highest priority and does the same, and continues until all machines have been handled. If any event queue is non-empty, the cycle repeats with the highest priority machine. New events can be placed on the event queue(s) from external sources such as timers, user input, or as a result of actions triggered by earlier events.
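This cycle can be pictured with the following sketch (illustrative only, not the actual \texttt{Framework.run()} implementation; it assumes each machine exposes a \texttt{priority} attribute alongside the \texttt{has\_msgs}, \texttt{pop\_msg} and \texttt{dispatch} methods shown earlier):
\begin{minted}
[
baselinestretch=1.0,
bgcolor=light-gray,
fontsize=\footnotesize,
linenos
]
{python}
def run_cycle(machines):
    # Repeat while any event queue is non-empty; within each pass,
    # serve the machines in priority order (ordering convention assumed).
    while any(m.has_msgs() for m in machines):
        for m in sorted(machines, key=lambda m: m.priority):
            if m.has_msgs():
                m.dispatch(m.pop_msg())
\end{minted}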
Using the static method \texttt{Framework.publish()} simplifies interacting with one or more state machines, since it sends the event to the state machines (which have subscribed) and then calls \texttt{Framework.run()} to perform all the actions that follow from that event. The following listing sends the sequence of events specified in \texttt{seq} to the state machine \texttt{HsmExample1}.
\begin{minted}
[
baselinestretch=1.0,
bgcolor=light-gray,
fontsize=\footnotesize,
linenos
]
{python}
async def main():
    hsm = HsmExample1()
    seq = ['E1', 'E2', 'E1', 'E2']
    hsm.start(0)
    for sig_name in seq:
        sig = getattr(Signal, sig_name)
        print(f'\tEvent --> {sig_name}')
        Framework.publish(Event(sig, None))
        # Allow other tasks to run
        await asyncio.sleep(0)
    # Wait for CTRL-C to signal TERMINATE to all the HSMs
    await Framework.done()

if __name__ == "__main__":
    asyncio.run(main())
\end{minted}
It is necessary to subscribe to the events in the class \texttt{HsmExample1}.
This can be done by modifying the \texttt{\_initial} method to use \texttt{Framework.subscribe} instead of \texttt{Signal.register} as shown. Note that it is also necessary to pass the state machine that wishes to subscribe to the signal as the second argument to \texttt{Framework.subscribe}.
\begin{minted}
[
baselinestretch=1.0,
bgcolor=light-gray,
fontsize=\footnotesize,
linenos
]
{python}
@state
def _initial(self, event):
    Framework.subscribe("E1", self)
    Framework.subscribe("E2", self)
    return self.tran(self.state1)
\end{minted}
\section*{Running multiple state machines with additional asyncio tasks in an event loop}
When we need to run a collection of hierarchical state machines concurrently, the \texttt{asyncio} Python framework is used to manage their execution within a single event loop.
\end{document}
"alphanum_fraction": 0.744299293,
"avg_line_length": 59.8235294118,
"ext": "tex",
"hexsha": "e1a89f69dd20062288f733d111866ae085074e91",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "8e23a94c3e9d009536002d1469e81fd165a23e01",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "SzeMengTan/async_hsm",
"max_forks_repo_path": "docs/Asyncio_Hierarchical_State_Machines.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "8e23a94c3e9d009536002d1469e81fd165a23e01",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "SzeMengTan/async_hsm",
"max_issues_repo_path": "docs/Asyncio_Hierarchical_State_Machines.tex",
"max_line_length": 1035,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "8e23a94c3e9d009536002d1469e81fd165a23e01",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "SzeMengTan/async_hsm",
"max_stars_repo_path": "docs/Asyncio_Hierarchical_State_Machines.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 5260,
"size": 21357
} |
%
\documentclass[12pt,a4paper]{article}
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage[affil-it]{authblk}
% Math
\usepackage{amsmath}
% Links
\usepackage[pdftex]{hyperref}%
\hypersetup{%
colorlinks=true,%
urlcolor=black,%
citecolor=black,%
linkcolor=black,%
pdfauthor="T. Whyntie"}
% Front matter formatting
\renewcommand\Affilfont{\fontsize{9}{10.8}\itshape}
\title{Maxwell's equations with and without magnetic charge}
\author[1,2]{T.~Whyntie}
\affil[1]{School of Physics \& Astronomy, Queen Mary University of London, UK}
\affil[2]{Institute for Research in Schools, UK}
% Omit the date.
\date{}
\begin{document}
\input{defs}
\maketitle
\begin{abstract}
Maxwell's equations of classical electromagnetism~\cite{Maxwell1865}
are presented,
in Gaussian units\footnote{
The equations are written in the \emph{Gaussian unit}
system, and not SI units, for simplicity;
in this system the electric field $\electricfield$
and the magnetic field $\magneticfield$ have the same units.},
without (\ref{eq:1},~\ref{eq:2},~\ref{eq:3},~\ref{eq:4})
and
with (\ref{eq:1m},~\ref{eq:2m},~\ref{eq:3m},~\ref{eq:4m})
magnetic charge.
Equations~\ref{eq:1m} to~\ref{eq:4m} are
taken from the front cover of the
Monopole and Exotics Detector at the LHC (MoEDAL)
Technical Design Report~\cite{MoEDAL2009},
the Large Hadron Collider's seventh major experiment
and the latest venture to be undertaken in the search for
Dirac's hypothesised magnetic monopole~\cite{Dirac1931}.
\end{abstract}
\begin{eqnarray}
\label{eq:1} \divvec{\electricfield} & = & 4 \pi \electricchargedensity\\ [10pt]
\label{eq:2} \divvec{\magneticfield} & = & 0\\ [10pt]
\label{eq:3} - \curlvec{\electricfield} & = & \myfrac{1}{\speedoflight} \, \timediff{\magneticfield}\\ [10pt]
\label{eq:4} \curlvec{\magneticfield} & = & \myfrac{1}{\speedoflight} \, \timediff{\electricfield} + \myfrac{4 \pi}{\speedoflight} \, \electriccurrentdensity
\end{eqnarray}
\begin{eqnarray}
\label{eq:1m} \divvec{\electricfield} & = & 4 \pi \electricchargedensity\\ [10pt]
\label{eq:2m} \divvec{\magneticfield} & = & 4 \pi \magneticchargedensity\\ [10pt]
\label{eq:3m} - \curlvec{\electricfield} & = & \myfrac{1}{\speedoflight} \, \timediff{\magneticfield} + \myfrac{4 \pi}{\speedoflight} \, \magneticcurrentdensity\\ [10pt]
\label{eq:4m} \curlvec{\magneticfield} & = & \myfrac{1}{\speedoflight} \, \timediff{\electricfield} + \myfrac{4 \pi}{\speedoflight} \, \electriccurrentdensity
\end{eqnarray}
\section*{Acknowledgements}
This work was supported by the
UK Science and Technology Facilities Council (STFC)
through grant ST/N00101X/1.
The author wishes to thank A. Pontzen for many
fruitful discussions.
\bibliographystyle{elsarticle-num}
\bibliography{bibfile}
\end{document}
\documentclass[12pt, a4paper]{article}
\usepackage[margin=0.5in]{geometry}
\usepackage{color}
\usepackage[dvipsnames]{xcolor}
\usepackage{hyperref}
\hypersetup{
colorlinks=true,
linkcolor=blue,
urlcolor=blue,
linktoc=all
}
\usepackage{amsmath}
\usepackage{mathtools}
\usepackage{amssymb}
\usepackage{cancel}
\usepackage{bm}
\usepackage{dsfont}
\usepackage{graphicx}
\usepackage{graphics}
\usepackage{xfrac}
\usepackage{array}
\setcounter{MaxMatrixCols}{40}
\usepackage{enumerate}
\usepackage{enumitem}
\usepackage{multirow}
%inclusions carried over from past class homework formats
\usepackage{units}
\usepackage{fullpage}
\usepackage{alltt}
\usepackage{mathrsfs}
\usepackage{xcolor}
\usepackage{soul}
\usepackage{pgfplots}
\DeclarePairedDelimiter{\abs}{\lvert}{\rvert}
\newcommand*{\fontCourier}{\fontfamily{pcr}\selectfont}
\newcommand*\mean[1]{\overline{#1}}
\newcommand\scalemath[2]{\scalebox{#1}{\mbox{\ensuremath{\displaystyle #2}}}}
\setcounter{tocdepth}{5}
\setcounter{secnumdepth}{5}
\usepackage{pdfpages}
\usepackage{Sweave}
\begin{document}
\includepdf{TitlePage_MastersThesis}
\includepdf{ThesisApprovalPage}
\input{Thesis-concordance}
\tableofcontents
\newpage
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%INTRODUCTION
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Thesis Abstract}
\begin{itemize}
\item (paragraph) Statement of the thesis topic and objectives
\item (paragraph) Explanation of R package
\end{itemize}
\clearpage
\section{Introduction: Predictive Inference}
\subsection{Why is predictive inference important?}
\subsection{Difference between parametric inference and predictive inference}
\subsubsection{When is predictive inference more useful?}
\subsubsection{When is parametric inference more useful?}
[examples, comparisons]
\subsection{The Bayesian Parametric Prediction Format}
[Geisser p. 49]\\
Let $$f\left(x^{(N)},x_{(M)}|\theta\right) = f\left(x_{(M)}|x^{(N)},\theta\right)f\left(x^{(N)}|\theta\right).$$
Here $x^{(N)}$ represents observed events and $x_{(M)}$ are future events. We calculate
$$f\left(x_{(M)},x^{(N)}\right) = \int f\left(x^{(N)},x_{(M)}|\theta\right)p(\theta)d\theta$$
where $p(\theta)$ is the prior density and
$$f\left(x_{(M)}|x^{(N)}\right) = \frac{f\left(x_{(M)},x^{(N)}\right)}{f\left(x^{(N)}\right)} = \int f\left(x_{(M)}|\theta\right)p\left(\theta|x^{(N)}\right)d\theta$$
where
$$p\left(\theta|x^{(N)}\right) \propto f\left(x^{(N)}|\theta\right)p(\theta).$$
\subsection{[Maybe] Example of Difference between results from Plug-in estimator and results using Predictive Inference}
\clearpage
\section{Chapter 1: Predictive Problems with Conjugate Priors}
[Problems with closed-form solutions. These problems will be what the R package is designed for. Use problems from Geisser, Casella \& Berger (Bayesian chapter), other sources. Regression problem--predictive distributions of models that include and exclude some predictor]
\subsection{Prediction of Future Successes: Beta-Binomial (Geisser p. 73)}
\subsubsection{Derivation}
Let $X_i$ be independent binary variables with Pr$(X_i = 1) = \theta$, and let $T = \sum X_i$. Then $T$ has probability
$${N\choose t}\theta^t(1-\theta)^{N-t}.$$
\vspace{5mm}
Assume $\theta\sim\text{Beta}(\alpha,\beta)$, so
\vspace{5mm}
$$p(\theta) = \frac{\Gamma(\alpha + \beta)\theta^{\alpha - 1}(1 - \theta)^{\beta - 1}}{\Gamma(\alpha)\Gamma(\beta)}.$$
\vspace{5mm}
Then
\vspace{5mm}
$$p\left(\theta|X^{(N)}\right) = \frac{\Gamma(N+\alpha+\beta)\theta^{t+\alpha-1}(1-\theta)^{N-t+\beta-1}}{\Gamma(t+\alpha)\Gamma(N-t+\beta)}$$
\vspace{5mm}
\noindent So for $R = \sum_{i=1}^M X_{N+i}$ we have Beta-Binomial predictive distribution
\begin{flalign*}
\text{Pr}[R=r|t]
&= \int {M\choose r}\theta^r(1-\theta)^{M-r}p\left(\theta|X^{(N)}\right)d\theta\\
&\\
&= {M\choose r}\int \theta^r(1-\theta)^{M-r}\frac{\Gamma(N+\alpha+\beta)}{\Gamma(t+\alpha)\Gamma(N-t+\beta)}\theta^{t+\alpha-1}(1-\theta)^{N-t+\beta-1}d\theta\\
&\\
&= \frac{M!}{r!(M-r)!}\frac{\Gamma(N+\alpha+\beta)}{\Gamma(t+\alpha)\Gamma(N-t+\beta)}\int\theta^{r+t+\alpha-1}(1-\theta)^{M-r+N-t+\beta-1}d\theta\\
&\\
&= \frac{\Gamma(M+1)\Gamma(N+\alpha+\beta)\Gamma(r+t+\alpha)\Gamma(M-r+N-t+\beta)}{\Gamma(r+1)\Gamma(M-r+1)\Gamma(t+\alpha)\Gamma(N-t+\beta)\Gamma(M+N+\alpha+\beta)}
\end{flalign*}
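As a numerical sanity check of this closed form, the following short Python sketch (independent of the thesis's R functions; all names and parameter values are illustrative) evaluates the predictive pmf on the log scale and verifies that it sums to one:
\begin{verbatim}
import numpy as np
from scipy.special import gammaln

def beta_binom_pred(r, M, N, t, alpha, beta):
    # log of the Beta-Binomial predictive pmf derived above
    logp = (gammaln(M + 1) + gammaln(N + alpha + beta)
            + gammaln(r + t + alpha) + gammaln(M - r + N - t + beta)
            - gammaln(r + 1) - gammaln(M - r + 1)
            - gammaln(t + alpha) - gammaln(N - t + beta)
            - gammaln(M + N + alpha + beta))
    return np.exp(logp)

r = np.arange(0, 21)                      # support 0..M
p = beta_binom_pred(r, M=20, N=10, t=5, alpha=2, beta=8)
assert abs(p.sum() - 1.0) < 1e-10         # pmf sums to one
\end{verbatim}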
\clearpage
\subsubsection{R Implementation}
This result has been used to create ``standard'' R functions dpredBB(), ppredBB(), and rpredBB() for the Beta-Binomial distribution, giving the density, cumulative probability, and random sampling, respectively (see appendix). These functions are exercised in the following example.
\subsubsection{Example}
Suppose $t=5$ successes have been observed out of $N=10$ binary events, $\alpha = 2$ and $\beta = 8$. For $M = 1000$ future observations, the figures below show the predictive distribution from dpredBB(), the cumulative distribution from ppredBB(), and a histogram of random draws from rpredBB().
\includegraphics{Thesis-002}
\subsection{Survival Time: Exponential-Gamma (Geisser p. 74)}
\subsubsection{Derivation}
Suppose $X^{(N)} = \left(X^{(d)},X^{(N-d)}\right)$ where $X^{(d)}$ represents copies fully observed from an exponential survival time density
$$f(x|\theta) = \theta e^{-\theta x}$$
and $X^{(N-d)}$ represents copies censored at $x_{d+1},...,x_N$, respectively. Hence
$$L(\theta)\propto\theta^d e^{-\theta N\bar{x}}$$
where $N\bar{x} = \sum_1^N{x_i}$, as shown below.\\
The usual exponential likelihood is used for the fully observed copies, whereas for the censored copies we need Pr$(X > x) = 1 - \text{Pr}(X\leq x) = 1 - F(x|\theta) = 1 - (1 - e^{-\theta x}) = e^{-\theta x}$. Thus the overall likelihood is
$$L(\theta|x) = \prod_{i=1}^d\theta e^{-\theta x_i}\prod_{i=d+1}^N e^{-\theta x_i} = \theta^d e^{-\theta N\bar{x}}$$
Assuming a Gamma$(\delta,\gamma)$ prior for $\theta$,
$$p(\theta) = \frac{\gamma^\delta\theta^{\delta - 1}e^{-\gamma\theta}}{\Gamma(\delta)}$$
we obtain the posterior
%$$p\left(\theta|X^{(N)}\right) = \frac{p\left(x^{(N)}|\theta\right)p(\theta)}{\int p\left(X^{(N)}|\theta\right)p(\theta)d\theta} = \frac{(\gamma+N\bar{x})^{d+\delta}\theta^{d+\delta - 1}e^{-\theta(\gamma+N\bar{x})}}{\Gamma(d+\delta)}$$
\begin{flalign*}
p\left(\theta|X^{(N)}\right)
&= \frac{p\left(x^{(N)}|\theta\right)p(\theta)}{\int p\left(X^{(N)}|\theta\right)p(\theta)d\theta}\\
&\\
&= \frac{\theta^d e^{-\theta N\bar{x}}\cdot\frac{\gamma^\delta\theta^{\delta - 1}e^{-\gamma\theta}}{\Gamma(\delta)}}{\int\left(\theta^d e^{-\theta N\bar{x}}\cdot\frac{\gamma^\delta\theta^{\delta - 1}e^{-\gamma\theta}}{\Gamma(\delta)}\right)d\theta}\\
&\\
&= \frac{\cancel{\frac{\gamma^\delta}{\Gamma(\delta)}}\left(\theta^{d+\delta - 1}e^{-\theta(\gamma+N\bar{x})}\right)}{\cancel{\frac{\gamma^\delta}{\Gamma(\delta)}}\int\left(\theta^{d+\delta - 1}e^{-\theta(\gamma+N\bar{x})}\right)d\theta}\\
&\\
&= \frac{\frac{(\gamma+N\bar{x})^{d+\delta}}{\Gamma(d+\delta)}\left(\theta^{d+\delta - 1}e^{-\theta(\gamma+N\bar{x})}\right)}{\cancel{\frac{(\gamma+N\bar{x})^{d+\delta}}{\Gamma(d+\delta)}\int\left(\theta^{d+\delta - 1}e^{-\theta(\gamma+N\bar{x})}\right)d\theta}}\\
&\\
&= \frac{(\gamma+N\bar{x})^{d+\delta}\theta^{d+\delta - 1}e^{-\theta(\gamma+N\bar{x})}}{\Gamma(d+\delta)}
\end{flalign*}
with the Gamma$(d+\delta,\gamma+N\bar{x})$ density in the next to last step integrating to $1$.\\
Thus the survival time predictive density is
\begin{flalign*}
p\left(x|X^{(N)}\right)
&= \int p\left(\theta|X^{(N)}\right)p(x|\theta)d\theta\\
&\\
&= \int\frac{(\gamma+N\bar{x})^{d+\delta}\theta^{d+\delta - 1}e^{-\theta(\gamma+N\bar{x})}}{\Gamma(d+\delta)}\cdot\theta e^{-\theta x}d\theta\\
&\\
&= (d+\delta)(\gamma+N\bar{x})^{d+\delta}\int\frac{\theta^{(d+\delta + 1) - 1}e^{-\theta(\gamma+N\bar{x} + x)}}{(d+\delta)\Gamma(d+\delta)}d\theta\\
&\\
&= \frac{(d+\delta)(\gamma+N\bar{x})^{d+\delta}}{\left(\gamma+N\bar{x}+x\right)^{d+\delta+1}}\int\frac{\left(\gamma+N\bar{x}+x\right)^{d+\delta+1}\theta^{(d+\delta + 1) - 1}e^{-\theta(\gamma+N\bar{x} + x)}}{\Gamma(d+\delta+1)}d\theta\\
&\\
&= \frac{(d+\delta)(\gamma+N\bar{x})^{d+\delta}}{\left(\gamma+N\bar{x}+x\right)^{d+\delta+1}}
\end{flalign*}
(simplifying by constructing a Gamma$(d+\delta+1,\gamma+N\bar{x}+x)$ density in the final integrand.)\\
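The result is a Lomax (Pareto type II) density with shape $d+\delta$ and scale $\gamma+N\bar{x}$. As a hedged numerical check (outside the thesis's R toolchain; parameter values illustrative), the following Python sketch verifies that the density integrates to one:
\begin{verbatim}
import numpy as np
from scipy.integrate import quad

def pred_density(x, d, delta, gamma, Nxbar):
    a = d + delta        # posterior shape
    b = gamma + Nxbar    # posterior rate
    # a * b**a / (b + x)**(a + 1), written in an overflow-safe form
    return (a / (b + x)) * (b / (b + x))**a

# check that the predictive density integrates to one
total, _ = quad(pred_density, 0, np.inf, args=(8, 2, 5, 100.0))
assert abs(total - 1.0) < 1e-6
\end{verbatim}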
\subsubsection{R Implementation}
This result has been used to create standard format R functions dpredEG(), ppredEG(), and rpredEG() for the Exponential-Gamma distribution, giving the density, cumulative probability, and random sampling, respectively (see appendix). These functions are exercised in the following example.
\subsubsection{Example}
Suppose $d=800$ out of $N = 1000$ copies have been observed, and the remaining $200$ censored. Say $\delta = 20$, $\gamma=5$, and we are interested in the number of survivors out of $M = 1000$ future observations. The figures below illustrate the predictive probability using dpredEG() and rpredEG(), along with a histogram of a random sample taken using rpredEG().
\includegraphics{Thesis-003}
\clearpage
\subsection{Poisson-Gamma Model (Hoff p. 43ff)}
\subsubsection{Derivation}
[using Hoff's notation and variable names below. Should I convert this to Geisser's $x^{(N)},x_{(M)}$ convention for uniformity throughout my thesis?]\\\\
Suppose $Y_1,...,Y_n|\theta\overset{i.i.d.}{\sim}\text{Poisson}(\theta)$ with Gamma prior $\theta\sim\text{Gamma}(\alpha,\beta)$. That is,
\begin{flalign*}
P\left(Y_1 = y_1,...,Y_n = y_n|\theta\right)
&= \prod_{i=1}^n p\left(y_i|\theta\right)\\
&\\
&= \prod_{i=1}^n\frac{1}{y_i!}\theta^{y_i}e^{-\theta}\\
&\\
&= \left(\prod_{i=1}^n\frac{1}{y_i!}\right)\theta^{\sum y_i}e^{-n\theta}\\
&\\
&= c\left(y_1,...,y_n\right)\theta^{\sum y_i}e^{-n\theta}
\end{flalign*}
and
$$p(\theta) = \dfrac{\beta^\alpha}{\Gamma(\alpha)}\theta^{\alpha-1}e^{-\beta\theta}, \theta, \alpha, \beta > 0.$$
\bigskip
Then we have posterior distribution
\begin{flalign*}
p\left(\theta|y_1,...,y_n\right)
&= \dfrac{p\left(y_1,...,y_n|\theta\right)p(\theta)}{\int_\theta p\left(y_1,...,y_n|\theta\right)p(\theta)}\\
&\\
&= \dfrac{p\left(y_1,...,y_n|\theta\right)p(\theta)}{p\left(y_1,...,y_n\right)}\\
&\\
&= \dfrac{1}{p\left(y_1,...,y_n\right)}\theta^{\sum y_i}e^{-n\theta}\dfrac{\beta^\alpha}{\Gamma(\alpha)}\theta^{\alpha - 1}e^{-\beta\theta}\\
&\\
&= C\left(y_1,...,y_n,\alpha,\beta\right)\theta^{\alpha+\sum y_i - 1}e^{-(\beta + n)\theta}\\
&\\
&\sim \text{Gamma}\left(\alpha+\sum y_i,\beta + n\right).
\end{flalign*}
Here
\begin{flalign*}
C\left(y_1,...,y_n,\alpha,\beta\right)
&= \dfrac{1}{p\left(y_1,...,y_n\right)}\cdot\dfrac{\beta^\alpha}{\Gamma(\alpha)}\\
&\\
&= \dfrac{1}{\int_\theta p\left(y_1,...,y_n|\theta\right)p(\theta)}\cdot\dfrac{\beta^\alpha}{\Gamma(\alpha)}\\
&\\
&= \dfrac{1}{\int_\theta\left(\prod\frac{1}{y_i!}\right)\theta^{\sum y_i}e^{-n\theta}\cancel{\left(\frac{\beta^\alpha}{\Gamma(\alpha)}\right)}\theta^{\alpha-1}e^{-\beta\theta}}\cdot\cancel{\left(\frac{\beta^\alpha}{\Gamma(\alpha)}\right)}\\
&\\
&= \dfrac{1}{\left(\prod\frac{1}{y_i!}\right)\frac{\Gamma(\alpha + \sum y_i)}{(\beta+n)^{\alpha+\sum y_i}}\int_\theta \frac{(\beta+n)^{\alpha+\sum y_i}}{\Gamma(\alpha+\sum y_i)}\theta^{\sum y_i+\alpha-1}e^{-(\beta+n)\theta}}\\
&\\
&= \dfrac{\prod_{i=1}^n y_i!(\beta+n)^{\alpha+\sum y_i}}{\Gamma(\alpha+\sum y_i)}
\end{flalign*}
Call this constant $C_n$ (for $n$ observations).
\bigskip
Note that with an additional observation $y_{n+1} = \tilde{y}$, the constant becomes
$$C_{n+1} = \dfrac{\prod_{i=1}^{n+1} y_i!(\beta+n+1)^{\alpha+\sum_{i=1}^{n+1} y_i}}{\Gamma(\alpha+\sum_{i=1}^{n+1} y_i)}.$$
Also note that the marginal joint distribution of $k$ observations is
$$p\left(y_1,...,y_k\right) = \dfrac{1}{C_k}\dfrac{\beta^\alpha}{\Gamma(\alpha)}.$$
For future observation $\tilde{y}$, then, we compute predictive distribution
\begin{flalign*}
p\left(\tilde{y}|y_1,...,y_n\right)
&= \dfrac{p\left(y_1,...,y_n,\tilde{y}\right)}{p\left(y_1,...,y_n\right)} = \dfrac{p\left(y_1,...,y_{n+1}\right)}{p\left(y_1,...,y_n\right)}
= \dfrac{\frac{1}{C_{n+1}}\cancel{\frac{\beta^\alpha}{\Gamma(\alpha)}}}{\frac{1}{C_n}\cancel{\frac{\beta^\alpha}{\Gamma(\alpha)}}}
= \dfrac{C_n}{C_{n+1}}\\
&\\
&= \dfrac{\dfrac{\prod_{i=1}^n y_i!(\beta+n)^{\alpha+\sum_{i=1}^n y_i}}{\Gamma(\alpha+\sum_{i=1}^n y_i)}}{\dfrac{\prod_{i=1}^{n+1} y_i!(\beta+n+1)^{\alpha+\sum_{i=1}^{n+1} y_i}}{\Gamma(\alpha+\sum_{i=1}^{n+1} y_i)}}\\
&\\
&= \dfrac{\Gamma\left(\alpha+\sum_{i=1}^{n+1}y_i\right)(\beta+n)^{\alpha+\sum_{i=1}^n y_i}}{\left(y_{n+1}!\right)\Gamma\left(\alpha+\sum_{i=1}^n y_i\right)(\beta+n+1)^{\alpha+\sum_{i=1}^{n+1}y_i}}\\
&\\
&= \dfrac{\Gamma\left(\alpha+\sum_{i=1}^n y_i + \tilde{y}\right)(\beta+n)^{\alpha+\sum_{i=1}^n y_i}}{\left(\tilde{y}!\right)\Gamma\left(\alpha+\sum_{i=1}^n y_i\right)(\beta+n+1)^{\alpha+\sum_{i=1}^n y_i + \tilde{y}}}\\
&\\
&= \dfrac{\Gamma\left(\alpha+\sum y_i+\tilde{y}\right)}{\Gamma(\tilde{y}+1)\Gamma(\alpha+\sum y_i)}\cdot \left(\dfrac{\beta+n}{\beta+n+1}\right)^{\alpha+\sum y_i} \cdot \left(\dfrac{1}{\beta+n+1}\right)^{\tilde{y}}\\
\end{flalign*}
This is a negative binomial distribution: $\tilde{y}\sim NB\left(\alpha+\sum y_i,\beta+n\right)$, for which
\begin{flalign*}
E\left[\tilde{Y}|y_1,...,y_n\right] &= \dfrac{\alpha+\sum{y_i}}{\beta+n} = E\left[\theta|y_1,...,y_n\right];\\
&\\
\text{Var}\left[\tilde{Y}|y_1,...,y_n\right] &= \dfrac{\alpha+\sum{y_i}}{\beta+n}\cdot\dfrac{\beta+n+1}{\beta+n}\\
&\\
&=\text{Var}\left[\theta|y_1,...,y_n\right]\times(\beta+n+1)\\
&\\
&=E\left[\theta|y_1,...,y_n\right]\times\dfrac{\beta+n+1}{\beta+n}\\
\end{flalign*}
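Since standard software parameterizes the negative binomial by the number of failures before $a$ successes, each with success probability $p$, the predictive pmf corresponds to $a = \alpha+\sum y_i$ and $p = (\beta+n)/(\beta+n+1)$. A hedged Python check (values illustrative, independent of the thesis's R functions):
\begin{verbatim}
from scipy.stats import nbinom

alpha, beta = 2.0, 1.0
y = [3, 5, 2]                              # observed counts
a = alpha + sum(y)                         # posterior shape
p = (beta + len(y)) / (beta + len(y) + 1)

print(nbinom.pmf(4, a, p))  # predictive probability of y_tilde = 4
\end{verbatim}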
\vspace{5mm}
\hrule
\vspace{5mm}
[Showing here that it is indeed a NB distribution]
$$\theta\sim NB(\alpha,\beta)\Rightarrow p(\theta) = \binom{\theta+\alpha-1}{\alpha - 1}\left(\dfrac{\beta}{\beta+1}\right)^\alpha\left(\dfrac{1}{\beta+1}\right)^\theta$$
\begin{center}so\end{center}
\begin{flalign*}
\tilde{y}\sim NB\left(\alpha + \sum{y_i},\beta+n\right)\Rightarrow p(\tilde{y})
&= \binom{\tilde{y}+\alpha+\sum{y_i}-1}{\alpha+\sum{y_i}-1}\left(\dfrac{\beta+n}{\beta+n+1}\right)^{\alpha+\sum{y_i}}\left(\dfrac{1}{\beta+n+1}\right)^{\tilde{y}}\\
&\\
&= \dfrac{\left(\alpha + \sum{y_i} + \tilde{y} - 1\right)!}{\left(\alpha + \sum{y_i} - 1\right)!\left(\tilde{y}\right)!}\left(\dfrac{\beta+n}{\beta+n+1}\right)^{\alpha+\sum{y_i}}\left(\dfrac{1}{\beta+n+1}\right)^{\tilde{y}}\\
&\\
&= \dfrac{\Gamma\left(\alpha + \sum{y_i} + \tilde{y}\right)}{\Gamma\left(\alpha + \sum{y_i}\right)\Gamma\left(\tilde{y}+1\right)}\left(\dfrac{\beta+n}{\beta+n+1}\right)^{\alpha+\sum{y_i}}\left(\dfrac{1}{\beta+n+1}\right)^{\tilde{y}}
\end{flalign*}
\vspace{5mm}
\hrule
\vspace{5mm}
\bigskip
[This is the result in Hoff. The direct derivation below instead produces an extra factor of $\prod_{i=1}^n y_i!$. The reason: the constant multiplying the posterior kernel must be the Gamma normalizing constant $(\beta+n)^{\alpha+\sum y_i}/\Gamma(\alpha+\sum y_i)$, whereas $C_n$ above also absorbs the likelihood factor $\prod\left(1/y_i!\right)$, which was dropped when the posterior was written out. Substituting the correct constant for $C$ removes the spurious $\prod y_i!$ in the final line.]
\begin{flalign*}
p\left(\tilde{y}|y_1,...,y_n\right)
&= \int_0^\infty p\left(\tilde{y}|\theta,y_1,...,y_n\right)p\left(\theta|y_1,...,y_n\right)d\theta\\
&\\
&= \int p\left(\tilde{y}|\theta\right)p\left(\theta|y_1,...,y_n\right)d\theta\\
&\\
&= C\int\left(\dfrac{1}{\tilde{y}!}\theta^{\tilde{y}}e^{-\theta}\right)\theta^{\alpha+\sum y_i - 1}e^{-(\beta+n)\theta}d\theta\\
&\\
&= \dfrac{C}{\tilde{y}!}\int\theta^{\tilde{y}+\alpha+\sum y_i - 1}e^{-(\beta+n+1)\theta}d\theta\\
&\\
&= \dfrac{C\Gamma\left(\tilde{y}+\alpha+\sum y_i\right)}{\Gamma\left(\tilde{y}+1\right)(\beta+n+1)^{\tilde{y}+\alpha+\sum y_i}}\int\dfrac{(\beta+n+1)^{\tilde{y}+\alpha+\sum y_i}}{\Gamma\left(\tilde{y}+\alpha+\sum y_i\right)}\theta^{\tilde{y}+\alpha+\sum y_i - 1}e^{-(\beta+n+1)\theta)}d\theta\\
&\\
&= C\cdot\dfrac{\Gamma\left(\tilde{y}+\alpha+\sum y_i\right)}{\Gamma\left(\tilde{y}+1\right)(\beta+n+1)^{\tilde{y}+\alpha+\sum y_i}}\\
&\\
&= \dfrac{\prod_{i=1}^n y_i!(\beta+n)^{\alpha+\sum y_i}}{\Gamma(\alpha+\sum y_i)}\cdot\dfrac{\Gamma\left(\tilde{y}+\alpha+\sum y_i\right)}{\Gamma\left(\tilde{y}+1\right)(\beta+n+1)^{\tilde{y}+\alpha+\sum y_i}}\\
&\\
&= \prod_{i=1}^n y_i! \cdot \dfrac{\Gamma\left(\tilde{y}+\alpha+\sum y_i\right)}{\Gamma(\tilde{y}+1)\Gamma(\alpha+\sum y_i)}\cdot \left(\dfrac{\beta+n}{\beta+n+1}\right)^{\alpha+\sum y_i} \cdot \left(\dfrac{1}{\beta+n+1}\right)^{\tilde{y}}\\
\end{flalign*}
\textcolor{red}{Hoff p.47:
\begin{itemize}
\item $b$ is interpreted as the number of prior observations
\item $a$ is interpreted as the sum of counts from $b$ prior observations
\end{itemize}
}
\textcolor{red}{ Hoff p. 49 (Birth rate example): $a = 2, b = 1$. }
\subsubsection{R Implementation}
This result has been used to create standard format R functions dpredPG(), ppredPG(), and rpredPG() for the Poisson-Gamma distribution for density, cumulative probability, and random sampling, respectively (see appendix). These functions are exercised in the following example.\\
Developing the random sample function rpredPG(): I need to establish the support of the predictive distribution $f_x$ from which to sample. The uniroot() function is not working because it keeps feeding non-integer values to dnbinom(). Strategy: a modified bisection method, as follows (a sketch in code appears after the list):\\
\begin{enumerate}
\item set a desired tolerance $\epsilon$.
\item Find the expected value $E_x$ (closed formula, see above).
\item Step to the right of $E_x$ by whole integers, in the sequence $E_x + \{1,2,4,...,2^n\}$, stopping at the first point $U = E_x + 2^n$ for which $f_x(U) < \epsilon$. This $U$ is the upper bound for the bisection method.
\item Bisect the interval, rounding to the nearest integer. Call the resulting mid-interval number $B$.
\item Test whether $0 \leq f_x(B) \leq \epsilon$. If so, DONE. If not:
\item Establish a new interval, choosing endpoints from $E_x$, $B$, and $U$ so that $f_x - \epsilon$ changes sign across the interval, and repeat the steps until the condition in step 5 is reached.
\end{enumerate}
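A hedged Python rendering of this doubling-plus-bisection idea is sketched below; it assumes the pmf is decreasing to the right of its mean, and all names and values are illustrative (the thesis's R implementation may differ in detail).
\begin{verbatim}
import numpy as np
from scipy.stats import nbinom

def support_upper_bound(pmf, mean, eps=1e-12):
    # Step right from the mean in doubling increments until the pmf
    # drops below eps, then bisect on integers to find the cutoff.
    # Assumes the pmf decreases to the right of its mean.
    step = 1
    hi = int(np.ceil(mean)) + step
    while pmf(hi) >= eps:
        step *= 2
        hi = int(np.ceil(mean)) + step
    lo = int(np.ceil(mean))
    while hi - lo > 1:
        mid = (lo + hi) // 2
        if pmf(mid) >= eps:
            lo = mid
        else:
            hi = mid
    return hi

a, p = 13.0, 0.25   # illustrative negative-binomial parameters
ub = support_upper_bound(lambda k: nbinom.pmf(k, a, p),
                         mean=nbinom.mean(a, p))
\end{verbatim}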
\subsubsection{Example}
Suppose we have 10 prior observations with counts 27, 79, 21, 100, 8, 4, 37, 15, 3, 97. Let $\alpha = 11$ and $\beta = 3$. For possible future counts $\tilde{y} = 1,\ldots,100$, the figures below show the predictive distribution from dpredPG(), the cumulative distribution from ppredPG(), and a histogram of random draws from rpredPG().
\includegraphics{Thesis-004}
\clearpage
\subsection{Normal Observation with Normal-Inverse Gamma Prior}
\subsubsection{One sample}
\paragraph{Derivation}
[Hoff p. 69ff]\\
Let $\left\{Y_1,...,Y_n|\theta,\sigma^2\right\}\overset{i.i.d.}{\sim}N\left(\theta,\sigma^2\right)$. Then the joint sampling density is
\begin{flalign*}
p\left(y_1,...,y_n|\theta,\sigma^2\right)
&= \prod_{i=1}^n p\left(y_i|\theta,\sigma^2\right)\\
&\\
&= \prod_{i=1}^n \dfrac{1}{\sqrt{2\pi\sigma^2}}e^{-\frac{1}{2}\left(\frac{y_i - \theta}{\sigma}\right)^2}\\
&\\
&= \left(2\pi\sigma^2\right)^{-\sfrac{n}{2}}e^{-\frac{1}{2}\sum_{i=1}^n\left(\frac{y_i - \theta}{\sigma}\right)^2}.\\
\end{flalign*}
%It can be shown that $\left\{\sum{y_i^2},\sum{y_i}\right\}$ and hence $\left\{\bar{y},s^2\right\}$ are sufficient statistics, where $\bar{y} = \sum{y_i}/n$ and $s^2 = \sum\left(y_i - \bar{y}\right)^2/(n-1)$.\\
%\vdots
Following Hoff (p. 74ff), for joint inference on both $\theta$ and $\sigma$, assume priors
\begin{flalign*}
\frac{1}{\sigma^2} &\sim \text{gamma}\left(\sfrac{\nu_0}{2},\sfrac{\nu_0\sigma_0^2}{2}\right)\\
&\\
\theta|\sigma^2 &\sim \text{normal}\left(\mu_0,\sfrac{\sigma^2}{\kappa_0}\right)\\
\end{flalign*}
where $\left(\sigma_0^2,\nu_0\right)$ are the sample variance and sample size of prior observations, and $\left(\mu_0, \kappa_0\right)$ are the sample mean and sample size of prior observations.\\
Note: $\mu_0$, $\kappa_0$, $\nu_0$, and $\sigma_0^2$ come from prior knowledge. [in the Hoff example (Midge Wing Length), $\kappa_0$ and $\nu_0$ are both set to $1$ so that ``our prior distributions are only weakly centered around these estimates from other populations.'']\\
From this we derive joint posterior distribution
\begin{flalign*}
\left\{\theta|y_1,...,y_n,\sigma^2\right\} &\sim \text{normal}\left(\mu_n,\sfrac{\sigma^2}{\kappa_n}\right)\\
&\\
\left\{\sigma^2|y_1,...,y_n\right\} &\sim \text{inverse-gamma}\left(\sfrac{\nu_n}{2},\sfrac{\sigma^2_n\nu_n}{2}\right).
\end{flalign*}
where
\begin{flalign*}
\kappa_n &= \kappa_0 + n\\
&\\
\mu_n &= \frac{\kappa_0\mu_0+n\bar{y}}{\kappa_n}\\
&\\
\nu_n &= \nu_0 + n\\
&\\
\sigma_n^2 &= \frac{1}{\nu_n}\left[\nu_0\sigma_0^2 + (n-1)s^2 + \frac{\kappa_0 n}{\kappa_n}\left(\bar{y}-\mu_0\right)^2\right].\\
\end{flalign*}
Here $\bar{y} = \frac{1}{n}\sum_{i=1}^n y_i$ is the sample mean and $s^2 = \frac{1}{n-1}\sum_{i=1}^n\left(y_i - \bar{y}\right)^2$ is the sample variance.\\
From the joint posterior distribution we generate marginal samples by means of the Monte Carlo method (Hoff, p. 77):
\begin{flalign*}
\begin{matrix}
\sigma^{2(1)}\sim \text{inverse-gamma}\left(\nu_n/2,\sigma^2_n\nu_n/2\right), & \theta^{(1)}\sim \text{normal}\left(\mu_n,\sigma^{2(1)}/\kappa_n\right) \\
\vdots & \vdots \\
\sigma^{2(S)}\sim \text{inverse-gamma}\left(\nu_n/2,\sigma^2_n\nu_n/2\right), & \theta^{(S)}\sim \text{normal}\left(\mu_n,\sigma^{2(S)}/\kappa_n\right) \\
\end{matrix}
\end{flalign*}
For prediction of future $\tilde{y}|y_1,...,y_n,\theta,\sigma^2$, generate $\tilde{y}_i \sim \text{normal}\left(\theta^{(i)},\sigma^{2(i)}\right)$.\\
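A minimal Python sketch of this Monte Carlo scheme follows (the thesis's rpredNormIG() is in R; the parameter values below are illustrative and match the midge-wing example later in this section):
\begin{verbatim}
import numpy as np
rng = np.random.default_rng(0)

def rpred_norm_ig(S, mu_n, kappa_n, nu_n, sigma2_n):
    # sigma^2 ~ inverse-gamma(nu_n/2, nu_n*sigma2_n/2):
    # draw a gamma on the precision scale, then invert
    sigma2 = 1.0 / rng.gamma(nu_n / 2, 2.0 / (nu_n * sigma2_n), size=S)
    theta = rng.normal(mu_n, np.sqrt(sigma2 / kappa_n))
    return rng.normal(theta, np.sqrt(sigma2))   # predictive draws

y_tilde = rpred_norm_ig(5000, mu_n=1.814, kappa_n=10,
                        nu_n=10, sigma2_n=0.0153)
\end{verbatim}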
For prediction without the influence of any previous knowledge (Hoff p. 79), we can employ Jeffreys prior $\tilde{p}\left(\theta,\sigma^2\right) = 1/\sigma^2$. This leads to the same conditional distribution for $\theta$ but a gamma$\left(\frac{n-1}{2},\frac{1}{2}\sum\left(y_i - \bar{y}\right)^2\right)$ distribution for $1/\sigma^2$. This joint posterior distribution can be used to predict future $\tilde{y}$ by first drawing $\theta,\sigma^2$ and then simulating $\tilde{y}\sim\text{normal}\left(\theta,\sigma^2\right)$. Alternatively, the joint posterior can be integrated to show that
$$\dfrac{\theta-\bar{y}}{s/\sqrt{n}}|y_1,...,y_n\sim t_{n-1}.$$
The resulting predictive distribution for $\tilde{y}$ is a t-distribution with location $\bar{y}$, scale $s\sqrt{1+1/n}$, and $n-1$ degrees of freedom (Gelman et al., p. 66).
\paragraph{R Implementation}
Standard format R functions dpredNormIG(), ppredNormIG(), and rpredNormIG() have been created for the Normal-Inverse Gamma distribution for density, cumulative probability, and random sampling, respectively (see appendix). These functions all include options for implementation with or without previous knowledge, as desired. If Jeffreys prior is used, the functions simply implement R's Student's t-distribution functions rt(), dt(), and pt(), applying the location and scale parameters as described above. For predictions using previous knowledge, the functions work as follows. For the random sampler rpredNormIG(), the Monte Carlo method described above is directly employed. The predictive density and cumulative predictive density functions (dpredNormIG() and ppredNormIG(), respectively) depend on the random sample: ppredNormIG() utilizes the empirical cumulative distribution function ecdf() from R's stats package, while dpredNormIG() utilizes a Kernel Density Estimation (KDE) method and R's built-in density() function. The KDE is computed by definition, using a normal kernel:
$$\hat{f}_K(x) = \frac{1}{n}\sum_{i=1}^n\frac{1}{h}K\left(\frac{x-X_i}{h}\right),$$
where
\begin{flalign*}
X_i & \text{ is the random sample generated using rpredNormIG()}\\
&\\
K & \text{ is Normal(0,1)}\\
&\\
h & \text{ is the bandwidth from R's density() function (that is, } h = \text{ density}(X_i)\text{\$bw)}\\
\end{flalign*}
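Evaluated by definition, this estimator is just an average of scaled kernels; a minimal Python sketch (the bandwidth $h$ is assumed to be supplied by the caller):
\begin{verbatim}
import numpy as np
from scipy.stats import norm

def kde_normal(x, sample, h):
    # f_hat(x) = (1/n) * sum_i (1/h) * K((x - X_i)/h), with K = N(0,1)
    x = np.atleast_1d(x)[:, None]
    sample = np.asarray(sample, dtype=float)[None, :]
    return norm.pdf((x - sample) / h).mean(axis=1) / h
\end{verbatim}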
These functions are exercised in the following example.\\
\paragraph{Example}
\textit{Example (Hoff p. 72ff, using data from Grogan and Wirth (1981)): Midge wing length}\\
Grogan and Wirth (1981) provide 9 measurements of midge wing length, in millimeters: $y = \{1.64, 1.7, 1.72, 1.74, 1.82, 1.82, 1.82, 1.90, 2.08\}$. Previous studies suggest values $\mu_0 = 1.9$ and $\sigma_0^2 = 0.01$. We choose $\kappa_0 = \nu_0 = 1$ ``...so that our prior distributions are only weakly centered around these estimates from other populations'' (Hoff p. 76). We compute
\begin{flalign*}
\bar{y} &= 1.804\\
&\\
\text{var}(y) &= 0.0169\\
&\\
\kappa_n &= 1 + 9 = 10\\
&\\
\mu_n &= \frac{1 \cdot 1.9 + 9 \cdot 1.804}{10} = 1.814\\
&\\
\nu_n &= 1 + 9 = 10\\
&\\
\sigma_n^2 &= \frac{1}{10}\left[1 \cdot 0.01 + (9-1) \cdot 0.0169 + \frac{1 \cdot 9}{10}\left(1.804 - 1\right)^2\right] = 0.0153\\
\end{flalign*}
Thus $\sfrac{\nu_n}{2} = 5$ and $\sfrac{\nu_n\sigma_n^2}{2} = 0.7662$ and we have posteriors
\begin{flalign*}
\left\{\theta|y_1,...,y_n,\sigma^2\right\} &\sim \text{normal}\left(1.814,\sfrac{\sigma^2}{10}\right)\\
&\\
\left\{\sigma^2|y_1,...,y_n\right\} &\sim \text{inverse-gamma}(5,0.7662)\\
\end{flalign*}
The plot below illustrates the influence of previous knowledge of the population mean, and compares to the predictions resulting from Jeffreys prior.
\includegraphics{Thesis-005}
\clearpage
\subsubsection{Two samples}
\paragraph{Derivation}
For a Bayesian analysis comparing two groups we use the following sampling model (Hoff p. 127):
\begin{flalign*}
Y_{i,1} &= \mu + \delta + \epsilon_{i,1}\\
Y_{i,2} &= \mu - \delta + \epsilon_{i,2}\\
\left\{\epsilon_{i,j}\right\} &\sim\text{i.i.d. normal}\left(0,\sigma^2\right).
\end{flalign*}
Letting $\theta_1 = \mu + \delta$ and $\theta_2 = \mu - \delta$ we see that $\delta = \left(\theta_1 - \theta_2\right)/2$ is half the population difference in means, and $\mu = \left(\theta_1 + \theta_2\right)/2$ is the pooled average. We'll assume conjugate prior distributions
\begin{flalign*}
p\left(\mu,\delta,\sigma^2\right) &= p(\mu) \times p(\delta) \times p\left(\sigma^2\right)\\
\mu &\sim \text{normal}\left(\mu_0,\gamma^2_0\right)\\
\delta &\sim \text{normal}\left(\delta_0,\tau^2_0\right)\\
\sigma^2 &\sim \text{inverse-gamma}\left(\nu_0/2,\nu_0\sigma^2_0/2\right),
\end{flalign*}
\noindent where $\nu_0$ as before is the assumed prior sample size. The full conditional distributions follow:\\
\indent $\left\{\mu|\mathbf{y}_1,\mathbf{y}_2,\delta,\sigma^2\right\} \sim \text{normal}\left(\mu_n,\gamma^2_n\right)$, where
\begin{flalign*}
\mu_n &= \gamma^2_n \times \left[\dfrac{\mu_0}{\gamma^2_0} + \dfrac{\sum_{i=1}^{n_1}\left(y_{i,1}-\delta\right) + \sum_{i=1}^{n_2}\left(y_{i,2}+\delta\right)}{\sigma^2}\right]\\
&\\
\gamma^2_n &=\left[\dfrac{1}{\gamma^2_0} + \dfrac{\left(n_1 + n_2\right)}{\sigma^2}\right]^{-1}
\end{flalign*}
\indent $\left\{\delta|\mathbf{y}_1,\mathbf{y}_2,\mu,\sigma^2\right\} \sim \text{normal}\left(\delta_n,\tau^2_n\right)$, where
\begin{flalign*}
\delta_n &= \tau^2_n \times \left[\dfrac{\delta_0}{\tau^2_0} + \dfrac{\sum_{i=1}^{n_1}\left(y_{i,1}-\mu\right) - \sum_{i=1}^{n_2}\left(y_{i,2}-\mu\right)}{\sigma^2}\right]\\
&\\
\tau^2_n &=\left[\dfrac{1}{\tau^2_0} + \dfrac{\left(n_1 + n_2\right)}{\sigma^2}\right]^{-1}
\end{flalign*}
\indent $\left\{\sigma^2|\mathbf{y}_1,\mathbf{y}_2,\mu,\delta\right\} \sim \text{inverse-gamma}\left(\frac{\nu_n}{2},\frac{\nu_n\sigma^2_n}{2}\right)$, where
\begin{flalign*}
\nu_n &= \nu_0 + n_1 + n_2\\
\\
\nu_n\sigma^2_n &= \nu_0\sigma^2_0 + \sum_{i=1}^{n_1}\left(y_{i,1} - [\mu + \delta]\right)^2 + \sum_{i=1}^{n_2}\left(y_{i,2} - [\mu - \delta]\right)^2\\
\end{flalign*}
\paragraph{R Implementation}
The standard format R function rpredNormIG2() implements a Gibbs sampler to approximate the posterior distribution $p\left(\mu,\delta,\sigma^2|\mathbf{y}_1,\mathbf{y}_2\right)$, from which to generate predictions for the two populations as follows:
\begin{enumerate}
\item Set initial values $\mu = \frac{\theta_1 + \theta_2}{2}$ and $\delta = \frac{\theta_1 - \theta_2}{2}$
\item Generate a single $\sigma^2|\mathbf{y_1},\mathbf{y_2},\mu,\delta$
\item Generate a single $\mu|\mathbf{y_1},\mathbf{y_2},\delta,\sigma^2$
\item Generate a single $\delta|\mathbf{y_1},\mathbf{y_2},\mu,\sigma^2$
\item Predict $\tilde{y}_1\sim \text{normal}\left(\mu+\delta,\sigma^2\right)$ and $\tilde{y}_2\sim \text{normal}\left(\mu-\delta,\sigma^2\right)$
\end{enumerate}
The user provides the two samples $\mathbf{y_1}$ and $\mathbf{y_2}$ along with values for $\mu_0, \sigma^2_0, \delta_0, \tau^2_0, \nu_0$, and desired prediction sample size $N$. The function returns $N$ predictions for each population and the vectors of generated values for $\mu$, $\delta$, and $\sigma^2$.
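To make the sweep concrete, here is a hedged Python sketch of a single Gibbs iteration implementing the full conditional distributions above (variable names are illustrative; the thesis's rpredNormIG2() is in R):
\begin{verbatim}
import numpy as np
rng = np.random.default_rng(1)

def gibbs_sweep(y1, y2, mu, delta, mu0, g20, d0, t20, nu0, s20):
    n1, n2 = len(y1), len(y2)
    # sigma^2 | y1, y2, mu, delta
    nun = nu0 + n1 + n2
    ssr = (nu0 * s20 + np.sum((y1 - (mu + delta))**2)
                     + np.sum((y2 - (mu - delta))**2))
    sigma2 = 1.0 / rng.gamma(nun / 2, 2.0 / ssr)
    # mu | y1, y2, delta, sigma^2
    g2n = 1.0 / (1.0 / g20 + (n1 + n2) / sigma2)
    mun = g2n * (mu0 / g20
                 + (np.sum(y1 - delta) + np.sum(y2 + delta)) / sigma2)
    mu = rng.normal(mun, np.sqrt(g2n))
    # delta | y1, y2, mu, sigma^2
    t2n = 1.0 / (1.0 / t20 + (n1 + n2) / sigma2)
    dn = t2n * (d0 / t20
                + (np.sum(y1 - mu) - np.sum(y2 - mu)) / sigma2)
    delta = rng.normal(dn, np.sqrt(t2n))
    # predictive draws for the two groups
    y1_t = rng.normal(mu + delta, np.sqrt(sigma2))
    y2_t = rng.normal(mu - delta, np.sqrt(sigma2))
    return mu, delta, sigma2, y1_t, y2_t
\end{verbatim}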
\paragraph{Example}
Hoff p. 128-129 \textit{Analysis of math score data}\\
Math score data for two schools were based on results of a national exam in the United States, standardized to produce a nationwide mean of 50 and a standard deviation of 10. Unless the two schools were known in advance to be extremely exceptional, reasonable prior parameters can be based on this information. For the prior distributions of $\mu$ and $\sigma^2$, we'll take $\mu_0 = 50$ and $\sigma^2_0 = 10^2 = 100$, although this latter value is likely to be an overestimate of the within-school sampling variability. We'll make these prior distributions somewhat diffuse, with $\gamma^2_0 = 25^2 = 625$ and $\nu_0 = 1$. For the prior distribution on $\delta$, choosing $\delta_0 = 0$ represents the prior opinion that $\theta_1 > \theta_2$ and $\theta_2 > \theta_1$ are equally probable. Finally, since the scores are bounded between 0 and 100, half the difference between $\theta_1$ and $\theta_2$ must be less than 50 in absolute value, so a value of $\tau^2_0 = 25^2 = 625$ seems reasonably diffuse.\\\\
The results of a call to rpredNormIG2$\left(\mathbf{y}_1,\mathbf{y}_2,\mu_0,\sigma^2_0,\delta_0,\tau^2_0,N\right)$ are summarized in the following plot.\\
\includegraphics{Thesis-006}
\subsubsection{$k$ samples: Comparing multiple groups}
For two-level data consisting of groups and units within groups, denote $y_{i,j}$ as the data on the $i$th unit in group $j$. We have the hierarchical normal model (Hoff p. 132ff):
$$\phi_j = \left\{\theta_j,\sigma^2\right\}, p\left(y|\phi_j\right) = \text{normal}\left(\theta_j,\sigma^2\right) \text{ (within-group model)}$$
$$\psi_j = \left\{\mu,\tau^2\right\}, p\left(\theta_j|\psi\right) = \text{normal}\left(\mu,\tau^2\right) \text{ (between-group model)}$$
We use standard semiconjugate normal and inverse-gamma prior distributions for the fixed but unknown parameters in the model:
\begin{flalign*}
\sigma^2 &\sim \text{inverse-gamma}\left(\frac{\nu_0}{2},\frac{\nu_0\sigma^2_0}{2}\right)\\
&\\
\tau^2 &\sim \text{inverse-gamma}\left(\frac{\eta_0}{2},\frac{\eta_0\tau^2_0}{2}\right)\\
&\\
\mu &\sim \text{normal}\left(\mu_0,\gamma^2_0\right)\\
\end{flalign*}
\paragraph{Derivation}
As with the two-sample problem, joint posterior inferences for the unknown parameters can be made by constructing a Gibbs sampler to approximate the posterior distribution $p\left(\theta_1,...,\theta_m,\mu,\tau^2,\sigma^2|\mathbf{y}_1,...,\mathbf{y}_m\right)$. For this we need the full conditional distribution of each parameter (Hoff pp. 134-135):
%%$$\left\{\mu|\theta_1,...,\theta_m,\tau^2\right\} \sim \text{normal}\left(\dfrac{m\bar{\theta}/\tau^2 + \mu_0/\gamma^2_0}{m/\tau^2 + 1/\gamma^2_0},\dfrac{1}{m/\tau^2+1/\gamma^2_0}\right)$$
$$\left\{\mu|\theta_1,...,\theta_m,\tau^2\right\} \sim \text{normal}\left(\dfrac{\frac{m\bar{\theta}}{\tau^2} + \frac{\mu_0}{\gamma^2_0}}{\frac{m}{\tau^2} + \frac{1}{\gamma^2_0}},\dfrac{1}{\frac{m}{\tau^2}+\frac{1}{\gamma^2_0}}\right)$$
$$\left\{\tau^2|\theta_1,...,\theta_m,\mu\right\} \sim \text{inverse-gamma}\left(\dfrac{\eta_0 + m}{2},\dfrac{\eta_0\tau^2_0 + \sum\left(\theta_j-\mu\right)^2}{2}\right)$$
$$\left\{\theta_j|y_{1,j},...,y_{n_j,j},\mu,\tau^2,\sigma^2\right\} \sim \text{normal}\left(\dfrac{\frac{n_j\bar{y}_j}{\sigma^2} + \frac{\mu}{\tau^2}}{\frac{n_j}{\sigma^2}+\frac{1}{\tau^2}},\dfrac{1}{\frac{n_j}{\sigma^2}+\frac{1}{\tau^2}}\right)$$
$$\left\{\sigma^2|\mathbf{\theta,y_1,...,y_n}\right\} \sim \text{inverse-gamma}\left(\dfrac{1}{2}\left[\nu_0 + \sum_{j=1}^m n_j\right],\dfrac{1}{2}\left[\nu_0\sigma^2_0 + \sum_{j=1}^m\sum_{i=1}^{n_j}\left(y_{i,j}-\theta_j\right)^2\right]\right).$$
Note that $\sum\sum\left(y_{i,j}-\theta_j\right)^2$ is the sum of squared residuals across all groups, conditional on the within-group means, and so the conditional distribution concentrates probability around a pooled-sample estimate of the variance.
\paragraph{R Implementation}
The standard format R function rpredNormIGk() implements a Gibbs sampler for posterior approximation of each unknown quantity by sampling from its full conditional distribution. From these posteriors, predictions are generated, as follows:
\begin{enumerate}
\item Set prior parameter values:
\begin{flalign*}
\nu_0,\sigma^2_0 \text{ for } p\left(\sigma^2\right)\\
\eta_0,\tau^2_0 \text{ for } p\left(\tau^2\right)\\
\mu_0,\gamma^2_0 \text{ for } p\left(\mu\right).
\end{flalign*}
\item Set initial states for the unknown parameters:
\begin{flalign*}
\theta_1^{(1)} &= \mathbf{\bar{y}_1},...,\theta_m^{(1)} = \mathbf{\bar{y}_m}\\
\mu^{(1)} &= \text{mean}\left(\theta_1^{(1)},...,\theta_m^{(1)}\right)\\
\tau^{2(1)} &= \text{var}\left(\theta_1^{(1)},...,\theta_m^{(1)}\right)\\
\sigma^{2(1)} &= \text{mean}\left(\text{var}\left(\mathbf{y}_1\right),...,\text{var}\left(\mathbf{y}_m\right)\right)
\end{flalign*}
\item For $s\in\{1,...,S\}$, sample
\begin{enumerate}
\item $\mu^{(s+1)} \sim p\left(\mu|\theta_1^{(s)},...,\theta_m^{(s)},\tau^{2(s)}\right)$
\item $\tau^{2(s+1)} \sim p\left(\tau^2|\theta_1^{(s)},...,\theta_m^{(s)},\mu^{(s+1)}\right)$
\item $\sigma^{2(s+1)} \sim p\left(\sigma^2|\theta_1^{(s)},...,\theta_m^{(s)},\mathbf{y}_1,...,\mathbf{y}_m\right)$
\item $\theta_j^{(s+1)} \sim p\left(\theta_j|\mu^{(s+1)},\tau^{2(s+1)},\sigma^{2(s+1)},\mathbf{y}_j\right)$ for $j \in \{1,...,m\}$
\end{enumerate}
\item For $s\in\{1,...,S\}$, generate prediction $\tilde{y}_j^{(s)} \sim \text{normal}\left(\theta_j^{(s)},\sigma^{2(s)}\right)$
\end{enumerate}
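A hedged Python sketch of one sweep of this Gibbs sampler, implementing the full conditionals above (names illustrative; the thesis's rpredNormIGk() is in R):
\begin{verbatim}
import numpy as np
rng = np.random.default_rng(2)

def hier_sweep(ys, theta, mu, tau2, sigma2,
               mu0, g20, eta0, t20, nu0, s20):
    # ys: list of m group data vectors; theta: length-m array
    m = len(ys)
    # mu | theta, tau^2
    v = 1.0 / (m / tau2 + 1.0 / g20)
    mu = rng.normal(v * (m * theta.mean() / tau2 + mu0 / g20),
                    np.sqrt(v))
    # tau^2 | theta, mu
    tau2 = 1.0 / rng.gamma((eta0 + m) / 2,
                           2.0 / (eta0 * t20 + np.sum((theta - mu)**2)))
    # sigma^2 | theta, y
    n = sum(len(y) for y in ys)
    ssr = sum(np.sum((y - th)**2) for y, th in zip(ys, theta))
    sigma2 = 1.0 / rng.gamma((nu0 + n) / 2, 2.0 / (nu0 * s20 + ssr))
    # theta_j | y_j, mu, tau^2, sigma^2
    for j, y in enumerate(ys):
        vj = 1.0 / (len(y) / sigma2 + 1.0 / tau2)
        mj = vj * (len(y) * y.mean() / sigma2 + mu / tau2)
        theta[j] = rng.normal(mj, np.sqrt(vj))
    # predictive draw for each group
    y_pred = rng.normal(theta, np.sqrt(sigma2))
    return theta, mu, tau2, sigma2, y_pred
\end{verbatim}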
\paragraph{Example}
Returning to the math scores example, data for 10th-grade students from 100 large urban schools (each having 10th-grade enrollment of at least 400) is summarized in the following plots.
\includegraphics{Thesis-007}
\includegraphics{Thesis-008}
% The prior parameters that need to be specified are
% \begin{flalign*}
% \left(\nu_0,\sigma^2_0\right) \text{ for } p\left(\sigma^2\right)\\
% \left(\eta_0,\tau^2_0\right) \text{ for } p\left(\tau^2\right)\\
% \left(\mu_0,\gamma^2_0\right) \text{ for } p\left(\mu\right).
% \end{flalign*}
% \begin{flalign*}
% \nu_0,\sigma^2_0 \text{ for } p\left(\sigma^2\right)\\
% \eta_0,\tau^2_0 \text{ for } p\left(\tau^2\right)\\
% \mu_0,\gamma^2_0 \text{ for } p\left(\mu\right).
% \end{flalign*}
For prediction, we'll use the following prior values (Hoff p. 137):
\begin{flalign*}
\sigma^2_0&: 100 \text{ (within-school variance)}\\
\nu_0&: 1 \text{ (prior sample size)}\\
\tau^2_0&: 100 \text{ (between-school variance)}\\
\eta_0&: 1 \text{ (prior sample size)}\\
\mu_0&: 50 \text{ (prior mean of school means)}\\
\gamma^2_0&: 25 \text{ (prior variance of school means)}
\end{flalign*}
\textcolor{red}{Below: Pick a couple of schools that show different relationships between the data and the prediction}
\includegraphics{Thesis-009}
\paragraph{Ranking Treatments}
\clearpage
\section{Chapter 2: Normal Regression with Zellner's $g$-prior}
\subsection{Least Squares Estimation with Example (Hoff p. 149ff.)}
Regression modeling is concerned with describing how the sampling distribution of one random variable $Y$ varies with another variable or set of variables $\mathbf{x} = \left(x_1,...,x_p\right)$. Specifically, a regression model postulates a form for $p(y|\mathbf{x})$, the conditional distribution of $Y$ given $\mathbf{x}$. Estimation of $p(y|\mathbf{x})$ is made using data $y_1,...,y_n$ that are gathered under a variety of conditions $\mathbf{x}_1,...,\mathbf{x}_n$.
The normal linear regression model specifies that, in addition to $E[Y|\mathbf{x}]$ being linear, the sampling variability around the mean is i.i.d. normal:
\begin{flalign*}
\epsilon_1,...,\epsilon_n &\overset{\text{i.i.d}}{\sim} \text{normal}\left(0,\sigma^2\right)\\
Y_i &= \boldsymbol\beta^T \mathbf{x}_i + \epsilon_i
\end{flalign*}
This model provides a complete specification of the joint probability density of observed data $y_1,...,y_n$ conditional upon $\mathbf{x}_1,...,\mathbf{x}_n$ and values of $\boldsymbol\beta$ and $\sigma^2$:
\begin{flalign}
p\left(y_1,...y_n|\mathbf{x}_1,...,\mathbf{x}_n,\boldsymbol\beta,\sigma^2\right) &= \prod_{i=1}^n p\left(y_i|\mathbf{x}_i,\boldsymbol\beta,\sigma^2\right) \nonumber\\
&= \left(2\pi\sigma^2\right)^{-n/2}\text{exp}\left\{-\frac{1}{2\sigma^2}\sum_{i=1}^n\left(y_i - \boldsymbol\beta^T\mathbf{x}_i\right)^2\right\} \label{conditional_density}
\end{flalign}
Another way to write this joint probability density is in terms of the multivariate normal distribution: Let $\mathbf{y}$ be the $n$-dimensional column vector $\left(y_1,...,y_n\right)^T$ and let $\mathbf{X}$ be the $n \times p$ matrix whose $i$th row is $\mathbf{x}_i = \left\{ x_{i,1}, x_{i,2}, ..., x_{i,p} \right\}$. Then the normal regression model is
$$\{\mathbf{y}|\mathbf{X},\boldsymbol\beta,\sigma^2\} \sim \text{multivariate normal}\left(\mathbf{X}\boldsymbol\beta,\sigma^2\mathbf{I}\right),$$
where $\mathbf{I}$ is the $p \times p$ identity matrix and
\begin{equation*}
\mathbf{X}\boldsymbol\beta =
\begin{pmatrix}
\mathbf{x}_1 \\
\mathbf{x}_2 \\
\vdots \\
\mathbf{x}_n
\end{pmatrix}
\begin{pmatrix}
\beta_1 \\
\beta_2 \\
\vdots \\
\beta_p
\end{pmatrix}
=
\begin{pmatrix}
\beta_1 x_{1,1} + \cdots + \beta_p x_{1,p} \\
\vdots \\
\beta_1 x_{n,1} + \cdots + \beta_p x_{n,p} \\
\end{pmatrix}
=
\begin{pmatrix}
E\left[Y_1|\mathbf{\boldsymbol\beta},\mathbf{x}_1\right] \\
\vdots \\
E\left[Y_n|\mathbf{\boldsymbol\beta},\mathbf{x}_n\right] \\
\end{pmatrix}
\end{equation*}
The density (\ref{conditional_density}) depends on $\boldsymbol\beta$ through the residuals $\left(y_i - \boldsymbol\beta^T\mathbf{x}_i\right)$. We compute the ordinary least squares estimates
$$\hat{\boldsymbol\beta}_{ols} = \left(\mathbf{X}^T\mathbf{X}\right)^{-1}\mathbf{X}^T\mathbf{y}$$
and
$$\hat{\sigma}^2_{ols} = \frac{SSR\left(\hat{\boldsymbol\beta}_{ols}\right)}{(n-p)} = \frac{\sum\left(y_i - \hat{\boldsymbol\beta}_{ols}^T x_i\right)^2}{(n-p)}.$$
\clearpage
\textit{Example: Oxygen uptake (from Kuehl (2000), Hoff p. 149ff)}
Twelve healthy men who did not exercise regularly were recruited to take part in a study of the effects of two different exercise regimens on oxygen uptake. Six of the twelve men were randomly assigned to a 12-week flat-terrain running program, and the remaining six were assigned to a 12-week step aerobics program. The maximum oxygen uptake of each subject was measured (in liters per minute) while running on an inclined treadmill, both before and after the 12-week program. Of interest is how a subject's change in maximal oxygen uptake may depend on which program they were assigned to. However, other factors, such as age, are expected to affect the change in maximal uptake as well. The results are shown here:
\includegraphics{Thesis-010}
Hoff's regression model:
\begin{align}
Y_i &= \beta_1x_{i,1} + \beta_2x_{i,2} + \beta_3x_{i,3} + \beta_4x_{i,4} + \epsilon_i, \text{ where} \label{example_model}\\
x_{i,1} &= 1 \text{ for each subject } i \nonumber \\
x_{i,2} &= 0 \text{ if subject } i \text{ is on the running program, } 1 \text{ if on the aerobics program} \nonumber \\
x_{i,3} &= \text{ age of subject } i \nonumber \\
x_{i,4} &= x_{i,2} \times x_{i,3} \nonumber
\end{align}
Under this model the conditional expectations of $Y$ for the two different levels of $x_{i,2}$ are
\begin{flalign*}
E[Y|\mathbf{x}] &= \beta_1 + \beta_3 \times (age) \text{ if } x_2 = 0, \text{ and}\\
E[Y|\mathbf{x}] &= \left(\beta_1 + \beta_2\right) + \left(\beta_3 + \beta_4\right) \times (age) \text{ if } x_2 = 1
\end{flalign*}
In other words, the model assumes that the relationship is linear in age for both exercise groups, with the difference in intercepts given by $\beta_2$ and the difference in slopes given by $\beta_4$. If we assumed that $\beta_2 = \beta_4 = 0$, then we would have identical lines for both groups. If we assumed $\beta_2 \ne 0$ and $\beta_4 = 0$ then we would have a different line for each group but they would be parallel. Allowing all coefficients to be non-zero gives us two unrelated lines. Some different possibilities are depicted graphically below:\\\\
\includegraphics{Thesis-011}
Let's find the least squares regression estimates for the model (\ref{example_model}), and use the results to evaluate the differences between the two exercise groups. The ages of the 12 subjects, along with their observed changes in maximal oxygen uptake, are
\begin{flalign*}
\mathbf{x}_3 &= (23,22,22,25,27,20,31,23,27,28,22,24)\\
\mathbf{y} &= (-0.87,-10.74,-3.27,-1.97,7.50,-7.25,17.05,4.96,10.40,11.05,0.26,2.51),
\end{flalign*}
\noindent with the first six elements of each vector corresponding to the subjects in the running group and the latter six corresponding to subjects in the aerobics group. After constructing the $12 \times 4$ matrix $\mathbf{X} = (\mathbf{x}_1\, \mathbf{x}_2\, \mathbf{x}_3\, \mathbf{x}_4)$, the matrices $\mathbf{X}^T\mathbf{X}$ and $\mathbf{X}^T\mathbf{y}$ can be computed, from which we get $\boldsymbol\beta_{ols} = (-51.29,13.11,2.09,-0.32)^T$:\\
This means that the estimated linear relationship between uptake and age has an intercept and slope of -51.29 and 2.09 for the running group, and -51.29 + 13.11 = -38.18 and 2.09 - 0.32 = 1.77 for the aerobics group. These two lines are plotted in the fourth panel of Figure XX. We obtain the unbiased estimate $\hat{\sigma}^2_{ols} = SSR(\hat{\boldsymbol\beta}_{ols})/(n-p) = 8.54$, and use this to compute the standard errors of the components of $\hat{\boldsymbol\beta}_{ols}$, which are 12.25, 15.76, 0.53, and 0.65, respectively. Comparing the values of $\hat{\boldsymbol\beta}_{ols}$ to their standard errors suggests that the evidence for differences between the two exercise regimens is not very strong.
\hrulefill \\
\textcolor{red}{"Comparing the values of $\hat{\boldsymbol\beta}_{ols}$ to their standard errors:"}\\
Difference in Intercept:\\
$$H_0: Intercept_{running} - Intercept_{aerobic} = 0; H_A: Intercept_{running} - Intercept_{aerobic} \ne 0$$
$$H_0: \beta_1 - (\beta_1 + \beta_2) = -\beta_2 = 0 \text{ (that is } \beta_2 = 0); H_A: \beta_2 \ne 0$$
$$T = \frac{\beta_2 - 0}{SE_{\beta_2}} = \frac{13.11}{15.76} = 0.83$$
$$\longrightarrow p \approx 0.43\longrightarrow \text{ fail to reject } H_0 \text{ and conclude no significant difference in intercept}$$
Difference in Slope:\\
$$H_0: Slope_{running} - Slope_{aerobic} = 0; H_A: Slope_{running} - Slope_{aerobic} \ne 0$$
$$H_0: \beta_3 - (\beta_3 + \beta_4) = 0 \text{ (that is } \beta_4 = 0); H_A: \beta_4 \ne 0$$
$$T = \frac{\beta_4 - 0}{SE_{\beta_4}} = \frac{-0.32}{0.65} = -0.49$$
$$\longrightarrow p \approx 0.64\longrightarrow \text{ fail to reject } H_0 \text{ and conclude no significant difference in slope}$$
\hrulefill \\
\begin{Schunk}
\begin{Sinput}
> n <- length(y)
> x1 <- rep(1,n)
> x4 <- x2*x3
> X = cbind(x1,x2,x3,x4)
> p = ncol(X)
> beta.ols<- solve(t(X)%*%X)%*%t(X)%*%y
> sig2.ols = (t(y-X%*%beta.ols)%*%(y-X%*%beta.ols)/(n-p))[1]
> sig2.ols
\end{Sinput}
\begin{Soutput}
[1] 8.542477
\end{Soutput}
\begin{Sinput}
> #sampling variance-covariance matrix of beta.ols:
> SIG2.ols = solve(t(X)%*%X)*sig2.ols
> #standard errors for the components of beta.ols:
> SE.ols = sqrt(diag(SIG2.ols))
> Score = beta.ols/SE.ols
> CIL = beta.ols - SE.ols*qt(.975,10)
> CIU = beta.ols + SE.ols*qt(.975,10)
> betadata = cbind(beta.ols,SE.ols,CIL,CIU)
> colnames(betadata) = c("beta.ols","SE.ols","CIL","CIU")
> betadata
\end{Sinput}
\begin{Soutput}
beta.ols SE.ols CIL CIU
x1 -51.2939459 12.2522126 -78.5935768 -23.994315
x2 13.1070904 15.7619762 -22.0127811 48.226962
x3 2.0947027 0.5263585 0.9219028 3.267503
x4 -0.3182438 0.6498086 -1.7661075 1.129620
\end{Soutput}
\end{Schunk}
\clearpage
\subsection{Bayesian Estimation for a Regression Model (Hoff p. 154ff)}
\subsubsection{Derivation}
\paragraph{A semiconjugate prior distribution}
Hoff proposes a semiconjugate prior distribution for $\boldsymbol\beta$ and $\sigma^2$ to be used when there is information available about the parameters. The sampling density of the data (Equation \ref{conditional_density}) is
$$p(\mathbf{y}|\mathbf{X},\boldsymbol\beta,\sigma^2) \propto \text{exp}\{-\frac{1}{2\sigma^2}\text{SSR}(\boldsymbol\beta)\} = \text{exp}\{-\frac{1}{2\sigma^2}[\mathbf{y}^T\mathbf{y} - 2\boldsymbol\beta^T\mathbf{X}^T\mathbf{y}+\boldsymbol\beta^T\mathbf{X}^T\mathbf{X}\boldsymbol\beta]\}.$$
The role that $\boldsymbol\beta$ plays in the exponent looks very similar to that played by $\mathbf{y}$, and the distribution of $\mathbf{y}$ is multivariate normal. This suggests that a multivariate normal prior distribution for $\boldsymbol\beta$ is conjugate: if $\boldsymbol\beta \sim \text{multivariate normal}(\boldsymbol\beta_0,\Sigma_0)$, then
\begin{flalign*}
p&(\boldsymbol\beta|\mathbf{y,X},\sigma^2)\\
&\propto p(\mathbf{y}|\mathbf{X},\boldsymbol\beta, \sigma^2) \times p(\boldsymbol\beta)\\
&\propto \text{exp}\{-\frac{1}{2}(-2\boldsymbol\beta^T\mathbf{X}^T\mathbf{y}/\sigma^2 + \boldsymbol\beta^T\mathbf{X}^T\mathbf{X}\boldsymbol\beta/\sigma^2) - \frac{1}{2}(-2\boldsymbol\beta^T\Sigma_0^{-1}\boldsymbol\beta_0 + \boldsymbol\beta^T\Sigma_0^{-1}\boldsymbol\beta)\}\\
&=\text{exp}\{\boldsymbol\beta^T(\Sigma_0^{-1}\boldsymbol\beta_0 + \mathbf{X}^T\mathbf{y}/\sigma^2) - \frac{1}{2}\boldsymbol\beta^T(\Sigma_0^{-1} + \mathbf{X}^T\mathbf{X}/\sigma^2)\boldsymbol\beta\}
\end{flalign*}
This is proportional to a multivariate normal density, with
\begin{flalign}
Var[\boldsymbol\beta|\mathbf{y,X},\sigma^2] &= (\Sigma_0^{-1} + \mathbf{X}^T\mathbf{X}/\sigma^2)^{-1} \label{semiconj_var}\\
\text{E}[\boldsymbol\beta|\mathbf{y,X},\sigma^2] &= (\Sigma_0^{-1} + \mathbf{X}^T\mathbf{X}/\sigma^2)^{-1} (\Sigma_0^{-1}\boldsymbol\beta_0 + \mathbf{X}^T\mathbf{y}/\sigma^2). \label{semiconj_expec}
\end{flalign}
As usual, we can gain some understanding of these formulae by considering some limiting cases. If the elements of the prior precision matrix $\Sigma_0^{-1}$ are small in magnitude, then the conditional expectation E$[\boldsymbol\beta|\mathbf{y,X},\sigma^2]$ is approximately equal to $(\mathbf{X}^T\mathbf{X})^{-1}\mathbf{X}^T\mathbf{y}$, the least squares estimate. On the other hand, if the measurement precision is very small ($\sigma^2$ is very large), then the expectation is approximately $\boldsymbol\beta_0$, the prior expectation.\\
As in most normal sampling problems, the semiconjugate prior distribution for $\sigma^2$ is an inverse-gamma distribution. Letting $\gamma = 1/\sigma^2$ be the measurement precision, if $\gamma \sim \text{gamma}(\nu_0/2,\nu_0\sigma^2_0/2)$, then
\begin{flalign*}
p(\gamma|\mathbf{y,X},\boldsymbol\beta) &\propto p(\gamma)p(\mathbf{y}|\mathbf{X},\boldsymbol\beta,\gamma)\\
&\propto \left[\gamma^{\nu_0/2-1}\text{exp}(-\gamma \times \nu_0\sigma^2_0/2)\right] \times
\left[\gamma^{n/2}\text{exp}(-\gamma \times \text{SSR}(\boldsymbol\beta)/2)\right]\\
&= \gamma^{(\nu_0+n)/2-1} \text{exp}(-\gamma[\nu_0\sigma^2_0 + \text{SSR}(\boldsymbol\beta)]/2),
\end{flalign*}
\noindent which we recognize as a gamma density, so that
$$\{\sigma^2|\mathbf{y,X},\boldsymbol\beta\} \sim \text{inverse-gamma}([\nu_0 + n]/2,[\nu_0\sigma^2_0 + \text{SSR}(\boldsymbol\beta)]/2).$$
\noindent Constructing a Gibbs sampler to approximate the joint posterior distribution $p(\boldsymbol\beta,\sigma^2|\mathbf{y,X})$ is then straightforward: given current values $\{\boldsymbol\beta^{(s)},\sigma^{2(s)}\}$, new values can be generated by
\begin{enumerate}
\item updating $\boldsymbol\beta$:
\begin{enumerate}
\item compute $\mathbf{V} = \text{Var}[\boldsymbol\beta|\mathbf{y,X},\sigma^{2(s)}]$ and $\mathbf{m} = \text{E}[\boldsymbol\beta|\mathbf{y,X},\sigma^{2(s)}]$
\item sample $\boldsymbol\beta^{(s+1)} \sim \text{multivariate normal}(\mathbf{m,V})$
\end{enumerate}
\item updating $\sigma^2$:
\begin{enumerate}
\item compute SSR$(\boldsymbol\beta^{(s+1)})$
\item sample $\sigma^{2(s+1)} \sim \text{inverse-gamma}([\nu_0 + n]/2,[\nu_0\sigma_0^2 + \text{SSR}(\boldsymbol\beta^{(s+1)})]/2)$.
\end{enumerate}
\end{enumerate}
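In code, one update of this Gibbs sampler might look like the following hedged Python sketch (names illustrative; this is not the thesis's R implementation):
\begin{verbatim}
import numpy as np
rng = np.random.default_rng(3)

def reg_gibbs_step(X, y, sigma2, beta0, Sigma0_inv, nu0, s20):
    n = len(y)
    # 1. beta | y, X, sigma^2 (multivariate normal, eqs. above)
    V = np.linalg.inv(Sigma0_inv + X.T @ X / sigma2)
    m = V @ (Sigma0_inv @ beta0 + X.T @ y / sigma2)
    beta = rng.multivariate_normal(m, V)
    # 2. sigma^2 | y, X, beta (inverse-gamma)
    ssr = np.sum((y - X @ beta)**2)
    sigma2 = 1.0 / rng.gamma((nu0 + n) / 2, 2.0 / (nu0 * s20 + ssr))
    return beta, sigma2
\end{verbatim}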
\paragraph{Default and weakly informative prior distributions}
In situations where prior information is unavailable or difficult to quantify, an alternative ``default" class of prior distributions is given. Specification of the prior parameters $(\boldsymbol\beta_0, \Sigma_0)$ and $(\nu_0,\sigma^2_0)$ that represent actual prior information for a Bayesian analysis can be difficult. For a prior distribution that is not going to represent real prior information about the parameters, we choose one that is as minimally informative as possible. The resulting posterior distribution, then, will represent the posterior information of someone who began with little knowledge of the population being studied. Here we will employ Zellner's ``$g$-prior" (Zellner, 1986). We choose $\boldsymbol\beta_0 = \mathbf{0}$ and $\Sigma_0 = k(\mathbf{X}^T\mathbf{X})^{-1}, k = g\sigma^2, g > 0$, which satisfies a desired condition that the regression parameter estimation be invariant to changes in the scale of the regressors. With this, equations \ref{semiconj_var} and \ref{semiconj_expec} reduce to
\begin{flalign}
\text{Var}[\boldsymbol\beta|\mathbf{y,X},\sigma^2] &= [\mathbf{X^TX}/(g\sigma^2) + \mathbf{X^TX}/\sigma^2]^{-1} = \frac{g}{g+1}\sigma^2(\mathbf{X^TX})^{-1} \label{noninf_var}\\
\text{E}[\boldsymbol\beta|\mathbf{y,X},\sigma^2] &= [\mathbf{X^TX}/(g\sigma^2) + \mathbf{X^TX}/\sigma^2]^{-1}\mathbf{X^Ty}/\sigma^2 = \frac{g}{g+1}(\mathbf{X^TX})^{-1}\mathbf{X^Ty}.\label{noninf_expec}
\end{flalign}
Letting
$$\mathbf{V} = \frac{g}{g+1}\sigma^2(\mathbf{X^TX})^{-1} \text{ and } \mathbf{m} = \frac{g}{g+1}(\mathbf{X^TX})^{-1}\mathbf{X^Ty}$$
we arrive at posteriors
\begin{flalign}
\{\sigma^2|\mathbf{y,X}\} &\sim \text{inverse-gamma}([\nu_0 + n]/2,[\nu_0\sigma^2_0 + \text{SSR}_g]/2) \label{noninf_sig2_post}\\
\{\boldsymbol\beta|\mathbf{y,X},\sigma^2\} &\sim \text{multivariate normal}\left(\frac{g}{g+1}\hat{\boldsymbol\beta}_{ols},\frac{g}{g+1}\sigma^2[\mathbf{X^TX}]^{-1}\right).\label{noninf_beta_post}
\end{flalign}
Here $\text{SSR}_g = \mathbf{y}^T\mathbf{y} - \sigma^2\,\mathbf{m}^T\mathbf{V}^{-1}\mathbf{m} = \mathbf{y}^T\left(\mathbf{I} - \frac{g}{g+1}\mathbf{X}(\mathbf{X}^T\mathbf{X})^{-1}\mathbf{X}^T\right)\mathbf{y}$.\\
Simple Monte Carlo approximation can be used to sample from the joint posterior density $p(\sigma^2,\boldsymbol\beta|\mathbf{y,X})$ as follows. Here $g$ is typically set to the number of prior observations. Then:
\begin{enumerate}
\item sample $\sigma^2 \sim \text{inverse-gamma}([\nu_0 + n]/2,[\nu_0\sigma^2_0 + \text{SSR}_g]/2)$
\item sample $\boldsymbol\beta \sim \text{multivariate normal}\left(\frac{g}{g+1}\hat{\boldsymbol\beta}_{ols},\frac{g}{g+1}\sigma^2[\mathbf{X^TX}]^{-1}\right)$.
\end{enumerate}
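A hedged Python sketch of this Monte Carlo scheme under the $g$-prior (names illustrative):
\begin{verbatim}
import numpy as np
rng = np.random.default_rng(4)

def gprior_draw(X, y, g, nu0, s20):
    n = len(y)
    XtX_inv = np.linalg.inv(X.T @ X)
    H = X @ XtX_inv @ X.T                      # hat matrix
    ssr_g = y @ (np.eye(n) - (g / (g + 1)) * H) @ y
    # 1. sigma^2 ~ inverse-gamma([nu0+n]/2, [nu0*s20 + SSR_g]/2)
    sigma2 = 1.0 / rng.gamma((nu0 + n) / 2, 2.0 / (nu0 * s20 + ssr_g))
    # 2. beta | sigma^2 ~ multivariate normal
    beta_ols = XtX_inv @ X.T @ y
    beta = rng.multivariate_normal((g / (g + 1)) * beta_ols,
                                   (g / (g + 1)) * sigma2 * XtX_inv)
    return beta, sigma2
\end{verbatim}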
\subsubsection{R Implementation}
The standard format R function rpredNormReg() approximates the joint posterior density $p(\sigma^2,\boldsymbol\beta|\mathbf{y,X})$ using one of the two methods described above, as selected by the user. The user inputs the set or sets of explanatory variables $X_{pred} = (x_{i1}, x_{i2}, x_{i3}, x_{i4})$ and the number $S$ of predicted values desired for each set. The function produces $S$ sampled values from the relevant posterior distribution of $\boldsymbol\beta$, and then computes a prediction for each sampled $\boldsymbol\beta$ as $y = X_{pred}\boldsymbol\beta$.
\textcolor{red}{SHOULD I PASTE THE R CODE INto THIS SECTION?}
% To use the semiconjugate prior, the user must supply values for prior parameters $(\beta_0, \Sigma_0)$ and $(\nu_0, \sigma^2_0)$.
%
% In the absence of prior information, predictions are made by means of Monte Carlo sampling, using Zellner's g-prior.
%
% \begin{enumerate}
% \item sample $\sigma^2 \sim \text{inverse-gamma}([\nu_0 + n]/2,[\nu_0\sigma^2_0 + \text{SSR}_g]/2)$
% \item sample $\beta \sim \text{multivariate normal}\left(\frac{g}{g+1}\hat{\beta}_{ols},\frac{g}{g+1}\sigma^2[\mathbf{X^TX}]^{-1}\right)$.
% \end{enumerate}
%
% Here $g$ is typically set to the number of prior observations.
\subsubsection{Example}
In the example below (Hoff data and code found \href{https://pdhoff.github.io/book/}{here}) to employ Hoff's semi-conjugate prior we use
\begin{flalign*}
\boldsymbol\beta_0 &= \hat{\boldsymbol\beta}_{ols} = (-51.29, 13.11, 2.09, -0.32) \text{ (ordinary least squares estimator of } \boldsymbol\beta \text{)}\\
\Sigma_0 &= (X^TX)^{-1}\sigma^2n =
\begin{pmatrix}
1801.4 & -1801.4 & -77.02 & 77.02 \\
-1801.4 & 2981.28 & 77.02 & -122.03 \\
-77.02 & 77.02 & 3.32 & -3.32 \\
77.02 & -122.03 & -3.32 & 5.07
\end{pmatrix}
\text{ (sampling variance of } \hat{\boldsymbol\beta}_{ols} \text{)}\text{WHY TIMES n????}\\
\nu_0 &= 1 \text{ (prior sample size)}\\
\sigma^2_0 &= \frac{\sum e_i^2}{n-1} = 6.21 \text{ (variance of the residuals)}\\
S &= 5000 \text{ (sample size for predictive distribution random draw)}
\end{flalign*}
\textcolor{red}{To do: clean up write-up; make plots comparing g-prior and non-g-prior results; circle back with Dean about appropriate priors for non-g-prior case; why is Hoff looking at the difference between the two cases with prior info very close to the sample characteristics? }
\textcolor{red}{ALSO INCLUDE IN WRITE-UP EXPLICIT PREDICTION STEP, E.G. "Y = ..."}
\includegraphics{Thesis-014}
%\textcolor{red}{MAYBE DO BOTH IN THE SAME BLOCK AND PLOT ON ONE PLOT?}
%\clearpage
%For the same example without prior information, we use Zellner's g-prior with $g =$ length$(y) =$ 12.
\includegraphics{Thesis-015}
\includegraphics{Thesis-016}
\clearpage
\includegraphics{Thesis-017}
\clearpage
Comparing predictions using the semi-conjugate prior vs.\ Zellner's g-prior:
\includegraphics{Thesis-018}
\includegraphics{Thesis-019}
\textcolor{red}{EXPLAIN WHY SCP PREDICTIONS HAVE TALLER DISTRIBUTIONS THAN ZGP PREDICTIONS. ALSO WHY ZGP PREDICTIONS SHRINK TOWARD 0}
\clearpage
Comparing observed values to predictive distributions:
\includegraphics{Thesis-020}
The following plots exhibit the influence of varying the prior information with the semi-conjugate prior.
\includegraphics{Thesis-021}
\includegraphics{Thesis-022}
\includegraphics{Thesis-023}
\includegraphics{Thesis-024}
\includegraphics{Thesis-025}
\clearpage
\section{Conclusion}
\end{document}
\title{KL(q||p) Minimization}
\subsection{$\text{KL}(q\|p)$ Minimization}
One form of variational inference minimizes the Kullback-Leibler
divergence \textbf{from} $q(\mathbf{z}\;;\;\lambda)$ \textbf{to}
$p(\mathbf{z} \mid \mathbf{x})$,
\begin{align*}
\lambda^*
&=
\arg\min_\lambda \text{KL}(
q(\mathbf{z}\;;\;\lambda)
\;\|\;
p(\mathbf{z} \mid \mathbf{x})
)\\
&=
\arg\min_\lambda\;
\mathbb{E}_{q(\mathbf{z}\;;\;\lambda)}
\big[
\log q(\mathbf{z}\;;\;\lambda)
-
\log p(\mathbf{z} \mid \mathbf{x})
\big].
\end{align*}
The KL divergence is a non-symmetric, information-theoretic measure of
similarity between two probability distributions
\citep{hinton1993keeping,waterhouse1996bayesian,jordan1999introduction}.
\subsubsection{The Evidence Lower Bound}
The above optimization problem is intractable because it directly
depends on the posterior $p(\mathbf{z} \mid \mathbf{x})$. To tackle
this, consider the property
\begin{align*}
\log p(\mathbf{x})
&=
\text{KL}(
q(\mathbf{z}\;;\;\lambda)
\;\|\;
p(\mathbf{z} \mid \mathbf{x})
)\\
&\quad+\;
\mathbb{E}_{q(\mathbf{z}\;;\;\lambda)}
\big[
\log p(\mathbf{x}, \mathbf{z})
-
\log q(\mathbf{z}\;;\;\lambda)
\big]
\end{align*}
where the left hand side is the logarithm of the marginal likelihood
$p(\mathbf{x}) = \int p(\mathbf{x}, \mathbf{z}) \text{d}\mathbf{z}$,
also known as the model evidence. (Try deriving this using Bayes'
rule!)
The evidence is a constant with respect to the variational parameters
$\lambda$, so we can minimize $\text{KL}(q\|p)$ by instead maximizing
the Evidence Lower BOund,
\begin{align*}
\text{ELBO}(\lambda)
&=\;
\mathbb{E}_{q(\mathbf{z}\;;\;\lambda)}
\big[
\log p(\mathbf{x}, \mathbf{z})
-
\log q(\mathbf{z}\;;\;\lambda)
\big].
\end{align*}
In the ELBO, both $p(\mathbf{x}, \mathbf{z})$ and
$q(\mathbf{z}\;;\;\lambda)$ are tractable. The optimization problem we
seek to solve becomes
\begin{align*}
\lambda^*
&=
\arg \max_\lambda \text{ELBO}(\lambda).
\end{align*}
As per its name, the ELBO is a lower bound on the evidence, and
optimizing it tries to maximize the probability of observing the data.
What does maximizing the ELBO do? Splitting the ELBO reveals a trade-off
\begin{align*}
\text{ELBO}(\lambda)
&=\;
\mathbb{E}_{q(\mathbf{z} \;;\; \lambda)}[\log p(\mathbf{x}, \mathbf{z})]
- \mathbb{E}_{q(\mathbf{z} \;;\; \lambda)}[\log q(\mathbf{z}\;;\;\lambda)],
\end{align*}
where the first term represents an energy and the second term
(including the minus sign) represents the entropy of $q$.
The energy encourages $q$ to focus probability mass where the
model puts high probability, $p(\mathbf{x}, \mathbf{z})$.
The entropy encourages $q$ to spread probability mass to avoid
concentrating in one location.
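To make this concrete, the following minimal sketch (plain \texttt{numpy}, not Edward code) estimates the ELBO by Monte Carlo for a toy model of our own choosing: a standard normal prior on $\mathbf{z}$, a unit-variance Gaussian likelihood for a single observation, and a Gaussian $q$.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
x = 1.5                                  # a single observed data point

def log_joint(z):
    # log p(x, z) with z ~ Normal(0, 1) and x | z ~ Normal(z, 1)
    return -0.5 * (z ** 2 + (x - z) ** 2) - np.log(2.0 * np.pi)

def elbo(mu, sig, S=10000):
    # Monte Carlo estimate of E_q[log p(x, z) - log q(z; lambda)]
    z = mu + sig * rng.standard_normal(S)        # draws from q(z; lambda)
    log_q = (-0.5 * ((z - mu) / sig) ** 2
             - np.log(sig) - 0.5 * np.log(2.0 * np.pi))
    return np.mean(log_joint(z) - log_q)
\end{verbatim}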
Edward uses two generic strategies to obtain gradients for
optimization.
\begin{itemize}
\item Score function gradient;
\item Reparameterization gradient.
\end{itemize}
\subsection{Score function gradient}
Gradient descent is a standard approach for optimizing complicated
objectives like the ELBO. The idea is to calculate its gradient
\begin{align*}
\nabla_\lambda\;
\text{ELBO}(\lambda)
&=
\nabla_\lambda\;
\mathbb{E}_{q(\mathbf{z}\;;\;\lambda)}
\big[
\log p(\mathbf{x}, \mathbf{z})
-
\log q(\mathbf{z}\;;\;\lambda)
\big],
\end{align*}
and update the current set of parameters proportional to the gradient.
The score function gradient estimator leverages a property of
logarithms to write the gradient as
\begin{align*}
\nabla_\lambda\;
\text{ELBO}(\lambda)
&=\;
\mathbb{E}_{q(\mathbf{z}\;;\;\lambda)}
\big[
\nabla_\lambda \log q(\mathbf{z}\;;\;\lambda)
\:
\big(
\log p(\mathbf{x}, \mathbf{z})
-
\log q(\mathbf{z}\;;\;\lambda)
\big)
\big].
\end{align*}
The gradient of the ELBO is an expectation over the variational
model $q(\mathbf{z}\;;\;\lambda)$; the only new ingredient it requires is the
\emph{score function} $\nabla_\lambda \log q(\mathbf{z}\;;\;\lambda)$
\citep{paisley2012variational,ranganath2014black}.
We can use Monte Carlo integration to obtain noisy estimates of both the ELBO
and its gradient. The basic procedure follows these steps:
\begin{enumerate}
\item draw $S$ samples $\{\mathbf{z}_s\}_1^S \sim q(\mathbf{z}\;;\;\lambda)$,
\item evaluate the argument of the expectation using $\{\mathbf{z}_s\}_1^S$, and
\item compute the empirical mean of the evaluated quantities.
\end{enumerate}
A Monte Carlo estimate of the gradient is then
\begin{align*}
\nabla_\lambda\;
\text{ELBO}(\lambda)
&\approx\;
\frac{1}{S}
\sum_{s=1}^{S}
\big[
\big(
\log p(\mathbf{x}, \mathbf{z}_s)
-
\log q(\mathbf{z}_s\;;\;\lambda)
\big)
\:
\nabla_\lambda \log q(\mathbf{z}_s\;;\;\lambda)
\big].
\end{align*}
This is an unbiased estimate of the actual gradient of the ELBO.
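Continuing the toy example from the ELBO sketch above (and reusing its \texttt{log\_joint} and \texttt{rng}), a minimal sketch of this estimator for a Gaussian $q$ with $\lambda = (\mu, \sigma)$, using hand-derived score functions, is:
\begin{verbatim}
def score_gradient(mu, sig, S=10000):
    # Score-function Monte Carlo estimate of the ELBO gradient
    z = mu + sig * rng.standard_normal(S)
    log_q = (-0.5 * ((z - mu) / sig) ** 2
             - np.log(sig) - 0.5 * np.log(2.0 * np.pi))
    w = log_joint(z) - log_q                           # ELBO integrand
    score_mu = (z - mu) / sig ** 2                     # d/dmu log q(z; lambda)
    score_sig = ((z - mu) ** 2 - sig ** 2) / sig ** 3  # d/dsig log q(z; lambda)
    return np.mean(w * score_mu), np.mean(w * score_sig)
\end{verbatim}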
\subsection{Reparameterization gradient}
If the model has differentiable latent variables, then it is generally
advantageous to leverage gradient information from the model in order to
better traverse the optimization space. One approach to doing this is
the reparameterization gradient
\citep{kingma2014auto,rezende2014stochastic}.
Some variational distributions $q(\mathbf{z}\;;\;\lambda)$ admit useful
reparameterizations. For example, we can reparameterize a normal distribution
$\mathbf{z} \sim \text{Normal}(\mu, \Sigma)$ as
$\mathbf{z} \sim \mu + L \text{Normal}(0, I)$ where $\Sigma = LL^\top$. In general, write
this as
\begin{align*}
\epsilon &\sim q(\epsilon)\\
\mathbf{z} &= \mathbf{z}(\epsilon \;;\; \lambda),
\end{align*}
where $\epsilon$ is a random variable that does \textbf{not} depend on the
variational parameters $\lambda$. The deterministic function
$\mathbf{z}(\cdot;\lambda)$ encapsulates the variational parameters instead,
and following this process is equivalent to directly drawing $\mathbf{z}$ from
the original distribution.
The reparameterization gradient leverages this property of the
variational distribution to write the gradient as
\begin{align*}
\nabla_\lambda\;
\text{ELBO}(\lambda)
&=\;
\mathbb{E}_{q(\epsilon)}
\big[
\nabla_\lambda
\big(
\log p(\mathbf{x}, \mathbf{z}(\epsilon \;;\; \lambda))
-
\log q(\mathbf{z}(\epsilon \;;\; \lambda) \;;\;\lambda)
\big)
\big].
\end{align*}
The gradient of the ELBO is an expectation over the base
distribution $q(\epsilon)$, and the gradient can be applied directly
to the inner expression.
We can use Monte Carlo integration to obtain noisy estimates of both the ELBO
and its gradient. The basic procedure follows these steps:
\begin{enumerate}
\item draw $S$ samples $\{\epsilon_s\}_1^S \sim q(\epsilon)$,
\item evaluate the argument of the expectation using $\{\epsilon_s\}_1^S$, and
\item compute the empirical mean of the evaluated quantities.
\end{enumerate}
A Monte Carlo estimate of the gradient is then
\begin{align*}
\nabla_\lambda\;
\text{ELBO}(\lambda)
&\approx\;
\frac{1}{S}
\sum_{s=1}^{S}
\big[
\nabla_\lambda
\big(
\log p(\mathbf{x}, \mathbf{z}(\epsilon_s \;;\; \lambda))
-
\log q(\mathbf{z}(\epsilon_s \;;\; \lambda) \;;\;\lambda)
\big)
\big].
\end{align*}
This is an unbiased estimate of the actual gradient of the ELBO. Empirically, it
exhibits lower variance than the
score function gradient, leading to
faster convergence in a large set of problems.
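For the same toy model, the reparameterization estimator can be sketched as follows; since this sketch avoids automatic differentiation, the chain-rule derivatives are written out by hand and are specific to the toy model above.
\begin{verbatim}
def reparam_gradient(mu, sig, S=10000):
    # Reparameterization Monte Carlo estimate of the ELBO gradient
    eps = rng.standard_normal(S)         # base distribution q(eps) = N(0, 1)
    z = mu + sig * eps                   # z(eps; lambda)
    dlogp_dz = -z + (x - z)              # d/dz log p(x, z) for the toy model
    grad_mu = np.mean(dlogp_dz)          # chain rule: dz/dmu = 1
    grad_sig = np.mean(dlogp_dz * eps) + 1.0 / sig
    # the 1/sig term is the d/dsig derivative of the Gaussian entropy of q
    return grad_mu, grad_sig
\end{verbatim}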
For more details, see the \href{/api/}{API} as well as its
implementation in Edward's code base.
\subsubsection{References}\label{references}
\begin{savequote}[8cm]
‘The assumption is that when something turns out to not be ideal, it will be refactored again. Everything is subject to refactoring.’
\qauthor{--- Ward Cunningham, \textit{\usebibentry{cunningham2003collective}{title}} \citeyearpar{cunningham2003collective} }
\end{savequote}
\chapter{\label{ch:8-HFM1}Methodological adaptations to other studies of recombination}
%\otherpagedecoration
\minitoc{}
{\small{} \itshape{}
\paragraph{This chapter in brief —}
The method we previously implemented to detect recombination in single individuals can be used to study the role of genes essential to the process of recombination.
This requires the use of individuals homozygous for the mutant version of the gene but nonetheless displaying a high level of heterozygosity for recombination to be detectable.
As this can only be achieved with F2 individuals,
we adapted the method we had implemented for simple F1 hybrids to this design.
In essence, we had to distinguish the polymorphic sites expressing variation between the two parental genomes from those originating from the third, introgressed genome.
This implementation was as powerful as the original method and we could thus study the role of the interaction between HFM1 and MLH1:
we observed that impeding this interaction led to an increased recombination rate and shortened CO conversion tracts.
}
\newpage
As the method we implemented in Chapter~\ref{ch:5-methodology} allows us to detect recombination in single individuals, it can be used to study the individual role of genes involved in the process of recombination.
In particular, Bernard de Massy and Valérie Borde are interested in the specific role of the mouse gene \textit{Hfm1} whose yeast homologue (\textit{MER3}) codes for a meiosis-specific DNA helicase \citep{nakagawa1999saccharomyces,nakagawa2002saccharomyces} that participates in CO control and in DNA heteroduplex extension \citep{mazina2004saccharomyces,nakagawa2002mer3}.
This gene is also essential to CO formation in other fungi \citep{sugawara2009coprinus}, plants \citep{mercier2005two,chen2005arabidopsis}, humans \citep{tanaka2006hfm1} and mice \citep{guiraldelli2013mouse}.
It was recently shown that, in yeasts, Mer3 can connect the MutL\textgreek{b} heterodimer of Mlh1-Mlh2 and that this interaction limits CT lengths genome-wide \citep{duroc2017concerted}.
In mice, the interplay between HFM1 and MLH1 is conserved, but whether its role in regulating CT length is also maintained remains unknown.
To address this question, the laboratories of Valérie Borde and Bernard de Massy introgressed a point mutation that impedes the interaction between HFM1 and MLH1 (\textit{Hfm1\textsuperscript{KI}}) into F2 individuals, as I detail in the first section of this chapter.
In this experimental design, the individuals studied contain three genetic backgrounds and thus, our method to detect recombination needs to be refactored.
I describe in the last two sections of this chapter how we worked this out and what the preliminary results of this analysis were.
\section{Experimental design}
\subsection{Introgression of the mutant \textit{hfm1} allele}
\begin{table}[p]
\begin{subtable}[h]{\textwidth}
\subcaption{Ancestry of S28353 and S28355.}
\centering
\begin{adjustbox}{width = 1\textwidth}
\begin{tabular}{rrrrrrrr}
\toprule
\textbf{Mouse ID} & \textbf{Relationship} & \textbf{\% B6} & \textbf{\% DBA2} & \textbf{\% CAST} & \textbf{HFM1} & \textbf{Mother} & \textbf{Father} \\
\midrule
39856 & Maternal grandmother & 0.0 & 0.0 & 100.0 & WT/WT & N/A & N/A \\
28130 & Maternal grandfather & 75.0 & 25.0 & 0.0 & KI/WT & 72205 & N/A \\
F0\#2 (72205) & Paternal grandmother & 50.0 & 50.0 & 0.0 & KI/WT & N/A & N/A \\
N/A & Paternal grandfather & 100.0 & 0.0 & 0.0 & WT/WT & N/A & N/A \\
\midrule
22228 & Mother & 37.5 & 12.5 & 50.0 & KI/WT & 39856 & 28130 \\
28196 & Father & 75.0 & 25.0 & 0.0 & KI/WT & 72205 & N/A \\
\midrule
28353 & Mutant analysed & 56.25 & 18.75 & 25.0 & KI/KI & 22228 & 28196 \\
28355 & WT analysed & 56.25 & 18.75 & 25.0 & WT/WT & 22228 & 28196 \\
\bottomrule
\end{tabular}
\end{adjustbox}
\label{tab:ancestry-28353-28355}
\end{subtable}
\vspace{2cm}
\begin{subtable}[h]{\textwidth}
\subcaption{Ancestry of S28367.}
\centering
\begin{adjustbox}{width = 1\textwidth}
\begin{tabular}{rrrrrrrr}
\toprule
\textbf{Mouse ID} & \textbf{Relationship} & \textbf{\% B6} & \textbf{\% DBA2} & \textbf{\% CAST} & \textbf{HFM1} & \textbf{Mother} & \textbf{Father} \\
\midrule
F0\#3 (72212) & Maternal grandmother & 50.0 & 50.0 & 0.0 & KI/WT & N/A & N/A \\
N/A & Maternal grandfather & 100.0 & 0.0 & 0.0 & WT/WT & N/A & N/A \\
28163 & Paternal grandmother & 75.0 & 25.0 & 0.0 & KI/WT & 72205 & N/A \\
39978 & Paternal grandfather & 0.0 & 0.0 & 100.0 & WT/WT & N/A & N/A \\
\midrule
28172 & Mother & 75.0 & 25.0 & 0.0 & KI/WT & 72212 & N/A \\
28238 & Father & 37.5 & 12.5 & 50.0 & KI/WT & 28163 & 39978 \\
\midrule
28367 & Mutant analysed & 56.25 & 18.75 & 25.0 & KI/KI & 28172 & 28238 \\
\bottomrule
\end{tabular}
\end{adjustbox}
\label{tab:ancestry-28367}
\end{subtable}
\vspace{2cm}
\begin{subtable}[h]{\textwidth}
\subcaption{Ancestry of S28371.}
\centering
\begin{adjustbox}{width = 1\textwidth}
\begin{tabular}{rrrrrrrr}
\toprule
\textbf{Mouse ID} & \textbf{Relationship} & \textbf{\% B6} & \textbf{\% DBA2} & \textbf{\% CAST} & \textbf{HFM1} & \textbf{Mother} & \textbf{Father} \\
\midrule
39856 & Maternal grandmother & 0.0 & 0.0 & 100.0 & WT/WT & N/A & N/A \\
28130 & Maternal grandfather & 75.0 & 25.0 & 0.0 & KI/WT & 72205 & N/A \\
F0\#2 (72205) & Paternal grandmother & 50.0 & 50.0 & 0.0 & KI/WT & N/A & N/A \\
N/A & Paternal grandfather & 100.0 & 0.0 & 0.0 & WT/WT & N/A & N/A \\
\midrule
28250 & Mother & 37.5 & 12.5 & 50.0 & KI/WT & 39856 & 28130 \\
28198 & Father & 75.0 & 25.0 & 0.0 & KI/WT & 72205 & N/A \\
\midrule
28371 & WT analysed & 56.25 & 18.75 & 25.0 & WT/WT & 28250 & 28198 \\
\bottomrule
\end{tabular}
\end{adjustbox}
\label{tab:ancestry-28371}
\end{subtable}
\vspace{1cm}
\caption[Genealogy of the four mice analysed]
{\textbf{Genealogy of the four mice analysed.}
\par The genealogies (parents and grandparents) of each of the two mutant mice (IDs: 28353 and 28367) and of the two wild-type (WT) mice (IDs: 28355 and 28371) analysed in this study, as well as the characteristics (background composition in B6, CAST and DBA2 genomes, and the \textit{Hfm1} alleles carried: either the mutant impeding the interaction between HFM1 and MLH1 (KI) or the wild-type (WT) allele) of all the individuals involved in the ancestry are reported in the subtables above: \textbf{(a)} 28353 and 28355; \textbf{(b)} 28367; \textbf{(c)} 28371.
}
\label{tab:genealogies}
\end{table}
A mutant \textit{Hfm1} allele (\textit{Hfm1\textsuperscript{KI}}) was introduced in the zygote of a cross between two F1 mice deriving from hybridisations between two \textit{Mus musculus domesticus} strains: strain C57BL/6J, hereafter called B6 and strain DBA/2J, hereafter called DBA2.
The resulting founder mice (F0\#2 and F0\#3) were thus heterozygous for the \textit{Hfm1} gene (\textit{Hfm1\textsuperscript{WT/KI}}) and their genetic backgrounds were composed of 50\% DBA2 and 50\% B6 genomes.
Further crosses with other B6 and \textit{Mus musculus castaneus} (strain CAST/EiJ, hereafter called CAST) mice resulted in individuals carrying either two mutant alleles for \textit{Hfm1} (\textit{Hfm1\textsuperscript{KI/KI}}), two WT alleles (\textit{Hfm1\textsuperscript{WT/WT}}) or one allele of each (\textit{Hfm1\textsuperscript{WT/KI}}).
The genetic backgrounds for these mice were composed of a mixture of B6, DBA2 and CAST genomes (Table~\ref{tab:genealogies}).
Of these, two \textit{hfm1} homozygous mutant (28353 and 28367) and two WT (28355 and 28371) male mice were selected for further analysis: their sperm DNA was extracted and sonicated to produce fragments of a mean size of 450 bp.
\subsection{Target selection, DNA capture and sequencing}
As in Chapter~\ref{ch:5-methodology}, we selected hotspots from the list identified by \citet{baker2015prdm9} on the basis of PRDM9 ChIP-seq peak detection.
We used the same criteria as before: a minimum of 4 SNPs in the 300-bp central region, a strict maximum of 60 sites with low sequence quality in the 1-kb central region, and at least 90\% identity between the B6 and the CAST reference genomes over at least 80\% of the selected region.
However, since the main aim of this analysis was to test for any effect of the \textit{Hfm1} mutation on CO CT length, we extended the width of our selected hotspots to 3 kb.
Thus, the third selection criterion discarded a larger number of candidate hotspots than in Chapter~\ref{ch:5-methodology}, since identity was required over 3 kb instead of 1 kb.
In the end, 890 3-kb long hotspots were retained and, as in Chapter~\ref{ch:5-methodology}, 500 control regions were added to that list of targets.\\
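For illustration, this selection can be sketched as a simple filter (hypothetical Python; the record fields are names of our own invention, not those of the actual pipeline):
\begin{verbatim}
def select_hotspots(candidates):
    # candidates: list of dicts with hypothetical per-hotspot summaries
    kept = []
    for h in candidates:
        if (h["n_snps_center300bp"] >= 4        # >= 4 SNPs in the 300-bp centre
                and h["n_lowqual_1kb"] < 60     # strictly fewer than 60 low-
                                                # quality sites in the 1-kb centre
                and h["pct_identity"] >= 90.0   # >= 90% B6/CAST identity ...
                and h["pct_aligned"] >= 80.0):  # ... over >= 80% of the region
            kept.append(h)
    return kept
\end{verbatim}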
For the efficiency of DNA capture to be identical for both haplotypes, two baits were designed for each of the 1,390 targets: one corresponding to the CAST haplotype and one to the B6 haplotype.
We then performed two successive rounds of DNA capture on each of the four DNA samples from the four mice.
Libraries were then sequenced by an Illumina device using a 250-bp paired-end protocol, and the sequenced reads were mapped onto the B6 and the CAST reference genomes as described in Chapter~\ref{ch:5-methodology}.
Overall, read mapping statistics and capture efficiency were similar to what was found in Chapter~\ref{ch:5-methodology} (Table~\ref{tab:HFM1-statistics-seq-mapping-capture}).
\begin{table}[t]
\centering
\begin{adjustbox}{width = 1\textwidth}
\begin{tabular}{rrrrrrr}
\toprule
\multicolumn{2}{c}{\textbf{Sample}} & \multicolumn{2}{c}{\textbf{Mapping (\%)}} & \multicolumn{3}{c}{\textbf{Capture efficiency}} \\
\cmidrule(l){1-2} \cmidrule(l){3-4} \cmidrule(l){5-7}
\textbf{Library} & \textbf{Library} & \textbf{Ref.} & \textbf{Ref.} & \textbf{\# Filtered} & \textbf{\% in} & \textbf{\# in} \\
\textbf{ID} & \textbf{size} & \textbf{B6} & \textbf{CAST} & \textbf{Fragments} & \textbf{targets} & \textbf{targets} \\
\midrule
28355 & 164,210,468 & 98.76 & 98.00 & 162,168,344 & 48.62 & 78,851,718 \\
28371 & 171,930,499 & 98.25 & 98.20 & 170,081,808 & 48.63 & 82,713,025 \\
\textbf{Total WT} & \textbf{336,140,967} & \textbf{98.84} & \textbf{98.10} & \textbf{332,250,152} & \textbf{48.63} & \textbf{161,564,743} \\
\\
28353 & 161,294,272 & 99.15 & 98.35 & 159,920,297 & 48.62 & 78,851,718 \\
28367 & 227,590,570 & 97.91 & 97.18 & 222,826,196 & 37.11 & 82,713,025 \\
\textbf{Total mutants} & \textbf{388,884,842} & \textbf{98.42} & \textbf{97.67} & \textbf{382,746,493} & \textbf{48.63} & \textbf{186,150,465} \\
\bottomrule
\end{tabular}
\end{adjustbox}
\caption[Sequencing, mapping and capture-efficiency summary metrics]
{\textbf{Sequencing, mapping and capture-efficiency summary metrics.}
\par Reads were mapped onto the B6 and CAST reference genomes, and fragments were filtered as described in Chapter~\ref{ch:5-methodology}.
The lines in bold represent the totals for the two WT and the two mutant mice.
}
\label{tab:HFM1-statistics-seq-mapping-capture}
\end{table}
\subsection{Expected genetic background composition}
The point mutation on \textit{Hfm1} originated from B6/DBA2-background founder mice (F0\#2 and F0\#3) and was introgressed into a B6xCAST hybrid \textit{via} two consecutive crosses:
on the one hand, the founder mice were crossed with B6/B6-background mice, thus yielding one 75\%-B6/25\%-DBA2 parent;
on the other hand, other 75\%-B6/25\%-DBA2 mice were crossed with CAST mice to yield a second parent with a background composed of 37.5\% B6, 12.5\% DBA2 and 50\% CAST genomes (Table~\ref{tab:genealogies}).
Each of the four selected mice (28353, 28355, 28367 and 28371) were then obtained by crossing the two aforementioned parents together.
Thus, their background encompassed 56.25\% B6, 18.75\% DBA2 and 25\% CAST genomes.
\begin{table}[t]
\centering
\begin{tabular}{rrrr}
\toprule
\multicolumn{2}{c}{\textbf{Detailed}} & \multicolumn{2}{c}{\textbf{Simplified}} \\
\cmidrule(l){1-2} \cmidrule(l){3-4}
\textbf{Background} & \textbf{\% expected} & \textbf{Background} & \textbf{\% expected} \\
\cmidrule(l){1-2} \cmidrule(l){3-4}
B6/B6 & 28.125 & \multirow{3}{*}{DOM/DOM} & \multirow{3}{*}{50.0} \\
B6/DBA2 & 18.750 & & \\
DBA2/DBA2 & 3.125 & & \\
\cmidrule(l){1-2} \cmidrule(l){3-4}
B6/CAST & 37.500 & \multirow{2}{*}{DOM/CAST} & \multirow{2}{*}{50.0} \\
DBA2/CAST & 12.500 & & \\
\cmidrule(l){1-2} \cmidrule(l){3-4}
CAST/CAST & 0.000 & CAST/CAST & 0.0 \\
\bottomrule
\end{tabular}
\caption[Expected distribution of genetic backgrounds in the mice analysed]
{\textbf{Expected distribution of genetic backgrounds in the mice analysed.}
\par Because the B6 and DBA2 genomes present high sequence conservation \citep{davis2005genomewide}, we regrouped them under the label ‘DOM’.
The expected genomic proportions (and thus proportions of targets) in each of the six possible ‘detailed’ backgrounds are reported in the left panel, and the expected proportions in each of the three ‘simplified’ backgrounds are reported in the right panel.
}
\label{tab:background-expected}
\end{table}
More precisely, the expected genomic proportions (and therefore, the expected proportions of targets) of each genetic background are those reported in Table~\ref{tab:background-expected}.
Overall, 68.75\% of the targeted loci were expected to be heterozygous (either B6/DBA2, B6/CAST or DBA2/CAST) and could, in principle, be used to detect recombination events.
However, the power to detect recombination depends on the density of heterozygous sites, and the latter is much lower at B6/DBA2-background targets than at B6/CAST- or DBA2/CAST-background loci.
Indeed, the B6 and the DBA2 genomes present a low sequence divergence of 0.2\% \citep{keane2011mouse} because these two strains derive from the same mouse subspecies (\textit{Mus musculus domesticus}) from which they inherited large genomic regions \citep{davis2005genomewide}.
We note that, since B6 and DBA2 derive from the same subspecies, we will regroup these two labels under a more general notation: ‘DOM’.
In comparison, as the DOM (B6 or DBA2) and CAST strains derive from two distinct subspecies which diverged about 350,000 to 500,000 years ago \citep{geraldes2008inferring}, they present a much higher genome-wide divergence of 0.74\% \citep{keane2011mouse}.
Therefore, in order to avoid any spurious fluctuation in detectability between individuals and to thus allow the comparison of recombination rates across samples, we chose to search for recombination events exclusively in one type of heterozygous background.
To maximise the detectability of recombination events, we focused on the background displaying the highest rate of polymorphism: DOM/CAST-background targets.
The following section will be dedicated to detailing the procedure we implemented to identify them specifically.
\section{Detection of recombination in F2 individuals}
\subsection{Inference of the origin of polymorphic sites}
\begin{figure}[b!]
\centering
\includegraphics[width = 0.8\textwidth]{figures/inkscape/HFM1-origin-markers-with-braces.eps}
\caption[The three possible types of polymorphic sites]
{\textbf{The three possible types of polymorphic sites.}
\par According to the principle of parsimony, any polymorphic site (circle) should result, in most cases, in two of the strains carrying the same allele and one of them carrying a different one.
In this example, the polymorphic site on the left corresponds to a DOM-CAST marker, where the B6 and the DBA2 haplotypes carry the same allele, different from that of the CAST haplotype.
The polymorphic sites in the middle and on the right correspond to two B6-DBA2 markers, with either the B6 (middle) or the DBA2 (right) haplotype carrying the same allele as the CAST one.
Given the divergence between strains (see main text), DOM-CAST markers occur more often than the B6-DBA2 markers.
}
\label{fig:marker-origin}
\end{figure}
Distinguishing the targets of interest (DOM/CAST-background targets) from the others (DOM/DOM-background targets) amounts to genotyping the DOM-CAST markers (i.e.\ the polymorphic sites for which the CAST strain carries an allele different from that carried by the B6 and the DBA2 strains).
However, given that the F2 individuals carry a mosaic of three genomes, three types of polymorphic sites can occur: either the B6, the DBA2 or the CAST genome carries an allele different from that of the other two (Figure~\ref{fig:marker-origin}).
Therefore, prior to genotyping targets, the DOM-CAST markers must be distinguished from the other (B6-DBA2) markers.
Given the crosses made, no portion of the genome of the F2 individuals could display a CAST/CAST background (Table~\ref{tab:background-expected}).
Therefore, if, at a given polymorphic site, at least one of the four individuals is homozygous for the allele carried by the CAST strain, the site necessarily corresponds to a B6-DBA2 marker (Figure~\ref{fig:marker-origin}).
We distinguished between B6-DBA2 and DOM-CAST markers on this basis.
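This decision rule can be sketched as follows (a minimal Python sketch with a hypothetical data layout, not the code of our actual pipeline):
\begin{verbatim}
def classify_marker(genotypes, cast_allele):
    # genotypes: hypothetical dict mouse_id -> (allele1, allele2) at this site.
    # No F2 individual can be CAST/CAST, so any mouse homozygous for the
    # CAST allele betrays a B6-DBA2 marker.
    for a1, a2 in genotypes.values():
        if a1 == a2 == cast_allele:
            return "B6-DBA2"
    return "DOM-CAST"
\end{verbatim}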
\subsection{Identification of the genetic background}
% RIGHT PAGE
\begin{sidewaysfigure}[p]
\centering
\leftskip-2.4cm
\rightskip-2.4cm
\rotfloatpagestyle{empty}
\includegraphics[width = 1.25\textwidth]{figures/chap8/HFM1_background_28353-DOM.eps}
\captionsetup{width=1.25\textwidth, margin={-2.2cm, -3.3cm}}
\caption[Mosaic of genetic backgrounds inferred at each target along the autosomes of mouse 28353]
{\textbf{Mosaic of genetic backgrounds inferred at each target along the autosomes of mouse 28353.}
\par Chromosomes are represented in grey and oriented so that the centromere is on the bottom side of the figure (mouse chromosomes are acrocentric).
Each segment corresponds to the position of a target (hotspot or control region) and was coloured in red when the background inferred was DOM/DOM (homozygous) and in blue when the background inferred was DOM/CAST (heterozygous).
The corresponding figures for the three other mice (28355, 28367, 28371) are reported in Appendix~\ref{app:data-and-figs}.
}
\label{fig:mosaic-backgrounds}
\end{sidewaysfigure}
Next, we inferred the genetic backgrounds using the following criteria:
if more than 90\% of the DOM-CAST markers of a given hotspot were genotyped as heterozygous in a given individual, a DOM/CAST background was inferred;
if more than 90\% of the DOM-CAST markers were genotyped as homozygous, a DOM/DOM background was inferred;
in any other case, the background was not inferred.
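These criteria can be sketched as follows (again a minimal Python sketch with hypothetical inputs, not the code of our actual pipeline):
\begin{verbatim}
def infer_background(het_calls):
    # het_calls: hypothetical list of booleans, one per genotyped DOM-CAST
    # marker of a target, True when the marker was called heterozygous.
    if not het_calls:
        return None                   # insufficient coverage: no call
    frac_het = sum(het_calls) / len(het_calls)
    if frac_het > 0.90:
        return "DOM/CAST"             # > 90% heterozygous markers
    if frac_het < 0.10:
        return "DOM/DOM"              # > 90% homozygous markers
    return None                       # ambiguous: background not inferred
\end{verbatim}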
Out of the 4$\times$1390 targeted loci, 145 (2.6\%) had a read coverage too low for the target to be genotyped.
Aside from those, the aforementioned \textit{modus operandi} allowed us to genotype 97.5\% of all the targets presenting sufficient coverage and resulted in a mosaic of DOM/DOM and DOM/CAST genetic backgrounds consistent with 0 or 1 (and sometimes 2) crossing-overs per chromosome (Figure~\ref{fig:mosaic-backgrounds} and Appendix~\ref{app:data-and-figs}).
This provided strong support that our inference was correct.
Among the remaining 2.5\% (135) ambiguous targets, 6 (4\%) were flanked by DOM/DOM-background targets on one side and by DOM/CAST-background targets on the other side: these most likely corresponded to sites where recombination occurred in one of the parents.
All other ambiguous targets (94\%) were flanked on both sides by DOM/DOM-background targets: these were most likely erroneously inferred because some B6-DBA2 markers were erroneously classified as DOM-CAST markers.
All in all, across all 1,390 loci of the 4 mice, 7 were incongruent with the surrounding genetic background (either because they were subject to a double crossing-over, or because our inference was incorrect at these sites). We thus chose to remove them from the analysis.
Altogether, the proportion of heterozygous DOM/CAST-background targets (Table~\ref{tab:proportion-het-backgrounds}) was close to the expected 50\% (Table~\ref{tab:background-expected}).
To further verify that these observed proportions fitted what was expected, we simulated a DOM/CAST$\times$DOM/DOM cross in which COs (number given by the sex-averaged genetic length) were drawn randomly along each chromosome. We found that the distribution of the expected proportion of heterozygous targets (data not shown) fitted the observations (Table~\ref{tab:proportion-het-backgrounds}).
This genotyping map also allowed us to verify that all four mice were heterozygous for \textit{Prdm9}, since this gene was located in a DOM/CAST background in all samples.
% \clearpage
\begin{table}[t]
\centering
\begin{tabular}{rrrrrrr}
\toprule
\textbf{Category} & \textbf{Sample} & \textbf{\# DOM/CAST} & \textbf{\# DOM/DOM} & \textbf{\% of Het.} \\
\textbf{} & \textbf{} & \textbf{background} & \textbf{background} & \textbf{targets} \\
\midrule
\multirow{2}{*}{WT} & 28355 & 764 & 561 & 57.66 \\
& 28371 & 845 & 461 & 64.70 \\
\midrule
\multirow{2}{*}{Mutant} & 28353 & 663 & 669 & 49.77 \\
& 28367 & 624 & 693 & 47.38 \\
\midrule
\multicolumn{2}{r}{\textbf{Total}} & \textbf{2896} & \textbf{2384} & \textbf{54.85} \\
\bottomrule
\end{tabular}
\caption[Observed proportion of heterozygous targets in the studied mice]
{\textbf{Observed proportion of heterozygous targets in the studied mice.}
\par The background for each hotspot was inferred as described in the main text, for wild-type (WT) mice (top panel) and mutant mice (bottom panel). The line in bold represents the average across all four mice.
}
\label{tab:proportion-het-backgrounds}
\end{table}
\begin{table}[b!]
\centering
\begin{tabular}{rrrrrr}
\toprule
\textbf{Target} & & \textbf{Nb of} & \textbf{Nb of} & \textbf{Nb of} & \textbf{Event rate} \\
\textbf{category} & \textbf{Sample} & \textbf{targets} & \textbf{fragments} & \textbf{events} & \textbf{($\times$ 10\textsuperscript{-6})} \\
\midrule
\multirow{7}{*}{\textbf{\textit{Hotspots}}} & 28355 & 485 & 28,181,748 & 1,298 & 46.1 \\
& 28371 & 552 & 34,015,365 & 1,847 & 54.3 \\
& \textbf{Tot.\ WT} & \textbf{1037} & \textbf{62,197,113} & \textbf{3,145} & \textbf{50.6} \\
\\
& 28353 & 429 & 25,598,721 & 3,486 & 136 \\
& 28367 & 390 & 30,863,121 & 2,082 & 67.4 \\
& \textbf{Tot.\ mutants} & \textbf{819} & \textbf{56,461,842} & \textbf{5,568} & \textbf{98.6} \\
\midrule
\multirow{7}{*}{\textbf{\textit{Controls}}} & 28355 & 279 & 15,206,411 & 34 & 2.24 \\
& 28371 & 293 & 16,997,729 & 58 & 3.41 \\
& \textbf{Tot.\ WT} & \textbf{572} & \textbf{32,204,140} & \textbf{92} & \textbf{2.86} \\
\\
& 28353 & 234 & 13,658,994 & 33 & 2.42 \\
& 28367 & 234 & 17,565,253 & 25 & 1.42 \\
& \textbf{Tot.\ mutants} & \textbf{468} & \textbf{31,224,247} & \textbf{58} & \textbf{1.86} \\
\midrule
\multicolumn{1}{r}{\textbf{FP rate}} & \multicolumn{5}{r}{\textbf{3.22 \%}} \\
\bottomrule
\end{tabular}
\caption[Number of events detected in hotspot and control targets]
{\textbf{Number of events detected in hotspot and control targets.}
\par Events (false positives (FPs) or genuine recombination events) were detected using the unique-molecule genotyping pipeline described in Chapter~\ref{ch:5-methodology}.
All fragments or events overlapping at least 1 bp with a given target are counted in this table.
The event rate corresponds to the ratio of candidate recombination events over the total number of fragments.
The maximum false positive (FP) rate is the ratio of the event rate in control targets over that in hotspots.
The lines in bold represent the totals for the two WT and the two mutant mice.
}
\label{tab:HFM1-FP-rate}
\end{table}
\subsection{Detection of events in heterozygous hotspots}
Finally, for each individual, we applied the unique-molecule genotyping pipeline described in Chapter~\ref{ch:5-methodology} to all the heterozygous targets and we found that the maximum FP error rate for this re-adaptation of our approach (3.22\%, Table~\ref{tab:HFM1-FP-rate}) was similar to that from Chapter~\ref{ch:5-methodology} (3.73\%, Table~\ref{tab:FP-rate}).
Altogether, our procedure was thus as efficient at detecting recombination events in F2 individuals containing three genetic backgrounds as it was for F1 hybrids.
From this point on, we could thus assess the impact of the \textit{hfm1} mutation on several aspects of recombination.
\section{Impact of the mutation on recombination}
\subsection{Impact on the recombination rate (RR)}
\label{chap8:recombination-rate}
We observed that the recombination rate (RR) was, on average, almost twice as high for mutants as for WT mice (Table~\ref{tab:HFM1-FP-rate}).
This finding was unexpected since the only effect of the interaction between Mer3 and Mlh1 that was reported in yeasts concerned the length of gene conversion tracts, but not the recombination rate \citep{duroc2017concerted}.
In our case, this modification of the RR was mainly driven by the extremely high recombination rate of mouse 28353 (136 events per million sequenced fragments, Table~\ref{tab:HFM1-FP-rate}), which was over twice that of the other mutant mouse, 28367 (67.4 events per million sequenced fragments).
However, if, say, the subset of heterozygous hotspots of mouse 28353 were more intense (i.e.\ displayed higher recombinational activity on average than other hotspots), this observation would not correspond to a genuine biological effect.
In the following subsection, I describe how we thus controlled for such technical biases.
\subsection{Pairwise comparison of the RR in shared hotspots}
\begin{figure}[p]
\centering
\begin{subfigure}[b]{0.75\textwidth}
\subcaption{Between the two WT mice}
\includegraphics[width=\textwidth]{figures/chap8/28371_vs_28355.eps}
\end{subfigure}
\vspace{0.5cm}
\begin{subfigure}[b]{0.75\textwidth}
\subcaption{Between the two mutant mice}
\includegraphics[width=\textwidth]{figures/chap8/28367_vs_28353.eps}
\end{subfigure}
\caption[Correlation of the number of recombination events in shared hotspots for the two WT and the two mutant mice]
{\textbf{Correlation of the number of recombination events in shared hotspots for the two WT (a) and the two mutant (b) mice.}
\par The linear regression was significant for the two WT mice (slope $=1.03$; \textit{p}-val~$<~2~\times~10^{-16}$; $n_{hotspots} = 257$) and for the two mutant mice (slope = $0.69$; \textit{p}-val $< 2 \times 10^{-16}$; $n_{hotspots}~=~241$).
Figures for all other pairwise correlations are reported in Appendix~\ref{app:data-and-figs}.
Results of the linear correlation were similar for Rec-1S and Rec-2S events, as well as when controlling for the total number of events sequenced at each hotspot (data not shown).
}
\label{fig:pairwise-RR-shared}
\end{figure}
To test whether the variation in overall recombination rate (RR) across mice was due to the fact that the sets of hotspots analysed (i.e.\ heterozygous hotspots) were different between mice, we performed comparisons of the RR in shared hotspots for all pairs of mice (Figure~\ref{fig:pairwise-RR-shared} and Appendix~\ref{app:data-and-figs}).
We found that the difference in recombination rates between WT and mutant mice was observed even for shared hotspots, which showed that the effect was not due to a differential sampling of heterozygous loci.
In addition, to see whether the difference in RR applied specifically to one type of recombination products (either COs or NCOs), we reproduced the pairwise comparisons separately for Rec-1S and Rec-2S events (Appendix~\ref{app:data-and-figs}).
We found that the results were similar for both Rec-1S and Rec-2S events, which showed that both COs and NCOs were affected.
All in all, the RRs for the two WT mice were extremely close (Figure~\ref{fig:pairwise-RR-shared}.a.): the slope of the linear regression was almost 1 (slope $=1.03$; \textit{p}-val $<2 \times 10^{-16}$).
However, the recombination rates of the two mutant mice were extremely variable (Table~\ref{tab:HFM1-FP-rate} and Figure~\ref{fig:pairwise-RR-shared}.b.).
What drives such variability among \textit{hfm1} mutants remains, at this stage, unknown: to get more insight into this topic, it would be necessary to analyse the data from additional mutant mice displaying distinct mosaics of genetic backgrounds, and see, for instance, whether the increased RR is associated with a given locus in the DOM/DOM or DOM/CAST background.
\subsection{Impact on CO tract length}
\begin{table}[b!]
\centering
\begin{adjustbox}{width = 1\textwidth}
\begin{tabular}{rrrrrrr}
\toprule
& \multicolumn{3}{c}{\textbf{WT}} & \multicolumn{3}{c}{\textbf{Mutant}} \\
\cmidrule(l){2-4} \cmidrule(l){5-7}
\textbf{Parameter} & \textbf{Both} & \textbf{28355} & \textbf{28371} & \textbf{Both} & \textbf{28353} & \textbf{28367} \\
\midrule
\textbf{\textit{CO:NCO ratio}} & 0.108 [0.009--0.189] & 0.095 & 0.098 & 0.092 [0.0003--0.40] & 0.051 & 0.166 \\
\textbf{\textit{CO CT length}}\\
$Mean$ & 744 [219--2790] & 539 & 654 & 236 [145--478] & 238 & 253 \\
$Sd$ & 582 [101--765] & 514 & 759 & 292 [30--397] & 416 & 232 \\
\textbf{\textit{NCO CT length}}\\
$Mean$ & 34 [5--47] & 35 & 31 & 30 [0.75--397] & 49 & 32 \\
$Sd$ & 43 [1--101] & 38 & 57 & 108 [13--260] & 130 & 66 \\
\bottomrule
\end{tabular}
\end{adjustbox}
\caption[Recombination parameters inferred from an approximate Bayesian computation for WT and mutant mice]
{\textbf{Recombination parameters inferred from an approximate Bayesian computation for WT and mutant mice.}
\par Parameters (CO:NCO ratio and CO and NCO conversion tract (CT) length reported in bp) were estimated for the two WT and the two mutant mice.
95\% confidence intervals were reported between brackets.
Because the observed recombination rate varied greatly between the two mutants (Subsection~\ref{chap8:recombination-rate}), we also reported the point estimates for all single individuals.
}
\label{tab:HFM1-ABC-results}
\end{table}
Finally, because the interaction between the HFM1 yeast homologue (Mer3) and the MLH1 yeast homologue (Mlh1) has been shown to play a role in DNA heteroduplex extension \citep{duroc2017concerted}, we wanted to assess whether tract lengths differed between the WT and the \textit{hfm1} mutant mice.
Because CO and NCO CT lengths are not directly observable from the data, we performed an approximate Bayesian computation (ABC) similar to what was described in Chapter~\ref{ch:6-recombination-parameters}, based on 50,000 simulations reproducing this experiment (thus, as compared to Chapter~\ref{ch:6-recombination-parameters}, we modified the hotspot width, the fragment start and stop positions and the polymorphic sites to fit this experiment).\\
Altogether, the CO:NCO ratio and the NCO CT length estimated for WT mice were strikingly close to those measured on the WT mice of Chapter~\ref{ch:6-recombination-parameters} (Table~\ref{tab:ABC-results}).
CO CTs were slightly longer (albeit \textit{not} significantly) than those found in the previous ABC, which likely comes from the fact that the targeted regions were wider in this experiment whereas the maximum distance between DSB sites and CO switch points was limited in the previous one.
Interestingly, we found a clear CO CT length reduction in \textit{hfm1} mutant mice as compared to WT mice (Table~\ref{tab:HFM1-ABC-results}).
Because the observed recombination rates varied greatly between the two mutants (see Subsection~\ref{chap8:recombination-rate}), we checked whether this effect was also visible in single individuals and found that, indeed, the inferred conversion tract lengths were stable, no matter the recombination rate.
This observation was consistent with the idea that, in mice, the interaction between HFM1 and MLH1 plays a role in extending the DNA heteroduplex.
Surprisingly, this effect was the opposite of what had been previously observed in yeasts \citep{duroc2017concerted}, but the biological reason why the role of the interaction between HFM1 and MLH1 differs between these two species remains to be determined.\\
In summary, the method we implemented to detect recombination was adaptable to cases where other genomes had been introgressed into the hybrid and allowed us to gain new insight into recombination in mice.
However, like any approach, it had inherent limitations, which I will discuss in the following chapter, together with the scientific implications of the whole work done in the context of this thesis.
\section{Modal pomsets}
In order to perform a sharper analysis of dependency, we present an alternate
semantics using modal pomsets defined below. Modal pomsets make a formal
distinction between strong order and weak order.
\begin{definition}
A \emph{modal (memory model) pomset} is a tuple
$(\Event, {\sle}, {\gtN},
\labeling)$, such that
\begin{itemize}
\item $(\Event, {\gtN},
\labeling)$ is a (memory model) pomset, and
\item ${\sle} \subseteq {\gtN}$ is a partial order.
\end{itemize}
\end{definition}
We write $\bEv\slt\aEv$ when $\bEv\sle\aEv$ and $\bEv\neq\aEv$, and similarly for $\gtN$.
We list a few observations to illustrate the relationship between \tvalpom s and pomsets. Suppose we are given a \tvalpom,
$(\Event, {\sle}, {\gtN}, \labeling)$. Then:
\begin{itemize}
\item $(\Event, {\gtN},\labeling)$ is a pomset with the same reads-from relation.
\item Let $\reco$ be the restriction of $\gtN$ to conflicting actions on the same location. Then, $(\Event, {\sle}, (\sle \cup \reco)^{*}, \labeling)$ is a \tvalpom, and $(\sle \cup \reco)^{*} \subseteq {\gtN}$.
\end{itemize}
\paragraph*{Changes to definitions}
The definition of the semantics of programs using \tvalpom\ largely follows the one using pomsets. We sketch the changes to definitions below.
\begin{itemize}
\item
We say that $\bEv$ \emph{fulfills $\aEv$ on $\aLoc$} if $\bEv$ writes
$\aVal$ to $\aLoc$, $\aEv$ reads $\aVal$ from $\aLoc$,
\begin{itemize}
\item $\bEv \slt \aEv$, and
\item if an event $\cEv$ writes to $\aLoc$ then either $\cEv \gtN \bEv$ or $\aEv \gtN \cEv$.
\end{itemize}
\item
Augmentation has to include ${\slt}$, i.e.\
$\aPS'$ is an \emph{augmentation} of $\aPS$ if $\Event'=\Event$,
${\labeling'}={\labeling}$, ${\sle'}\supseteq{\sle}$, and
${\gtN'}\supseteq{\gtN}$.
\item The definitions of substitution, restriction and the filtering
operations stay the same, with $\sle$ carried over unchanged. For
example, substitution is defined as follows:
Let $\aPSS\aSub$ be the set $\aPSS'$ where $\aPS'\in\aPSS'$ whenever
there is $\aPS\in\aPSS$ such that:
$\Event' = \Event$,
${\sle'} = {\sle}$,
${\gtN'} = {\gtN}$,
and
$\labeling'(\aEv) = (\bForm\aSub \mid \aAct)$ when $\labeling(\aEv) = (\bForm \mid \aAct)$.
\item In composition, we require ${\sle'}\supseteq{\sle^1}\cup{\sle^2}$.
\item The changes to the definition \ref{def:prefix} of prefixing are as follows. The key changes are that synchronization and dependency enforce $\slt$ whereas coherence only enforces $\gtN$.
\begin{itemize}
\item ${\sle'}\supseteq{\sle}$.
% \item Item 5b changes to: if $\aEv$ is a write then either $\cEv\slt'\aEv$
% or $\labelingForm'(\aEv)$ implies $\labelingForm(\aEv)$.
\item 5b changes to: if $\bEv$ and $\aEv$ are \external actions in conflict, then $\bEv \gtN' \aEv$,
% \item Item \ref{pre-coherence} changes to:
% if $\aAct$ is a write that conflicts with $\labelingAct(\aEv)$
% then $\cEv \gtN' \aEv$,
\item Items 5a, 5c, 5d and 5e change to impose $\slt$ order: e.g.\ if $\aAct$ is an acquire or $\labelingAct(\aEv)$ is a release then $\cEv \slt' \aEv$.
\end{itemize}
\end{itemize}
We use $\tsem{\aCmd}$ to stand for the \tvalpom\ semantics of $\aCmd$.
\subsection{Generators}
Modal pomsets provide a characterization of the generators from section~\ref{sec:sc}.
Recall that \emph{generators} in the pomset semantics are pomsets that are minimal with respect to augmentation and implication. These generators are induced by pomsets that are minimal with respect to augmentation and implication in the \tvalpom\ semantics in the following sense.
$(\Event, {\gtN},\labeling)$ is a generator for $\sem{\aCmd}$
if there exists $(\Event, \slt, {\gtN},\labeling) \in \tsem{\aCmd}$ minimal w.r.t.~augmentation and implication, and $\gtN = (\sle \cup \reco)^{*}$.
Furthermore, any strong order that is outside of program order must be induced by a reads-from. In the two-thread case, we can state the latter
property as follows: suppose $\aEv$ and $\bEv$ are not related by program
order and $\aEv\slt\bEv$; then there exist $\bEv'$ that reads-from $\aEv'$
such that $\aEv\xpox\aEv'$, $\bEv'\xpox\bEv$ and
$\aEv \slt \aEv' \slt \bEv' \slt \bEv$.
\subsection{Closure properties}
The fine grain analysis of dependency in the modal semantics allows us to establish some closure properties of the semantics of programs.
We consider programs of the
form $\vec{\aLoc}\GETS\vec{0}\SEMI\FENCE\SEMI\aCmd$, where $\aCmd$ is
restriction-free. Thus, all memory locations are initialized to $0$, and
initialization happens-before the execution of any command.
We say that $\aPS' = \aPS\restrict{\Event'}$ when
$\Event' \subseteq \Event$,
${\labeling'} = {\labeling}\restrict{\Event'}$, and
${\le'} = {\le}\restrict{\Event'}$.
% ${\gtN'} = {\gtN}\restrict{\Event'}$.
\begin{definition}
Let $(\aPS \after \aEv) = {\{ \bEv\in\Event \mid \aEv \le \bEv
\}}$ be the set of events that follow $\aEv$ in $\aPS$.
\end{definition}
The semantics of read is ``input''-enabled, since it permits the read of any visible value. Thus, any racy read in a program can be replaced by a read of an earlier value (w.r.t.~$\reco$), even while the races with existing independent writes are maintained. A canonical example to keep in mind for this lemma is the program:
\begin{align*}
(y\GETS 0 \SEMI \aReg \GETS y \SEMI x \GETS 1)
\PAR
(x\GETS 0 \SEMI \bReg \GETS x \SEMI y \GETS 1)
\end{align*}
with both registers getting value $1$ via the execution:
\begin{tikzdisplay}[node distance=1em]
\event{wy0}{\DW{y}{0}}{}
\event{ry1}{\DR{y}{1}}{right=of wy0}
\event{wx1}{\DW{x}{1}}{right=of ry1}
\event{wx0}{\DW{x}{0}}{below=of wy0}
\event{rx1}{\DR{x}{1}}{right=of wx0}
\event{wy1}{\DW{y}{1}}{right=of rx1}
\rf{wx1}{rx1}
\rf{wy1}{ry1}
\wk{wx0}{rx1}
\wk{wy0}{ry1}
\end{tikzdisplay}
The lemma constructs the execution:
\begin{tikzdisplay}[node distance=1em]
\event{wy0}{\DW{y}{0}}{}
\event{ry1}{\DR{y}{0}}{right=of wy0}
\event{wx1}{\DW{x}{1}}{right=of ry1}
\event{wx0}{\DW{x}{0}}{below=of wy0}
\event{rx1}{\DR{x}{0}}{right=of wx0}
\event{wy1}{\DW{y}{1}}{right=of rx1}
\rf{wx0}{rx1}
\rf{wy0}{ry1}
\wk{rx1}{wx1}
\wk{ry1}{wy1}
\end{tikzdisplay}
\begin{lemma}\label{inputen}
%Let $\aCmd = \vec{\aLoc}\GETS\vec{0}\SEMI \FENCE\SEMI (\aCmd^1 \PAR \cdots \PAR \aCmd^n)$.
Let $\aPS \in \tsem{\aCmd}$ be a top level pomset.
Let $\aEv \in \aPS$ read from write event $\bEv$ on $\aLoc$, with $\neg(\bEv \xhb \aEv)$.
Then, there exists $\bPS \in \tsem{\aCmd}$ such that:
\begin{itemize}
%\item $(\exists \aEv' \in \Event_{\bPS})$ such that $
%\Event_{\bPS}$ is the disjoint union of $\Event_{\aPS} \setminus
%(\aPS \after \aEv))$ and $(\bPS \after \aEv')$.
\item There exists $\aEv' \in \bPS$ that reads from $\aLoc$, with matching write event $\bEv'$, such that $\bEv' \xeco \bEv$ in $\bPS$.
\item The restriction of $\sle$ in $\aPS$ to $\Event_{\aPS} \setminus (\aPS \after \aEv)$ agrees with the restriction of $\sle$ in $\bPS$ to $\Event_{\bPS} \setminus (\aPS \after \aEv)$.
\item The restriction of $\le$ in $\aPS$ to $\Event_{\aPS} \setminus (\aPS \after \aEv)$ agrees with the restriction of $\le$ in $\bPS$ to $\Event_{\bPS} \setminus (\aPS \after \aEv)$.
\end{itemize}
\end{lemma}
\begin{proof}
The form of $\aCmd$ ensures that there is always a write to $\aLoc$ that is related by $\xhb$ to any read. Thus, there is at least one other write that can satisfy the read recorded as $\aEv$.
The key observation behind the proof is that a change in a prefixing read action can only affect the events that are dependent on it, i.e.\ those related to the read action in the $\slt$ order.
\end{proof}
In the following lemma, we invert the $\reco$ relationship between a read and a write. A canonical example to keep in mind for this lemma is the program:
\begin{align*}
(y\GETS 0 \SEMI x \GETS 1 \SEMI \aReg \GETS y)
\PAR (x\GETS 0 \SEMI y \GETS 1 \SEMI \bReg \GETS x)
\end{align*}
with both registers getting value $0$ via the execution:
\begin{tikzdisplay}[node distance=1em]
\event{wy0}{\DW{y}{0}}{}
\event{wx1}{\DW{x}{1}}{right=of wy0}
\event{ry0}{\DR{y}{0}}{right=of wx1}
\event{wx0}{\DW{x}{0}}{below=of wy0}
\event{wy1}{\DW{y}{1}}{right=of wx0}
\event{rx0}{\DR{x}{0}}{right=of wy1}
\rf[bend right]{wx0}{rx0}
\rf[bend left]{wy0}{ry0}
\wk{rx0}{wx1}
\wk{ry0}{wy1}
\wk{wx0}{wx1}
\wk{wy0}{wy1}
\end{tikzdisplay}
The lemma constructs the execution:
\begin{tikzdisplay}[node distance=1em]
\event{wy0}{\DW{y}{0}}{}
\event{wx1}{\DW{x}{1}}{right=of wy0}
\event{ry0}{\DR{y}{1}}{right=of wx1}
\event{wx0}{\DW{x}{0}}{below=of wy0}
\event{wy1}{\DW{y}{1}}{right=of wx0}
\event{rx0}{\DR{x}{1}}{right=of wy1}
\rf{wx1}{rx0}
\rf{wy1}{ry0}
\wk{wx0}{wx1}
\wk{wy0}{wy1}
\end{tikzdisplay}
\begin{lemma}\label{removerw}
Let $\aPS \in \tsem{\aCmd}$ be a top-level pomset.
Let $\bEv \in \aPS$ be a write on $\aLoc$.
Let $\aEv \in \aPS$ read from $\aLoc$ such that $\aEv \xeco \bEv$ and $\neg(\aEv \slt \bEv)$. Then, there exists $\bPS \in \tsem{\aCmd}$ such that:
\begin{itemize}
\item $\aEv' \in \bPS \setminus \aPS$ reads from $\aLoc$, with matching write $\bEv$.
\item The restriction of $\sle$ in $\aPS$ to $\Event_{\aPS} \setminus (\aPS\ \after\ \aEv)$ agrees with the restriction of $\sle$ in $\bPS$ to $\Event_{\bPS} \setminus (\aPS\ \after\ \aEv)$.
\end{itemize}
\end{lemma}
\begin{proof}
The proof proceeds similarly to the one above; in this case, we replace the value read in $\aEv$ so that it comes from $\bEv$.
\end{proof}
Any new event $\bEv'$ in $\bPS \after \aEv'$ reading from $\aLoc$ cannot have a matching write event $\bEv'' \xeco \bEv$, since that would imply $\bEv' \xeco \bEv$ and a $\reco$ cycle $\bEv \slt \aEv \slt \aEv' \xeco \bEv$. Thus, the above lemma can be iterated if the new pomset has any further reads that precede $\bEv$ in $\reco$, so we can finally derive a pomset with no reads and writes satisfying the hypothesis of the lemma.
The $\reco$ order between writes that are not related by $\lt$ can be reversed.
A canonical example to keep in mind for this lemma is the program:
\begin{align*}
(x\GETS 1)
\PAR (x\GETS 0)
\end{align*}
\begin{tikzdisplay}[node distance=1em]
\event{wy0}{\DW{x}{1}}{}
\event{wx0}{\DW{x}{0}}{right=of wy0}
\wk{wy0}{wx0}
\end{tikzdisplay}
The lemma constructs the execution:
\begin{tikzdisplay}[node distance=1em]
\event{wy0}{\DW{x}{1}}{}
\event{wx0}{\DW{x}{0}}{right=of wy0}
\wk{wx0}{wy0}
\end{tikzdisplay}
\begin{lemma}\label{cohww}
Let $\aPS \in \tsem{\aCmd}$ be a top level pomset. Let $\bEv, \aEv$ be writes to $\aLoc$ such that:
\begin{itemize}
\item $\bEv\gtN \aEv$
\item for all writes $\cEv$ to $\aLoc$ such that $ \bEv \gtN \cEv \gtN \aEv$, it is the case that $ \neg(\cEv \slt \aEv)$ and $\neg(\cEv \xpox \aEv)$
\end{itemize}
Then, there exists $\bPS \in \tsem{\aCmd}$ such that $\Event_{\aPS} = \Event_{\bPS}$, $\sle_{\aPS} = \sle_{\bPS}$, and
$\aEv \gtN \bEv$ in $\bPS$.
\end{lemma}
\begin{proof}
We show how to interchange $\aEv, \bEv$ when they are adjacent in $\gtN$, i.e.\ we assume that $\neg(\exists \cEv) \ \bEv \gtN \cEv \gtN \aEv$. The full proof follows by induction.
Since $\sem{\aCmd}$ is augmentation closed, it suffices to show that we can build $\bPS$ while satisfying the constraints between $\slt,\gtN$. We list the changes below.
\begin{itemize}
\item $\aEv \gtN \bEv$ in $\bPS$
\item For all reads $\cEv$ matched to $\aEv$, change from $\bEv \gtN \cEv$ in $\aPS$ to $\cEv \gtN \bEv$ in $\bPS$
\item For all reads $\cEv$ matched to $\bEv$, change from $\cEv \gtN \aEv$ in $\aPS$ to $\aEv \gtN \cEv$ in $\bPS$
\popQED
\end{itemize}
\end{proof}
\section{Proof of DRF}\label{drfproof}
In this section of the appendix, we develop a proof of DRF for \tvalpom s. By the results in the earlier section, it yields DRF for the pomset semantics, since the races are identical in both models.
In the rest of this section, we assume that $\aPS$ is a generator for
$\tsem{\aCmd}$.
We prove:
\begin{description}
\item[DRF1: ] If $\aPS$ does not have a race, $\aPS \in \tsemsc{\aCmd}$.
\item[DRF2: ] If $\aPS$ has a race, then there exists $\bPS\in \tsemClosed{\aCmd}$ such that $\bPS \in \tsemsc{\aCmd}$ and has a race.
\end{description}
\paragraph*{Proof of DRF1}
We first show that if $\aPS \in \tsem{\aCmd} \setminus \tsemsc{\aCmd}$, then $\aPS$ has a race. By assumption, there is a cycle in $\rpox \cup \slt \cup \xeco$. Let this cycle be $\aEv_0, \aEv'_0, \aEv_1, \aEv'_1, \ldots, \aEv_n, \aEv'_n, \aEv_0$ where for all $i$, $\aEv_i \xpox \aEv'_i$ and $\aEv'_i \not\xpox \aEv'_{i+1}$.
If for all $i$, $\aEv'_i \xhb \aEv'_{i+1}$, then the above is a cycle in $\rhb$, which is a contradiction.
So, there is at least one $i$ such that $\aEv'_i \not\xhb \aEv'_{i+1}$. There are two cases to consider.
\begin{itemize}
\item $\aEv'_i \xeco \aEv'_{i+1}$. In this case, there is a race.
\item $\aEv'_i \slt \aEv'_{i+1}$. In this case, $\aEv'_i$ is a write and $\aEv'_{i+1}$ is a conflicting read, so there is a race.
\end{itemize}
\paragraph*{Proof of DRF2}
We define the size of a pomset as follows: $\size(\aPS)$ is the number of events in $\aPS$. Since we are considering loop-free programs, there is an $\aPS \in \tsemsc{\aCmd}$ with maximum size, which we identify as $\size(\aCmd)$.
We prove by induction on $\size(\aCmd) - \size(\bPS)$ that given $(\aPS, \bPS)$ such that:
\begin{itemize}
\item $\bPS$ is a prefix of some $\aPS' \in \tsemsc{\aCmd}$
\item $\bPS$ is a prefix of $\aPS$ under all of $\xpox,\gtN,\lt$
\item $\aPS$ has a race
\end{itemize}
there exists $\bPS\in \tsem{\aCmd}$ that demonstrates the race.
The required theorem follows by setting $\bPS$ to be the empty pomset.
For the base case, $\size(\bPS) = \size(\aPS)$. In this case, $\aPS$ is the required witness.
Otherwise, consider a maximal sequential prefix, extending $\bPS$, w.r.t.~all of $\rpox,\reco,\slt$. If it strictly contains $\bPS$, the result follows from the induction hypothesis.
If not, $\bPS$ is already maximal. Consider the set of all events in $\aPS \setminus \bPS$ that are minimal w.r.t.~$\rhb$. In particular, these events will also be minimal w.r.t.~$\rpox$.
If one of these events, say $\aEv$, is a write, we proceed as follows. Using $\rhb$-minimality of $\aEv$, we deduce $\rpox$-minimality of $\aEv$. Using the generator properties, we deduce that $\aEv$ is $\slt$-minimal. Using lemma~\ref{removerw}, we build $\aPS_1$ from $\aPS$ without changing $\bPS$ to ensure that there is no read $\bEv \in \aPS_1 \setminus \bPS$ such that $\bEv \xeco \aEv$. Using lemma~\ref{cohww}, we build $\aPS_2$ from $\aPS_1$ without changing $\bPS$ to ensure that there is no write $\bEv \in \aPS_2 \setminus \bPS$ such that $\bEv \xeco \aEv$. Thus, $\aEv$ is $\reco$-minimal in $\aPS_2 \setminus \bPS$. The result follows from the induction hypothesis by considering $(\aPS_2,\bPS_1)$, where $\bPS_1$ is obtained from $\bPS$ by adding $\aEv$.
So, we can assume that all events in $\aPS \setminus \bPS$, say $\aEv_0, \ldots, \aEv_n$ that are minimal w.r.t.~$\rhb$ are reads, and we have events
$\aEv'_0, \aEv'_1, \ldots, \aEv'_n, \aEv_0$ such that:
\[
\begin{array}{lrl}
\aEv_i \xpox\ \aEv'_i \\
\aEv'_i \ (\reco\ \cup \slt) \ \aEv_{(i+1)\mod n}
\end{array}
\]
Let $\bEv$ be the matching write for $\aEv_{(i+1)\mod n}$. If $\bEv \in \bPS$, then by $\reco$ prefix closure of $\bPS$, $\bEv \xeco\ \aEv'_i$ and $\aEv_{(i+1)\mod n} \reco\ \aEv'_i$, which contradicts $\reco$ being a partial order per location. So, we can assume that $\aEv'_i \ \slt \ \aEv_{(i+1)\mod n}$.
We proceed as follows. We use lemma~\ref{inputen} on the pomset $\aPS$, the read $\aEv_{(i+1)\mod n}$, and the write $\aEv'_i$ to construct $\cPS$, which changes the value read in $\aEv_{(i+1)\mod n}$ to a value from $\bPS$. $\dPS$ is derived by adding the modified read yielded by lemma~\ref{inputen} to $\bPS$. The result follows by the induction hypothesis since $\dPS$ is a prefix of $\cPS$ under all of $\xpox,\lt, \reco$, $\cPS$ has a race, and $\size(\dPS) = \size(\bPS) + 1$.
\endinput
\begin{comment}
Operation Implementation
Relaxed read ldr
Relaxed write str
Acquiring read ldar
Releasing write stlr
Fence dmb.sy
\end{comment}
\begin{comment}
ob does not contradict eco
ob does not contradict (co cap po):
Suppose that wx1 po wx2 then it cannot be that wx2 ob wx1.
We know that wx1 co wx2 by SC-PER-LOC
% Case 1. w1 is read externally, then we have
% wx1 rfe r
% and
% r fre w2
% so
% wx1 obs+ wx2
% which contradicts EXTERNAL
% Case 2. wx1 is not read externally.
We show this by contradiction
Assume
wx1 co wx2
and
wx2 ob wx1
Note that
po supseteq dob cup aob cup bob
So in order to get order into wx1, we must have
wx2 (ob?; obs; ob?; obs; ob?) wx1
Note that we cannot have dob or bob into wx1 after obs, since then we would
also have it into wx2, creating a cycle in EXTERNAL. This holds because both
dob and bob are closed on the right w.r.t. coi
So it must be that
wx2 (ob?; obs; ob?; wx0; coe) wx1,
in which case we also have wx0 coe wx2, contradicting EXTERNAL
or
wx2 (ob?; obs; ob?; rx0; fre) wx1
in which case we also have rx0 fre wx2, contradicting EXTERNAL
Internal reads do not need to respect ob:
Arm allows the following:
Ra1 -ctrl-> Wx1 -rfi-> Rx1 ---> Wb1 if(a){x=1}; b=x
| |
Wa1 <-------------------------- Rb1 a=b
Suppose that wx1 po rx2 and rx2 is read externally.
Then it cannot be that rx2 ob wx1.
Case 1: if wx1 co wx2, then we have wx1 coe wx2 rfe rx2, contradicting EXTERNAL
Case 2: if wx2 co wx1, then we have rx2 fr wx1, contradicting SC-PER-LOC
Suppose that rx1 po wx2 and rx1 is read externally.
Then it cannot be that wx2 ob rx1.
Case 1: if wx2 co wx1, then wx2 co wx1 rf rx1 po wx2, contradicting SC-PER-LOC
Case 2: if wx1 co wx2, for a contradiction, suppose wx2 ob rx1.
then we need another thread involved to get order from wx2 to rx1.
To get order into the read, there are several options:
- use cross thread read, then dob; but dob does not include reads in its domain.
An attempt to do this is something like:
Wx1 x=1
|
Ra2 -ctrl-> Rx1 - - -> Wx2 if(a){r=x}; x=2
| |
Wa2 <----------------- Rx2 a=x
But the ctrl dependency is not included in ob between reads.
- use cross thread read then barrier, but then you contradict EXTERNAL
- create an ob edge from Rx2 to Wx1.
An attempt to do this is,
Wx1 <-------------- Ra1
| | But cannot get Wx2 --> Wa1 without a barrier
Rx1 - - -> Wx2 ---> Wa1
Wx1 <----- Rx2
| | contradicts SC-PER-LOC
Rx1 - - -> Wx2
Other examples to type in:
Allowed:
Rx1 -> Wy0 Wy1
Ry1 -> Wz0 Wz1
Rz1 -> Wx0 Wx1
Forbidden:
Rx1 -> Wy0 Wy1
Ry1 -> Wx0 Wx1
\end{comment}
\begin{comment}
\citet{DBLP:journals/pacmpl/PodkopaevLV19} define the \emph{Intermediate
Memory Model (IMM)} and provide efficient implementations of the IMM into
several processor architectures, including TSO, ARMv8 and Power.
In this section, we show that any execution allowed by a sublanguage of the
IMM is also allowed by our semantics. The sublanguage we consider bans
loops, read-modify-write (RMW) operations, and fences. In addition, we take
the set of memory locations, $\Loc$, to be finite. Syntactically, we drop
the superscript \textsf{rlx} on relaxed reads and writes; in addition, we use
structured conditionals rather than the more general \textsf{goto}. We refer
to this sublanguage as $\muIMM$.
$\muIMM$ programs sit in the restriction-free fragment of our language, where
all memory locations are initialized to $0$ and parallel-composition occurs
only at top level. In other words, $\muIMM$ programs have the form
\begin{displaymath}
{\aLoc_1}\GETS{0}\SEMI
\cdots\SEMI
{\aLoc_m}\GETS{0}\SEMI
(\aCmd^1 \PAR \cdots \PAR \aCmd^n)
\end{displaymath}
where $\aCmd^1$, \ldots, $\aCmd^n$ do not include either composition or
restriction.
Due to space limitations, we do not include a full description of the IMM.
The broad strokes of the argument given here should be clear, but interested
readers will need to refer to \citep{DBLP:journals/pacmpl/PodkopaevLV19} for
details.
\end{comment}
\endinput
\section{Proof of DRF}
For any $\aPS$, $\closed(\aPS)$ is the set enriched with useless reads
(preserving augmentation closure) and from which we remove any event
whose precondition is not a tautology.
For top level programs:
\begin{displaymath}
\semClosed{\VAR\vec{\aLoc}\SEMI
\vec{\aLoc}\GETS\vec{0}\SEMI
\vec{\bLoc}\GETS\vec{0}\SEMI
\FENCE\SEMI
(\aCmd^1 \PAR \cdots \PAR \aCmd^n)}
=
\VAR\vec{\aLoc}\SEMI
\vec{\aLoc}\GETS\vec{0}\SEMI
\vec{\bLoc}\GETS\vec{0}\SEMI
\FENCE\SEMI
(\semClosed{\aCmd^1} \PAR \cdots \PAR \semClosed{\aCmd^n})
\end{displaymath}
\begin{definition}
A \emph{thread} is a top-level component of a parallel composition.
\end{definition}
\begin{definition}
$\aPS$ is a generator of $\semClosed{\aCmd}$ if for all $\bPS \in \semClosed{\aCmd}$ such that $\aPS$ augments $\bPS$, $\aPS = \bPS$.
\end{definition}
Since the programs we consider are loop-free, for any command $\aCmd$, the size of the pomsets in $\sem{\aCmd}$ is bounded by a constant, which we denote by $\size(\aCmd)$.
\section{Generators for semantics of programs with parallel composition}
All generators $\aPS$ satisfy the following factorization of cross-thread $\lt$.
\begin{lemma}\label{pargen}
Consider the subset of pomsets of $\semClosed{\aCmd \PAR \bCmd}$ that are $\aLoc$-closed for all $\aLoc$.
Let $\aPS$ be any generator.
%\begin{itemize}
% \item
Let $\aEv\lt\bEv$ and $\aEv \in \semClosed{\aCmd}$ and $\bEv \in \semClosed{\bCmd} $.
Then there is a write $\aEv' \in \semClosed{\aCmd}$, and a read $\bEv' \in \semClosed{\bCmd}$ such that $\bEv'$ reads-from $\aEv'$ and $\aEv \lt \aEv' \lt \bEv' \lt \bEv$.
%\item $\aEv \gtN \bEv$ only if $ \aEv [\lt \cup (\le; \reco;\le)^{\star}]
%\bEv$.
% \item If $\aEv\lt\bEv$ and $\aEv, \bEv \in \semClosed{\aCmd}$,
%then there exists
%There exists a release action $\aEv'$ in $\sem{\aCmd}$, a
%matching acquire action $\bEv'$ in $\sem{\bCmd}$ such that $
%\aEv \lt \aEv'$, $\bEv' \lt \bEv$ and $\aEv' \lt \bEv'$.
\end{lemma}
The proof of lemma~\ref{cohsat} yields the following two corollaries.
\begin{corollary}\label{cohrw}
Let $\aPS \in \sem{\aCmd}$ be a generator. Let
\begin{itemize}
\item $\bEv'$ be a read from $\aLoc$ with matching write $\bEv$.
\item $\aEv$ be a write to $\aLoc$ such that $\bEv' \gtN \aEv$.
\item For all writes $\cEv$ to $\aLoc$ such that $\bEv \gtN \cEv \gtN \aEv$, it is the case that $\neg(\bEv' \lt \cEv)$ and $\neg(\bEv \xpox \cEv)$
\end{itemize}
Then, there exists $\bPS \in \sem{\aCmd}$, also a generator, such that $\Event_{\aPS} = \Event_{\bPS}$, $\le_{\aPS} = \le_{\bPS}$, and $\aEv \gtN \bEv'$ in $\bPS$.
\end{corollary}
\begin{corollary}\label{cohwr}
Let $\aPS \in \sem{\aCmd}$ be a generator. Let
\begin{itemize}
\item $\aEv'$ read from $\aLoc$ with matching write $\aEv$.
\item $\bEv$ be a write to $\aLoc$ such that $\bEv \gtN \aEv'$.
\item For all writes $\cEv$ to $\aLoc$ such that $\bEv \gtN \cEv \gtN \aEv$ and $\cEv \not= \aEv$, it is the case that $\neg(\cEv \lt \aEv')$ and $\neg(\cEv \xpox \aEv)$.
\end{itemize}
Then, there exists $\bPS \in \sem{\aCmd}$, also a generator, such that:
$\Event_{\aPS} = \Event_{\bPS}$, $\le_{\aPS} = \le_{\bPS}$, and
$\aEv' \gtN \bEv$ in $\bPS$.
\end{corollary}
% =============== good lemma. Not used. ==================
\begin{definition}
$ \aEv \xeco \bEv$ if both $\aEv$ and $\bEv$ touch the same location, at least one is a write, and $\aEv \xird \bEv$ or $\aEv \xrb \bEv$ or $\bEv \gtN \aEv$.
\end{definition}
By lemma~\ref{extendob}, if $\aEv \not=\aEv'$, we deduce $\aEv \xob \bEv'$, and thus $\aEv \xob \bEv$. If $\bEv \not=\bEv'$, we deduce $\aEv' \xob \bEv$ and thus $\aEv \xob \bEv$.
Thus, if $\aEv \not=\aEv'$ or $\bEv \not=\bEv'$, then there is a cycle $\aEv \xob \bEv \xob \cEv \xob \cEv' \xob \aEv$.
So we can assume that $\aEv' = \aEv$, $\bEv' = \bEv$ and
\[ \aEv \xeco \bEv \xob \cEv \xob \cEv' \xeco \aEv \]
where all of $\aEv, \bEv, \cEv, \cEv'$ access the same location and at least one of $\aEv,\bEv$ is a write, at least one of $\aEv,\cEv'$ is a write, and at least one of $\bEv,\cEv$ is a write.
We reason by cases.
\begin{itemize}
\item If $\cEv'$ is a write or both $(\aEv, \bEv)$ are writes.
We deduce that $\bEv \xeco \cEv' \xeco \aEv$ and thus $\bEv \xeco \aEv$.
\item $\cEv'$ is a read. $\aEv$ is a write. $\bEv$ is a read.
In this case $\cEv$ is a write. From $\cEv \xob \aEv$, we deduce $\cEv \xeco \aEv$. Combining with $\bEv \xeco \cEv$, we deduce that $\bEv \xeco \aEv$.
\end{itemize}
In either case, there is a contradiction $\aEv \xeco \bEv \xeco \aEv$.
Consider the write $\cEv'$ fulfilling $\aEv$.
$\cEv' (\xobi \cap \xeco) \aEv$, since $\aEv$ is not an $\rrfi$ event.
So, we can assume that $\aEv \xpox \bEv$, and the situation is:
\[ \cEv' \xobi \cEv (\xpox \cap \xobi) \aEv (\xeco \cap \xpox) \bEv (\xpox \cap \xobi) \cEv'' \xobi \cEv' \]
By lemma~\ref{extendob}, if $\cEv \not= \aEv$, $\cEv \xob \bEv$, and we have a cycle in $\xob$.
Similarly, if $\bEv \not= \cEv''$, $\aEv \xob \cEv''$, and we have a cycle in $\xob$. So, the situation is:
\[ \cEv' \xobi \aEv (\xeco \cap \xpox) \bEv \xobi \cEv' \]
$\cEv',\aEv,\bEv$ are events on the same variable. The above is a cycle in $\xeco$.
\newpage{}
\hypertarget{a004---reducing-dispersion-by-assigning-a-concrete-value-per-word-and-learning-from-it}{%
\section{A004 - reducing dispersion by assigning a concrete value per
word and learning from
it}\label{a004---reducing-dispersion-by-assigning-a-concrete-value-per-word-and-learning-from-it}}
A001 collects as many different values for each word as it can get. When
estimating, it uses any of those values more or less randomly (of course
smoothed by the fact that we take 100 random values and then only use a
value from a certain position representing the percentage of certainty
we want to have). Anyway, that hinders our ability to ``learn''.
The idea behind this algorithm is to assign just one value to each word.
This way, once we can see our error margin, we might design a little
learning algorithm.
For example:
\begin{itemize}
\item create an average model
\item while the mean squared error is higher than \ldots{}
  \begin{itemize}
  \item create 10 mutations of that model, for example by randomly adding or subtracting 1/10th of the value to each weight of each word
  \item calculate the mean squared error for each mutation
  \item take the model with the least mean squared error and repeat
  \end{itemize}
\end{itemize}
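To make the loop concrete, here is a minimal sketch of this mutate-and-select procedure in Python. It is illustrative only: the model is assumed to be a plain word-to-weight dictionary, the estimate of a task is assumed to be the sum of the weights of its words, and names such as \texttt{mutate} and \texttt{learn} are ours, not part of A001 or A004.

\begin{verbatim}
import random

def estimate(model, task_words):
    # A model assigns one concrete value (weight) per word.
    return sum(model.get(w, 0.0) for w in task_words)

def mean_squared_error(model, tasks):
    # tasks: list of (words, actual_duration) pairs
    return sum((estimate(model, w) - d) ** 2 for w, d in tasks) / len(tasks)

def mutate(model):
    # Randomly add or subtract 1/10th of each word's weight.
    return {w: v + random.choice([-1, 1]) * v / 10.0
            for w, v in model.items()}

def learn(model, tasks, target_mse, max_rounds=1000):
    for _ in range(max_rounds):
        if mean_squared_error(model, tasks) <= target_mse:
            break
        # 10 mutations per round; keep the best of parent and children.
        candidates = [model] + [mutate(model) for _ in range(10)]
        model = min(candidates, key=lambda m: mean_squared_error(m, tasks))
    return model
\end{verbatim}

Starting from the average model, \texttt{learn} keeps the best model of each generation, which is exactly the hill-climbing idea sketched in the list above.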
\cleardoublepage
\renewcommand{\papertext}{Paper 02: How accurate are estimates of glacier ice thickness? Results from ITMIX, the Ice Thickness Models Intercomparison eXperiment}
\section*{\papertext}
\addcontentsline{toc}{section}{\protect\numberline{}\papertext}%
\label{paper_02}
\vspace{0.5cm}
\begin{singlespace}
\begin{hangparas}{1em}{1}
Farinotti, D., Brinkerhoff, D. J., Clarke, G. K. C., Fürst, J. J., Frey, H., Gantayat, P., Gillet-Chaulet, F., Girard, C., Huss, M., Leclercq, P. W., Linsbauer, A., Machguth, H., Martin, C., \textbf{Maussion, F.}, Morlighem, M., Mosbeux, C., Pandit, A., Portmann, A., Rabatel, A., Ramsankaran, R., Reerink, T. J., Sanchez, O., Stentoft, P. A., Singh Kumari, S., van Pelt, W. J. J., Anderson, B., Benham, T., Binder, D., Dowdeswell, J. A., Fischer, A., Helfricht, K., Kutuzov, S., Lavrentiev, I., McNabb, R., Gudmundsson, G. H., Li, H. and Andreassen, L. M.: How accurate are estimates of glacier ice thickness? Results from ITMIX, the Ice Thickness Models Intercomparison eXperiment, Cryosph., 11(2), 949--970, \href{https://doi.org/10.5194/tc-11-949-2017}{doi:10.5194/tc-11-949-2017}, 2017.
\end{hangparas}
\end{singlespace}
\vspace{0.5cm}
This paper is the result of an international working group of the International Association of
Cryospheric Sciences (IACS): the working group on \href{https://cryosphericsciences.org/activities/ice-thickness}{Glacier ice thickness estimation} (2014--2019),
of which I was a member. The group activities led to several major publications, two of which are presented in
this thesis.
The first collaborative effort of the working group was the “Ice Thickness Models Intercomparison eXperiment (ITMIX)”,
phase 1. It was the first coordinated assessment of the individual performance of independent methods able to infer
glacier ice thickness from characteristics of the surface. A set of 17 different models were used to estimate the ice
thickness “blindly”, i.e. without using any observations for model calibration or tuning.
This publication is a milestone in the field of glacier ice thickness estimation. Cited more than 110 times to date
(Google Scholar), it laid the ground for a wealth of follow-up studies, leading several research groups (including mine)
to increase their efforts in developing new methods to estimate the volume of glaciers. Indeed, we showed that the
disagreement between models themselves and between models and observations can be very large (up to several times the
observed ice thickness). Ensemble approaches may reduce model errors, but no model consistently outperformed the others.
A few models were favorably listed for their ability to robustly simulate many glaciers with a reasonable accuracy (e.g.
OGGM, Huss), or for their high accuracy on fewer glaciers (e.g. Brinkerhoff-v1).
My contribution to this paper was the participation in several meetings that helped to shape the experimental design,
and I contributed a model. OGGM was ranked among the best models able to estimate ice thickness from
limited information (i.e. able to compute many glaciers). I also contributed to the analysis of the
results, and played a minor role in the writing of the paper.
\href{https://doi.org/10.5194/tc-11-949-2017}{Link to the paper} (open access).
\iflong \includepdf[pages=-,openright]{./papers/paper_02.pdf} \else \fi
\chapter*{Summary}
TODO
\documentclass{article}
\newsavebox{\oldepsilon}
\savebox{\oldepsilon}{\ensuremath{\epsilon}}
\usepackage[minionint,mathlf,textlf]{MinionPro} % To gussy up a bit
\renewcommand*{\epsilon}{\usebox{\oldepsilon}}
\usepackage[margin=1in]{geometry}
\usepackage{graphicx} % For .eps inclusion
%\usepackage{indentfirst} % Controls indentation
\usepackage[compact]{titlesec} % For regulating spacing before section titles
\usepackage{adjustbox} % For vertically-aligned side-by-side minipages
\usepackage{array, amsmath, mhchem}
\usepackage{hyperref}
\usepackage{courier, subcaption}
\usepackage{multirow, color}
\usepackage[autolinebreaks,framed,numbered]{mcode}
\usepackage{float}
\restylefloat{table}
\pagenumbering{gobble}
\setlength\parindent{0 cm}
\renewcommand{\arraystretch}{1.2}
\begin{document}
\large
\section*{Positive feedback}
All systems that exhibit bistability implement positive feedback. In the mutual repression example, this positive feedback is indirect: protein X increases its own production rate by alleviating repression from Y. The result is that $X$ causes its own \textit{derepression} -- a form of positive feedback.\\
It's possible to make a bistable switch from a single transcription factor by utilizing positive feedback. Suppose X is a transcriptional activator with a maximum expression rate $\alpha$ and degradation rate $\beta$. As previously, the general expression for the rate of change in $x$ is:
\[ \frac{dx}{dt} = \alpha h(x) - \beta x \]
where $0<h(x)<1$. We'll assume that $x$ binds its own promoter and that production of $x$ scales with the likelihood that $x$ is bound there, which in turn is determined by a Hill curve:
\[ h(x) = \frac{x^n}{K + x^n} \rightarrow \frac{x^n}{1 + x^n} \textrm{ on appropriate choice of units for [X]} \]
Then the equation for our system is:
\[ \frac{dx}{dt} = \frac{\alpha x^n}{1 + x^n} - \beta x \]
The fixed points of this system satisfy:
\[ x = \frac{\alpha x^{n}/\beta }{1 + x^{n}} \]
By plotting both the left-hand and right-hand side on one axis, we can see that when $n=1$ and the right-hand side is hyperbolic, then there are two intersections, one at the origin and another at a positive value of x. Only the right-most fixed point is stable.\\
If the right-hand side has a sigmoidal shape (i.e. $n>1$), then it is possible to have one, two, or three points of intersection. Notice that as we increase $\alpha/\beta$ from a low value, where we have only one intersection at the origin, a new fixed point appears ``out of the clear blue sky" at a positive value of $x$. This point is half-stable and quickly gives way to two fixed points, one of which is stable and the other unstable. This is called a \textit{saddle-node} (or sometimes, ``blue sky") bifurcation.
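For instance, with $n=2$ we can locate this bifurcation exactly (a short check, using only the fixed-point equation above): the nonzero fixed points satisfy $\beta(1 + x^2) = \alpha x$, i.e.
\[ x = \frac{\alpha \pm \sqrt{\alpha^2 - 4\beta^2}}{2\beta} \]
so the pair of nonzero fixed points first appears ``out of the clear blue sky'' when $\alpha/\beta = 2$, at $x^* = 1$.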
\begin{figure}[htp] \centering{
\includegraphics[width=1 \textwidth]{pfb1.pdf}}
\caption{Illustration of the emergence of a new fixed point as $\alpha/\beta$ increases. Hill coefficient $n=2$.} \label{fig:pfb}
\end{figure}
At this point it may be tempting to conclude that we are done analyzing this system, but as $\alpha/\beta$ continues to increase, the middle fixed point slides arbitrarily close to the origin so that (assuming any noise is present) the origin becomes functionally unstable. (Bifurcation diagram.) Systems of this type exhibit hysteresis (review this term).
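As a quick numerical check of this picture (our sketch, not part of the original notes; the parameter values are arbitrary), the following code counts the fixed points of $dx/dt = \alpha x^n/(1+x^n) - \beta x$ by scanning for sign changes:

\begin{verbatim}
# Locate fixed points of dx/dt = alpha*x^n/(1+x^n) - beta*x (illustration).
import numpy as np

def xdot(x, alpha, beta, n):
    return alpha * x**n / (1.0 + x**n) - beta * x

def fixed_points(alpha, beta, n, xmax=10.0, steps=100000):
    xs = np.linspace(0.0, xmax, steps)
    fs = xdot(xs, alpha, beta, n)
    # x = 0 is always a fixed point here; find the others by
    # bracketing sign changes of xdot on the grid.
    crossings = np.flatnonzero(fs[:-1] * fs[1:] < 0)
    return [0.0] + [0.5 * (xs[i] + xs[i + 1]) for i in crossings]

print(fixed_points(alpha=1.5, beta=1.0, n=2))  # alpha/beta < 2: only x = 0
print(fixed_points(alpha=3.0, beta=1.0, n=2))  # alpha/beta > 2: two more
\end{verbatim}

With $n=2$ and $\beta=1$, the two positive fixed points reported for $\alpha=3$ agree with $(3 \pm \sqrt{5})/2$ from the quadratic above.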
\section*{The lac operon}
Bistable switches like this one were among the first identified in real biological systems. Bacterial cells use it to switch between two gene expression states based on the food sources available. \textit{E. coli}, for example, are capable of using lactose (a sugar in milk) as a carbon source. To do this, they must invest energy in making transporters that carry lactose (LacY) into the cell as well as enzymes (LacZ) that break lactose into its component simple sugars, glucose and galactose. LacZ and LacY proteins are not useful the vast majority of the time, since lactose is not bacteria's preferred carbon source. However, when lactose is present, the cell must be able to turn on these genes. How does the cell achieve this?\\
In bacteria, genes of related function are often transcribed together on the same mRNA (i.e. in an \textit{operon}). This means that their expression is regulated by the same promoter. \textit{lacZ} and \textit{lacY} are part of the \textit{lac} operon, which is regulated by a transcriptional repressor called LacI. LacI is bound to the \textit{lac} operon's promoter most of the time, so that these genes are not expressed. (You may recall that LacI was the first example we used when discussing how transcription factors can bind cooperatively when there are multiple nearby sites.) However, when lactose is present inside the cell, it can bind to LacI and cause this repressor to fall off the \textit{lac} promoter, allowing LacZ and LacY to be expressed. The lac operon system uses a repressor: does it display positive feedback, and if so, can we shoehorn it into our model?\\
The positive feedback in this system is the result of the lactose transporter, LacY. Suppose a bacterial cell has been growing for a long time in medium without lactose, so that very little LacZ or LacY are being expressed. We add a little lactose to the medium: what happens? With no LacY present, the lactose is impeded from entering the cell. We must add a high enough concentration of lactose that it is able to ``seep in" before LacI is inhibited and LacZ/Y get expressed. Now LacY will let more lactose into the cell, so more and more LacY will be expressed. Similarly, if we start at a high concentration of lactose and decrease, the \textit{lac} operon will tend to maintain its expression until [lactose] is very low. (Draw the hysteresis and compare to the positive feedback above.)\\
This is all well and good, but is the Hill equation an appropriate production term for [LacY], i.e.
\[ \frac{d\left[ \textrm{LacY}\right]}{dt} = \alpha h \left( \left[ \textrm{LacY} \right] \right) - \beta \left[ \textrm{LacY} \right] \stackrel{?}{=} \frac{\alpha \left[ \textrm{LacY}\right]^n}{1 + \left[ \textrm{LacY}\right]^n} - \beta \left[ \textrm{LacY}\right]\]
The easiest way to understand this is to interpret the production term for LacY as a function of the concentration of active repressor, which in turn depends on how much lactose is being admitted by the cell (i.e. [LacY] and [lactose]).\\
We'll assume, as we did on Friday, that the production function $h$ takes values in $[0,1]$. It will be one when none of the repressor is active and zero when all of the repressor is bound, hence:
\[ h \left( \left[ \textrm{LacY} \right] \right) = 1 - P\left( \textrm{LacI is bound }\right) = \frac{K_i}{K_i + \left[\textrm{LacI}_{\textrm{active}} \right]} \]
Technically we would be well within our rights to assume that this binding of LacI to its operator is cooperative. We know that LacI is a tetramer and that it has multiple binding sites in/near the \textit{lac} operon's promoter. (Recall that we used it in the ball-and-cup analogy earlier in the course.) However we will see that this cooperativity in LacI binding is not essential for bistability in the lactose response. \\
What fraction of LacI is active? LacI is a tetramer that becomes inactivated when lactose\footnote{Technically it is not lactose that inhibits the repressor, but rather one of its metabolic derivatives, allolactose.} binds allosterically to any one of its subunits. For an appropriate choice of lactose concentration units,
\begin{eqnarray*}
\frac{\left[\textrm{LacI}_{\textrm{active}} \right]}{\left[\textrm{LacI}_{\textrm{total}} \right]} & = & P\left( \textrm{lactose not bound to any subunit} \right)\\
& = & \left[ P\left( \textrm{lactose not bound to one subunit} \right) \right]^4\\
& = & \left( \frac{K_r}{K_r + \left[ \textrm{lactose}_{\textrm{int}} \right]} \right)^4
\end{eqnarray*}
The subscript indicates the \textit{internal} concentration of lactose. To understand what the internal concentration of lactose will be, we consider how fast lactose is entering the cell and how fast it is being consumed:
\begin{eqnarray*}
\frac{d \left[ \textrm{lactose}_{\textrm{int}} \right]}{dt} & = & \frac{k_{\textrm{cat}} \left[ \textrm{LacY} \right] \left[ \textrm{lactose}_{\textrm{ext}} \right]}{K_m + \left[ \textrm{lactose}_{\textrm{ext}} \right]} - \beta_s \left[ \textrm{lactose}_{\textrm{int}} \right]\\
& \approx & \frac{k_{\textrm{cat}}}{K_m} \left[ \textrm{LacY} \right] \left[ \textrm{lactose}_{\textrm{ext}} \right] - \beta_s \left[ \textrm{lactose}_{\textrm{int}} \right]
\end{eqnarray*}
Here we have used an approximation that $\left[ \textrm{lactose}_{\textrm{ext}} \right] \ll K_m$, i.e., LacY is operating in its first-order regime. If we assume that the internal concentration of lactose is at quasi-steady-state, then we have [lactose$_{\textrm{int}}$] as a function of [LacY] and [lactose$_{\textrm{ext}}$]:
\begin{eqnarray*}
\left[ \textrm{lactose}_{\textrm{int}} \right] & = & \frac{k_{\textrm{cat}}}{\beta_s K_m} \left[ \textrm{LacY} \right] \left[ \textrm{lactose}_{\textrm{ext}} \right]\\
\left[\textrm{LacI}_{\textrm{active}} \right] & = & \left[\textrm{LacI}_{\textrm{total}} \right] \left( \frac{K_r}{K_r + \left[ \textrm{lactose}_{\textrm{int}} \right]} \right)^4\\
& = & \left[\textrm{LacI}_{\textrm{total}} \right] \left( \frac{K_r}{K_r + \frac{k_{\textrm{cat}}}{\beta_s K_m} \left[ \textrm{LacY} \right] \left[ \textrm{lactose}_{\textrm{ext}} \right]} \right)^4\\
h \left( \left[ \textrm{LacY} \right] \right) & = & \frac{K_i}{K_i + \left[\textrm{LacI}_{\textrm{active}} \right]}\\
& = & \frac{K_i}{K_i + \left[\textrm{LacI}_{\textrm{total}} \right] \left( \frac{K_r}{K_r + \frac{k_{\textrm{cat}}}{\beta_s K_m} \left[ \textrm{LacY} \right] \left[ \textrm{lactose}_{\textrm{ext}} \right]} \right)^4}\\
& = & \frac{K_i \left( K_r + \frac{k_{\textrm{cat}}}{\beta_s K_m} \left[ \textrm{LacY} \right] \left[ \textrm{lactose}_{\textrm{ext}} \right] \right)^4}{K_i \left( K_r + \frac{k_{\textrm{cat}}}{\beta_s K_m} \left[ \textrm{LacY} \right] \left[ \textrm{lactose}_{\textrm{ext}} \right] \right)^4 + \left[\textrm{LacI}_{\textrm{total}} \right] K_r^4 }
\end{eqnarray*}
This is not a Hill function of [LacY]; however, it does have sigmoidal character. Notice that the production level of LacY is never precisely zero, even when there is no external lactose:
\[ \left[ \textrm{lactose}_{\textrm{ext}} \right] \to 0: \hspace{2 cm} h\left( \left[\textrm{LacY}\right] \right) \to \frac{K_i}{K_i + \left[\textrm{LacI}_{\textrm{total}} \right]} \]
It turns out to make a big difference that the intercept is positive. Recall that at any fixed point $\left[\textrm{LacY}\right]_{ss}$,
\[ y_1 \equiv \left[ \textrm{LacY} \right]_{ss} = \frac{\alpha}{\beta} h \left( \left[\textrm{LacY}\right]_{ss} \right) \equiv y_2 \]
so that the fixed points occur at intersections of $y_1$ and $y_2$. As we increase [lactose$_{\textrm{ex}}$] from zero, we go from a single intersection (at a low LacY concentration) to two, to three, back to two, and finally to a single steady-state (at a high LacY concentration). (Draw sample intersections and finally the bifurcation curve.) Unlike when the production term was a Hill function, it is now possible to truly, not just functionally, lose the lower stable state. (Discussion of hysteresis in this system.)\\
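The same numerical approach makes the window of bistability visible (our sketch; all constants are arbitrary illustrative choices, not measured values). Using the expression for $h$ derived above, we count intersections of $y_1$ and $y_2$ as the external lactose concentration varies:

\begin{verbatim}
# Count steady states of [LacY] as external lactose varies (illustration).
import numpy as np

A = 100.0                            # alpha/beta, arbitrary choice
Ki, Kr, R, k = 0.01, 1.0, 1.0, 1.0   # K_i, K_r, [LacI_tot], kcat/(beta_s*Km)

def h(y, lac_ext):
    # Production term derived above:
    # h = Ki*(Kr + k*y*L)^4 / (Ki*(Kr + k*y*L)^4 + R*Kr^4)
    num = Ki * (Kr + k * y * lac_ext) ** 4
    return num / (num + R * Kr ** 4)

def n_steady_states(lac_ext, ymax=150.0, steps=300000):
    ys = np.linspace(0.0, ymax, steps)
    g = A * h(ys, lac_ext) - ys      # zero exactly at the fixed points
    return int(np.sum(g[:-1] * g[1:] < 0))

for L in [0.0, 0.02, 0.05, 0.1, 0.3, 1.0]:
    print(L, n_steady_states(L))     # 1 outside the bistable window, 3 inside
\end{verbatim}

With these (made-up) parameters the count passes from one, through three, and back to one as [lactose$_{\textrm{ext}}$] increases, matching the sequence of intersections described above; at a tangency the count would briefly read two.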
Although this is a canonical result in molecular biology (dating to Novick and Weiner's 1957 paper), the system is still under theoretical consideration (see Ozbudak 2004, Santill\'{a}n 2007).\\
\section*{Collins Toggle Switch}
\begin{itemize}
\item Want to build a mutual repression system with a means for switching between the two states.
\item Knew that they needed $n>1$ and $\alpha/\beta$ greater than a threshold value in order to get bistability.
\item ``What I cannot build, I do not understand.'' -- Feynman; unclear whether these simplifications of gene regulation would apply generally.
\item At the time of publication (Gardner 2000), the number of well-characterized repressors, promoters, and RBSes was limited. Needed to try multiple options.
\item The repressors and the promoters that contain their binding sites are:
\begin{itemize}
\item LacI binds to P$_{\textrm{trc2}}$
\item The lambda phage repressor cI (``see-one") binds to P$_{Ls1con}$
\end{itemize}
\item New genetic constructs are often designed and introduced into hosts on plasmids: small (3-10kb) circular pieces of double-stranded DNA.
\item Elements of a bacterial plasmid:
\begin{itemize}
\item Origin of replication (tricks host into amplifying the plasmid as if it were its own genome)
\item Often no explicit means of segregation between daughter cells (relying on high copy number)
\item A marker for selection (e.g. encoding antibiotic resistance, so that only bacteria that maintain a copy of the plasmid can survive in media containing the antiobitic)
\item For each gene, a transcriptional promoter, ribosome binding site (not required in eukaryotes), the gene's open reading frame (i.e. the codons that should actually be translated into amino acids), a stop codon, and a transcriptional terminator.
\item Optionally in bacteria: multiple RBSes and ORFs per transcriptional promoter/terminator (because operons can encode multiple proteins). A similar approach using ``internal ribosome entry sites" sort-of works in eukaryotes where IRESes are well-defined.
\item Ribosomes have a greater affinity for some RBSes than others. One straightforward way to change the production rate of a protein is to swap out the RBS.
\item It is also possible to change a protein's effective expression level by using a temperature-sensitive variant. Essentially these are proteins that contain mutations which place them on the cusp of not folding properly. When the temperature increases above a threshold, they unfold and cease to function. (This is true for all proteins, but temperature-sensitive variants undergo this transition at a temperature where most of the organism is still functional.)
\end{itemize}
\item Can add IPTG (isopropyl-$\beta$-D-1-thiogalactopyranoside, an allolactose analog) to make LacI stop binding. Raising the temperature inactivates cI.
\item Difference in off rates explained by mechanism (temperature-induced instability of cI vs. slow dilution of LacI following IPTG addition)
\end{itemize}
\end{document}
\chapter{Boosted Decision Trees}
\label{chap_BDT}
\section{Introduction}
%! program = pdflatex
\documentclass[11pt]{report}
\usepackage{geometry}
\geometry{a4paper}
\usepackage[parfill]{parskip}
\usepackage{fancyvrb}
\usepackage[T1]{fontenc}
\usepackage{graphicx}
\usepackage{amssymb}
\usepackage{epstopdf}
\usepackage{framed}
\usepackage{float}
\usepackage{color}
\sloppy
\usepackage[colorlinks=true, pdfstartview=FitV, linkcolor=blue,
citecolor=blue, urlcolor=blue]{hyperref}
\setlength{\parindent}{0pt}
\setlength{\parskip}{2ex plus 0.5ex minus 0.2ex}
\title{Introduction to Access Management \\
using Oracle Access Manager}
\author{Horst Kapfenberger - Agora Con GmbH}
\date{May 10, 2016}
\begin{document}
\RecustomVerbatimEnvironment
{Verbatim}{Verbatim}
{numbers=left,frame=lines,fontsize=\scriptsize,framesep=4mm}
\maketitle
\begin{abstract}
Introduction to Access Management in the area of Information
Technology. Description of user authentication, authentication
factors and methods.
Further explanation is done on the basis of an established
proprietary product, \emph{Oracle Access Management Access
Manager}.
Detailed description of the Single-Sign-On process and the
implementation in the considered product.
Sample messages and software configuration excerpts are added.
\end{abstract}
\tableofcontents
% -----------------------------------------------------------
\chapter{Access Management}
\section{Prelude}
In \emph{Information Technology} the topic \emph{Access Management}
describes the endeavour of managing user access to data and system
features.\footnote{User access: besides human users, other \emph{actors}
do exist. Automated processes, sensors, interfaces or batch jobs
interact the same way. We use the term user as a comprehensive form of
actor, \emph{including} all forms of access.} The sibling
discipline, \emph{Identity Management}, delivers the input to Access
Management --- a perpetually changing data set containing user
identities and roles. All decisions are ultimately based on this set.
Access Management shall distinguish between the \emph{good ones} and the
\emph{bad ones}. The good ones (users allowed to access) shall be
enabled to access business transactions without undesired limitations in
terms of time, location or device used. The supplementary effort needed
to get access shall be minimized. As a consequence productivity will
increase.
The other group, the bad ones, are those we do not want to access the
system. No matter what efforts they undertake to get access,
unauthorized use shall be prevented and misuse shall be identified and
reported.
Access Management delivers sustainability in those areas:
\begin{description}
\item[Application Security] --- restrict users and programs, recognise
system abuse, identify identity theft
\item[User Productivity] --- providing users with ease access to their
needs, exactly to their needs: not more, not less
\item[Middleware technology] --- save investments with process
centralization and software reuse, providing interfaces based
on open internet protocols
\item[Compliance] --- as additional value delivered; compliance itself
	usually won't work as an impulse or driver
\end{description}
\section{In Real Life: Border Controls}
A good comparison to illustrate the main characteristics of
authentication is \emph{border control} --- the kind you will most often
encounter at airports or at the actual border between countries.\footnote{There
was a time you had to leave the \emph{Schengen Area} to have this rare
experience; nowadays there seems to be an inflation of border
checks.}
You leave one domain, or more importantly: you enter another domain.
A domain that has a certain definition, borders, and authorities that
execute policies (for good and for bad, however). Those brave officers
want you to identify yourself --- your job is to provide them with data,
exact and distinct data, and some proof that you are telling the truth.

The data is usually good enough to identify you among all human beings.
The quality of your proof must satisfy the procedure of those border
checks. With a valid passport you own a document from a commonly trusted
authority that covers this.
\begin{enumerate}
\item identifying you among all others \emph{(identification)} and
\item prove the correctness of your statement
\emph{(authentication)}
\end{enumerate}
For you, as the traveler, the desired outcome of this procedure is your
valid entry without much delay.
\subsection{Alternative Flows}
Thinking more generally, what other variations or outcomes of this
situation are possible?
\begin{itemize}
\item you could pretend to be another person, perhaps from another
country
\item you could sneak into the country, avoiding any check
\item you could mix up your passport by mistake in the cafeteria next
	to the border. You enter the country with a different identity
	without knowing it
\item your passport is expired and you are not allowed to enter the
country
\item etc.
\end{itemize}
All those situations have a direct mapping to use cases in the IT world.
\subsection{Not Authorized}
One more thing is in common: at border controls there is no check of what
exactly you are allowed to do --- in the country or domain.
There might be a rough categorization implied during the authentication,
like people of a certain nationality needing a visa, or people from a
certain country not being allowed to enter at all.
But this is not the place where a work or residence permit would be
checked.\footnote{At least as far as the author knows.}
Using our vocabulary, we are \emph{authenticated} but no authorization
check has been done.
% -------------------------------------------------------------------
\chapter{Authentication}
\section{What's Authentication}
\emph{Authentication} is the act of confirming the truth of an attribute
of a single piece of data (a datum) claimed true by an entity. In
contrast with \emph{identification}, which refers to the act of stating
or otherwise indicating a claim purportedly attesting to a person or
thing's identity, authentication is the process of actually confirming
that identity.
\section{Authentication Factors}
The ways in which someone may be authenticated fall into three
categories, based on what are known as the factors of authentication:
something the user \emph{knows}, something the user \emph{has}, and
something the user \emph{is}.
Each authentication factor covers a range of elements used to
authenticate or verify a person's identity prior to being granted
access, approving a transaction request, signing a document or other
work product, granting authority to others, and establishing a chain of
authority.
The three factor types and some of elements of each factor are:
\begin{description}
\item[Knowledge Factor] Something the user knows --- e.g.\ a
	password, partial password, pass phrase, or personal identification
	number (PIN), a challenge response (the user must answer a question
	or reproduce a pattern), or a security question
\item[Ownership Factor] Something the user has --- e.g.\ wrist band,
ID card, security token, cell phone with built-in hardware token,
software token, or cell phone holding a software token
\item[Inherence Factor] Something the user is or does --- e.g.\
fingerprint, retinal pattern, DNA sequence (there are assorted
definitions of what is sufficient), signature, face, voice,
unique bio-electric signals, or other biometric identifier
\end{description}
For authentication to well-secured systems, elements from at least two,
and preferably all three, factors should be verified.
\section{Authentication Methods}
What information the user is requested to enter during authentication,
and how this information is passed to the service --- this is defined as
the authentication method.

Authentication methods are standardized, usually in RFCs, to enable
interoperability and to ensure a certain system security among different
implementations. Authentication is a critical process and is a target of
all kinds of security attacks.
\begin{description}
\item[User Passwords] \emph{Knowledge factor.} The most commonly used
	method is also considered the weakest and most problematic
	one. Users seem to be very bad at \emph{choosing} passwords, at
	\emph{remembering} them and at \emph{changing} them
	periodically.\footnote{Incrementing an included number is not
	considered a proper password change.}
	Another risk is the transport of the real password over the
	wire, which is the typical implementation in web
	applications.\footnote{Mail clients have offered alternative,
	standard transport methods for over a decade now.}
	On the other hand, application developers seem to be good at
	creating insecure implementations, especially in storing or
	dealing with passwords. Bottom line: if possible, avoid using
	passwords in general.
\item[Kerberos] \emph{Knowledge and ownership factor.} Kerberos is
	a distributed ticketing system, passing encrypted keys as
	credentials. Kerberos version 5 is included in every mainstream
	Unix/Linux/BSD system and in Microsoft Windows as the default
	authentication method.
	Kerberos is the preferred method of authentication within LAN
	networks, but it was not widely used over WAN because an additional
	communication port would have been necessary. With MS-KKDCP from
	Microsoft this seems to change; it has been adopted by many open
	source groups and vendors.
\item[RSA Token] \emph{Knowledge and ownership factor.} Proprietary
	implementation from RSA Security Inc., currently a division of
	EMC Corporation. Often used in enterprise VPN solutions. RSA is
	known to distribute products with backdoors for security
	services. Especially problematic are RSA libraries included in
	3rd party products (e.g.\ Dual\_EC\_DRBG in Windows, Java,
	OpenSSL).
	While it is not recommended to use RSA Token, several alternatives
	do exist that are considered secure, some of them internet
	standards.\footnote{RFC 1760 (S/KEY), RFC 2289 (OTP), RFC 4226
	(HOTP) and RFC 6238 (TOTP).} More user-friendly than hardware
	tokens are software versions for mobile devices, providing multi-
	service-provider support. (A minimal TOTP sketch follows this
	list.)
\item[Client Certificates] \emph{Knowledge and ownership factor.}
	X.509 certificates are considered to be secure. A prerequisite is
	a public key infrastructure (PKI) with an online revocation
	service though. Client certificates are often used within an
	organization with an independent, self-signed certification
	authority. This requirement makes it difficult (or too
	expensive) for global usage. This might change in the near
	future, as changes in the CA business model are already taking
	place.
\end{description}
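To make the one-time-password idea concrete, here is a minimal sketch of
a TOTP generator following RFC 6238 (with the HOTP truncation from
RFC 4226). It is an illustration only, not production code; the Base32
secret is a made-up example value.

\begin{Verbatim}
# Minimal TOTP sketch (RFC 6238); illustrative, not production code.
import base64, hashlib, hmac, struct, time

def hotp(secret, counter, digits=6):
    # RFC 4226: HMAC-SHA1 over the 8-byte big-endian counter,
    # then "dynamic truncation" to a short decimal code.
    mac = hmac.new(secret, struct.pack(">Q", counter), hashlib.sha1).digest()
    offset = mac[-1] & 0x0F
    code = struct.unpack(">I", mac[offset:offset + 4])[0] & 0x7FFFFFFF
    return str(code % 10 ** digits).zfill(digits)

def totp(secret, period=30, digits=6):
    # RFC 6238: the counter is the number of elapsed time steps.
    return hotp(secret, int(time.time()) // period, digits)

key = base64.b32decode("JBSWY3DPEHPK3PXP")  # made-up example secret
print(totp(key))
\end{Verbatim}

Both the authenticating server and the user's token compute the same
code from the shared secret and the current time; servers typically
accept a small window of adjacent time steps to tolerate clock drift.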
% -------------------------------------------------------------------
\chapter{Access Management Suite}
\section{Suite Overview}
Around 2010 Oracle Corporation acquired a dozen software vendors and
technology companies in the identity and access management market. Among
them were some niche players and also leaders, like Sun Microsystems,
with several active products and a stable user base.

Consolidating a portfolio of twenty specialized systems into two or
three tightly integrated solutions was definitely the overall plan, but
judging by the footsteps we sometimes see, not every step was set
straight on that path.
Next to the Identity Management Suite the Access Management Suite is the
second major product group, where the collected technologies deal with
run-time evaluation of user or system access.
The Access Manager itself is the heart of the suite, into which other
products and new feature sets have been integrated or merged.
\begin{itemize}
\item Access Manager
\item Identity Federation
\item Security Token Service
\item Mobile and Social
\item Adaptive Authentication Service
\item OAuth Services
\item Identity Context
\end{itemize}
\section{Suite Applications}
\subsection{Access Management Access Manager}
As the central part of the suite, Access Manager delivers the
administrative user interface. All authentication mechanisms are defined
here, and components and processes can be activated or cut off in the
admin user interface. Business applications can be registered and
external data sources containing identity data may be connected.
Distributed components, the policy enforcement points, are located
closer to the protected business applications and request authentication
and authorization information from the central access server over
encrypted channels.
\subsection{Identity Federation}
While Access Manager takes care of a single domain (or multiple but
independent domains), Identity Federation helps to connect domains
(e.g.\ two organizations with integrated business activities). In each
activity the roles and responsibilities are well defined. Identity
Federation supports SAML and OpenID\@.
\subsection{Security Token Service}
Token validation and generation to facilitate access to services across
security domains and beyond organizational boundaries. Essentially the
service acts as a trust-broker that receives and validates client
requests and generates appropriate tokens for a requested resource.
\subsection{Mobile and Social}
Mobile (for mobile devices): integrate iOS and Android mobile devices,
policy enforcement on those devices, single-sign-on for mobile apps and
browser based access, SDK on mobile devices.
Social (for social services): integrate authentication, policies for
several social services available on Internet.
\subsection{Adaptive Authentication Service}
Real-time and batch risk analytics to prevent fraud and misuse, risk
based authentication, additional authentication methods, like
One-Time-Passwords (OTP).
\subsection{OAuth Services}
OAuth 2.0 authentication client and server. Manage access control over
domain borders.
\subsection{Identity Context}
Enable dynamic adaptation of permissions based on user-related data, like
location, recent transactions, third-party information, other assigned
permissions, etc.
This document covers topics from Access Management Access Manager only.
% --------------------------------------------------------------------
\chapter{Access Manager}
\section{Overview}
The role of Access Manager can be described as that of a gate keeper in
front of all business applications. Each user request needs to pass this
gate keeper. It ensures that only authenticated requests are allowed to
reach the protected business application.
\begin{framed}
All integration practices described are considering the integration
of web applications. Web applications are applications that deliver
the user interface via HTTP or HTTPS\@.
\end{framed}
Access Manager delivers a commonly used, central login functionality: the
user authentication. The authentication method presented to the user is
determined based on several input parameters, like the resource the user
tries to access, the date and time, perhaps the user's location or the
device used, or the type of user Access Manager recognizes even
before the authentication.
The implementation of login functionality is per se critical and it is
critical in multiple ways:
\begin{itemize}
\item run time errors may block the whole business application
\item often an external system call is needed, the response data
and its interpretation is not obvious or may change over time
\item accounts may be locked in different ways --- what shall the
	user error message be?
\item the business application (perhaps a closed source 3rd party
application) needs to deal with user passwords
\end{itemize}
Dealing with all those challenges in every application, even with a
centralized backend like a directory server, again feels like a
redundant and error-prone approach.
Quite often it also becomes a dangerous approach, when one of the
business applications or an operational tool is lacking the latest and
mandatory encryption cipher and now sends plain text, or it logs one
attribute more than it should (yes, credentials in log files are not an
uncommon problem).
But not all application areas may trigger an authentication. For publicly
accessible areas the authentication check can be disabled in the
\emph{application domain} configuration. In addition, auditing records
can be disabled for publicly accessible areas. This prevents data
collections that have no use, or whose use is even prohibited.
The single-sign-on session of an authenticated user includes its current
authentication level. We can perceive the session as a \emph{ticket}
to the systems behind the gate keeper. This ticket is stored centrally on
the access server and also on the client (using cookies, protected
against external changes). This authentication level is matched against
the minimum authentication level attached to the resource the user
requests.

The boundaries of the single-sign-on session can be defined by domain
and subdomain namespaces. However, this will only be enforced for
integrated applications.
Each session has a \emph{general time-out} and an \emph{inactivity
time-out} set. The general time-out will close the session and force a
re-authentication, no matter whether there is currently any activity
ongoing or not. The default value of this general time-out is 24 hours.
The inactivity time-out is set to a shorter period, depending on the
customer needs and security level.
\section{Application Integration}
Business applications often require an independent application session,
to which user data is attached. This session information is not touched
by Access Manager.
Before our integration, the business application acted as a
stand-alone application in terms of the user session. An application
login page was presented to the user in case the user had no active
session.
The \emph{integration} of an application is a relatively simple exercise,
since we essentially remove functionality and code. In particular, the
login and logout functionality is removed. Instead of returning an
application login page, the application can take the attached
information like user name, user ID, or roles, and continue with the
transaction.
The logout link or button of the business application will remain on the
user interface. The business application will just redirect the user to
the general logout URI, which will trigger the necessary logout
callbacks to the business applications in use. The user sessions will
thus be safely closed on a general logout. However, as before, this is
triggered only when the user actively executes a logout.
The individual implementations of the user login and logout are removed
from applications during integration. Only authenticated requests can
reach the application from now on. While authorization policy
enforcement is available and recommended, other products of the suite
may be necessary to cover further authorization requirements.
% ====================================================================
\chapter{Components of Access Manager}
In the following pages I will explain what parts or topics Access
Manager consists of and what they are good for:
\begin{itemize}
\item Access Server
\item Application Domains and Resources
\item Identity Store
\item Session Store
\item Policy Enforcement Points: Agents
\item Webgate Agent
\item SSO Cookies
\item Credential Collector
\item Certificate Validation and Revocation
\end{itemize}
\section{Access Server}
The Access Server is the \emph{backend runtime component} that serves
requests of the \emph{Policy Enforcement Points}. It is implemented as a
J2EE service, and Oracle supports Oracle WebLogic and IBM WebSphere as
application servers. In WebLogic the services are deployed to one or
more managed servers grouped in a WebLogic cluster. Most of the
interfaces and protocols are based on open standards; an exception here
is the proprietary protocol to the policy enforcement points, called OAP
(Oracle Access Protocol).
The configuration can be done using the web user interface
\emph{OAMConsole}, hosted on the WebLogic Admin Server. Many features
can also be configured using shell scripts or the WebLogic scripting
environment WLST\@. The substantial configuration is persisted as an XML
file. Bindings and policies are stored in the relational database
schema.
\section{Application Domains and Resources}
The value of a production access system comes from the integrated
business applications. Therefore the management of those business
applications and their policies is a central area in Access Manager.
Applications are mapped to application domains. This mapping can be done
as a one-to-one relation, but other transformations are also possible,
as long as it helps the operational team to treat the integration as one
consistent block of applications, which is integrated or maintained at
once.
Each application domain consists of multiple resources. A resource can
be seen as an entitlement in the application domain. Since all resources
are expressed as \emph{Uniform Resource Identifiers (URI)}, Access
Management uses the URI component \emph{path} and optionally \emph{query} and
\emph{fragment} as resource identifier.
Example resource identifier: \verb|/orders/**|
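As an illustration, the sketch below decomposes a hypothetical request URL
into the URI components used for matching (host and path are made up for
this example):
\begin{Verbatim}[label=Resource matching (illustration)]
https://shop.example.com/orders/42/items?sort=date
        \______________/\_______________/\________/
         host identifier       path         query

matched resource: /orders/**   (wildcard path match)
\end{Verbatim}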
\section{Identity Store}
To execute the main feature of Access Manager, the authentication, we
need at least one set of identities that are allowed to authenticate.
This set must be stored in a supported directory server and should be
\emph{close} to Access Manager from an operational and data-ownership
perspective.
Access Manager will read from and write to this identity store. The
required initial information in the store is the user login and some
status information of the user account.
Access Manager stores additional runtime data attached to the user
information.
Optional information that can be stored in the identity store includes
\emph{user credentials}, \emph{user roles}, etc.
In case you deploy \emph{Access Manager} together with \emph{Oracle Identity
Manager}, the directory is automatically populated and acts as a data hub
between Identity and Access Management.
\section{Session Store}
We already mentioned where user sessions are stored. But let's have a
closer look here.
For Access Manager it is essential to have clear accounting of active
user sessions, with additional information attached to them:
\begin{itemize}
\item when the session started
\item timestamps used for time-outs
\item authentication method used
\item current and original authentication levels
\item reference to identity store
\item location (if applicable)
\end{itemize}
This information must be served quickly and with high availability.
Access Manager maintains this list in Coherence.
\begin{framed}
Oracle Coherence is a proprietary in-memory data grid that improves
performance, scalability, and reliability compared to relational
databases. It can be used as a persistence system or as a caching
layer in combination with a relational database system.
Coherence was developed by \emph{Tangosol Inc.}, which was acquired
by Oracle Corporation in 2007. Several Oracle applications, like
Access Manager or SOA, use Coherence.
\end{framed}
The Access Manager admin interface \emph{oamconsole} comes with a
feature for querying and maintaining the session list.
The session information placed at the client side is described in the
section \emph{SSO Cookies}.
\section{Policy Enforcement Points: Agents}
The Policy Enforcement Points (PEP) are the components actively asking
Access Manager for user access decisions and enforcing those policies.
They act as gatekeepers to the business applications. One agent
implementation is shipped with the Access Manager Suite; other Oracle
implementations are available but are considered deprecated. Furthermore,
custom implementations are supported.
To fulfill its work as gatekeeper, a PEP has to be involved in every
client request to the business application, preferably very early in the
request processing to protect the application from different types of
attacks.
For web applications this is usually done in the \emph{demilitarized zone
(DMZ)}, on a perhaps already existing \emph{reverse proxy}.
For incoming requests these attributes are evaluated:
\begin{itemize}
\item requested resource (URI): host and path
\item existing cookies
\item misc.\ attributes, like location, etc.
\item verification data from access server
\end{itemize}
Since no policy information is stored at the distributed locations
where PEPs are deployed, this information must be gathered from the
access server. It is heavily cached in the PEP to speed up decision
response time and throughput.
Possible outcomes of the decision are (a simplified sketch follows the list):
\begin{itemize}
\item redirect to credential collector
\item let request pass with audit entry
\item let request pass without audit entry
\end{itemize}
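The decision logic can be summarized in the following simplified
pseudocode sketch; the names are illustrative and do not correspond to
actual product internals:
\begin{Verbatim}[label=PEP decision logic (pseudocode sketch)]
on request(uri, cookies):
    policy = cache.lookup(uri) or access_server.ask(uri)
    if policy is unknown:      return error page
    if policy is public:       pass request (audit as configured)
    if session(cookies) valid and level >= policy.min_level:
                               pass request (audit as configured)
    else:                      redirect to credential collector
\end{Verbatim}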
The communication to the Access Server is crucial and must be protected
against attacks. Access Manager and the PEP implementation can be
configured to use a general encryption key (this is the default setting)
or agent-instance-specific keys.
\section{Webgate Agent}
The Webgate Agent is a Policy Enforcement Point implementation to deploy
on HTTP servers like \emph{Oracle Web Server} or \emph{Apache
HTTPD}.\footnote{There has been a version available for Microsoft
Internet Information Server (IIS). This version seemed to be outdated,
and the author was not able to install the binary successfully.
Since reverse proxies based on Windows are not widely used, I would
not expect further development in this area.} It is shipped as a binary,
compiled for the main platforms, and is integrated as a module for the
web server. No additional run-time process is necessary.
The necessary configuration consists of entries in the HTTPD configuration
file (sketched below) and a general setup with its own configuration file.
The communication channel to the Access Server is protected by certificates
located in the configuration directory. There are two types of protection:
\begin{itemize}
\item protecting all agents with the same key
\item protecting each agent with an individual key
\end{itemize}
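As an illustration, the web server side of the setup typically boils down
to loading the agent module and including the generated agent
configuration; the directive names and paths below are version dependent
and given only as a sketch:
\begin{Verbatim}[label=httpd.conf excerpt (sketch)]
# load the Webgate module shipped with the agent
LoadModule obWebgateModule "/opt/webgate/ohs/lib/webgate.so"
# pull in the agent-specific setup generated at registration
Include "/opt/webgate/config/webgate.conf"
\end{Verbatim}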
The standard HTTPD configuration defines which requests are covered by
Webgate. For resources not included, Webgate and Access Manager are not
involved at all. For covered resources the decision outcome may also be
\emph{publicly available}; however, the resource must be known to Access
Manager, otherwise an error will be returned to the user.
Agents can be configured with a broad range of log options. Those logs
are stored on the reverse proxy and are not to be confused with the
centrally stored audit logs. Particular care is recommended to not include
sensitive request payload information in the log files.
\section{SSO Cookies}
Within the area of Access Manager nine different types of HTTP cookies
are used. Typically three or four cookies are in use in one setup,
depending on whether legacy support is needed. Those cookies are
automatically encrypted or signed by the server components.
The \emph{Oracle Fusion Middleware Administrator's Guide for Oracle Access
Management} contains the exhaustive list of all nine types and a description
of the circumstances under which each type is deployed.
\section{Credential Collectors}
Credential Collectors are web sites with an embedded form, requesting a
user identifier and credentials to authenticate. The user is redirected
to a credential collector because he requested a resource and his
current authentication level is lower than required for accessing the
requested resource. To access the resource a certain authentication level
is necessary, and the user is required to attain it. During the
authentication process the original request shall not be lost; otherwise
the user would perhaps authenticate successfully but not get the resource
he asked for. Therefore the request data is carried along through the
whole process.
There are two types of credential collectors:
\begin{description}
\item[Embedded Credential Collector (ECC)] Located in the
WebLogic managed server of Access Manager. The ECC is shipped
with the product; customization possibilities are limited.
\item[Detached Credential Collector (DCC)] Located outside
Access Manager, preferably in the DMZ\@. No Java application
server is needed. A \emph{Perl} implementation is included
with the product as sample code.
\end{description}
The main difference is the location in the network architecture and the
security consequences of this decision. While the usage of the ECC allows
unauthenticated requests to reach the application tier, the DCC terminates
all unauthenticated requests already in the DMZ\@.
\section{Certificate Validation and Revocation}
This module delivers standard services for a \emph{Public Key
Infrastructure (PKI)}, announcing the validity of X.509 certificates
in a domain. The current version of the service definition can be found
in:
\begin{itemize}
\item RFC 5280 Internet X.509 Public Key Infrastructure Certificate
and Certificate Revocation List (CRL) Profile
\item RFC 6960 X.509 Internet Public Key Infrastructure Online
Certificate Status Protocol (OCSP)
\end{itemize}
While a CRL is a signed list of all certificate revocations of the domain,
OCSP is an online status protocol that can be used by clients directly or
during preprocessing by a delivery service, known as \emph{OCSP stapling}.
% -------------------------------------------------------------------------
\chapter{Requested Feature Descriptions}
\section{Authorization Policies}
Access Manager offers an additional security layer for protecting resources,
After evaluating the authentication policy attached to a resource, Access
Manager looks for an existing authorization policy. Authorization policies can
narrow the set of users allowed to access a certain feature.
\begin{description}
\item[Example 1:] The admin features of a sales application shall only be
accessible from the internal network and only during business hours.
\item[Example 2:] The order cancellation feature shall only be accessible by
users authenticated via Kerberos or an RSA token.
\end{description}
Authorization policies in Access Manager are not meant to be a replacement for
the typical authorization features within a business application. As an
additional protection layer in front of the business application, the main
benefits are:
\begin{itemize}
\item Additional line of protection for sensitive features, like
administrative user interfaces or whole business applications. Without
Access Manager a single mistake in the business application would
expose critical transactions or data to unauthorized users.
\item Common configuration and reporting over all applications. Without
Access Manager multiple sources or departments need to be consulted.
\item New levels of security: access can be granted based on the
quality of the authentication method. Additional authentication steps
(e.g.\ transaction codes or hardware tokens) may enable users to execute
additional transactions or increase certain thresholds.
\item Authorization based on user location (IP address) or within defined
time frames is possible.
\end{itemize}
\subsection{Implementing Authorization Policies}
Authorization policies are defined in an application domain and are attached
to resources of the domain --- like authentication policies.
Authorization policies consist of conditions, rules and responses. The
evaluation of the policy shall deliver a boolean result value --- allow or deny
--- and an optional response value.
\begin{description}
\item[Rules] --- rules are executed to obtain the boolean return value true
(allow) or false (deny). Not returning any value is not allowed. Rules
consist of conditions.
\item[Conditions] --- conditions are user or environment attribute
comparisons. See below for details.
\item[Responses] --- responses are the result values of the policy; they can
be exported for reuse within Access Manager or for the business
application. The result can be attached to the request (as an HTTP header
variable), to the session, or to a cookie.
\end{description}
\subsubsection{Conditions}
For expressing the rules there are four types of conditions available.
However, it is not the condition that renders allow or deny --- the
interpretation of the condition result is done in the rule, as the
pseudocode sketch at the end of this section illustrates.
\begin{description}
\item[Identity] --- Makes use of Access Manager's identity stores in two
possible ways. One is the static definition of a set of identities from
the identity store. The other is to express LDAP queries against the
store.
\item[IPv4 Address Range] --- The interpretation of client IP address
ranges depends on your user base. Usually important information about
the client can be derived from the IP address. Currently only IPv4 is
supported.
\item[Temporal] --- Time periods, expressed as day of the week, from HH:MM
and to HH:MM\@. The time entered here is always evaluated as Greenwich
Mean Time (GMT) without any adjustment for possible daylight saving
time.
\item[Attribute] --- Attributes that are attached to the user request, the
session, or the user LDAP object can be used in the comparison. Among
them are session count, authentication scheme or level, resource name,
and host name.
\end{description}
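To make the interplay of rules and conditions concrete, Example 1 from
above could be expressed as the following pseudocode (not the actual
policy syntax; group name and network range are made up):
\begin{Verbatim}[label=Authorization rule (pseudocode)]
rule "sales admin access":
    allow if  identity  in group "sales-admins"
          and ipv4      in 10.0.0.0/8          # internal network
          and temporal  Mon-Fri 07:00-16:00    # business hours, GMT
\end{Verbatim}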
% -------------------------------------------------------------------------
\chapter{Proof of Concept}
\section{Summary}
The Proof of Concept (PoC) is a virtual machine including all software and
configuration to run \emph{Oracle Access Manager 11gR2} and an additional web
application that has been integrated into the Access Manager architecture.
The Oracle components have been set up automatically using \emph{iam-deployer}.
The project management software \emph{Redmine} is used to demonstrate the
integration procedure and Access Manager features.
\section{Software Components}
The virtual machine is running CentOS 6.7 64-bit; 32 GB of RAM and a total
disk space of 50 GB have been assigned. For kernel parameters and system
configuration please see \emph{iam-deployer}.
The following Oracle suite components are set up:
\begin{itemize}
\item Oracle Access Manager 11.1.2.3
\item Oracle Identity Manager 11.1.2.3
\item Oracle Unified Directory 11.1.2.3
\item Oracle HTTP Server 11.1.2.3 (based on Apache 2.2)
\item Oracle Database Enterprise Edition 11.2
\end{itemize}
The sample web application consists of these components:
\begin{itemize}
\item Redmine 3.2.1
\item Thin 1.6.4 (Ruby 2.3, Rails 4.2)
\item MySQL Server 5.1.73
\end{itemize}
Hardware load balancer replacement and TLS termination:
\begin{itemize}
\item HAProxy 1.5.4
\end{itemize}
Since PS3, the default setup of the Oracle suite requires an amount of RAM that
makes it difficult to run the virtual machine on a standard laptop. A
sample VM is hosted in Vienna.
\section{SSO Process Flow}
A sign-on using Access Manager triggers a certain number of system
interactions, depending on the configuration, the requested resource, and the
required authentication methods.
The first diagram is a simplified overview; the second is a more detailed
sequence diagram of those interactions.
\begin{figure}[H]
\centering
\includegraphics[width=1\textwidth]{diag/msgdiag}
\caption{Login process with messages}
\end{figure}
\begin{figure}[H]
\centering
\includegraphics[width=1\textwidth]{diag/seqdiag}
\caption{Sequence diagram of SSO login process}
\end{figure}
\section{HTTP Routing}
It is important to understand the routing of the client HTTP requests. The
access control takes place before the request reaches the addressed
resource.
\begin{figure}[H]
\centering
\includegraphics[width=1\textwidth]{diag/iamvs}
\caption{HTTP routing to applications}
\end{figure}
\newpage
\section{Infrastructure Configuration}
The configuration here is based on a properly configured Access Manager
instance, e.g.\ created by \verb|iam-deployer|.
\subsection{Domain Name Entries}
The Oracle applications and the new webapp are accessed using different host
names. This is not mandatory, but a recommended approach for putting several
applications on one access point.
These \emph{domain name entries} have been configured for the instance:
\begin{Verbatim}[label=DNS Zone]
$ORIGIN agoracon.at.
$TTL 86400
@ 86400 IN SOA dns1.agoracon.at. admin.agoracon.at. (
2016042013 1d 1h 4w 1h )
...
iamvs IN A 213.47.150.108
IN TXT "v=SUB access identity redmine"
access.iamvs IN CNAME iamvs
access-admin.iamvs IN CNAME iamvs
access-api.iamvs IN CNAME iamvs
access-mobile.iamvs IN CNAME iamvs
identity.iamvs IN CNAME iamvs
identity-admin.iamvs IN CNAME iamvs
identity-api.iamvs IN CNAME iamvs
dir-admin.iamvs IN CNAME iamvs
omsas.iamvs IN CNAME iamvs
redmine.iamvs IN CNAME iamvs
...
_443._tcp.iamvs.agoracon.at. 86400 IN TLSA (1 0 1 fingerprint...)
...
\end{Verbatim}
Recommended security mechanisms:
\begin{itemize}
\item DANE\footnote{RFC 6394: Use Cases and Requirements for DNS-Based
Authentication of Named Entities (DANE)} and TLSA\footnote{RFC 6698:
The DNS-Based Authentication of Named Entities (DANE) Transport
Layer Security (TLS) Protocol: TLSA}
\item HTTP Strict Transport Security\footnote{RFC 6797: HTTP Strict
Transport Security (HSTS)}
\item DNSSEC\footnote{Around a dozen RFC and growing. A good start point
is RFC 4033: DNS Security Introduction and Requirements}
\end{itemize}
\subsection{Load Balancer}
A software replacement for the hardware load balancer used in production is
configured in the virtual machine. This enables operations to use the same web
configuration across all environments. \verb|HAProxy| can be installed as an
OS repository package.
The configuration used for this instance:
\begin{Verbatim}[label=haproxy.conf]
# $haproxy: haproxy.conf,v 1.0 2016/04/06 16:25:44 kapfenho Exp $
# vi:ft=haproxy:
# Part of IAM deployment
#
global
daemon
log 127.0.0.1 local2
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
stats socket /var/lib/haproxy/stats
# tuning
tune.ssl.default-dh-param 2048
tune.ssl.cachesize 20000
tune.ssl.lifetime 3600
defaults
mode http
log global
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 10s
timeout check 10s
maxconn 3000
frontend http_proxy
bind *:80
redirect scheme https if !{ ssl_fc }
frontend https_proxy
bind *:443 ssl crt /etc/haproxy/x509/iamvs.pem force-tlsv12
acl host_red hdr_beg(host) -i redmine
acl host_oim hdr_beg(host) -i identity-api.
acl host_oim hdr_beg(host) -i identity.
default_backend oam
use_backend red if host_red
use_backend oim if host_oim
backend oam
server oam1 localhost:7777 maxconn 256 check
backend oim
server oim1 localhost:7778 maxconn 256 check
backend red
server red1 localhost:7779 maxconn 256 check
\end{Verbatim}
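After changes, the configuration can be syntax-checked with HAProxy's check
mode before a reload (the file path follows the setup above):
\begin{Verbatim}[label=Configuration check]
$ haproxy -c -f /etc/haproxy/haproxy.conf
Configuration file is valid
\end{Verbatim}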
\newpage
\section{Integration Tasks}
To integrate a web application into Access Manager the following tasks need to
be performed:
\begin{itemize}
\item Place Policy Enforcement Point on request path (infrastructure)
\item Configure Application Domain in Access Manager (Access Manager)
\item Remove application login and configure SSO user (application)
\item Change logout behaviour (application)
\end{itemize}
Changes in Access Manager can be done using the user interface
\emph{oamconsole}, by importing XML definition files or by using the WLST
scripting interface.
\subsection{Routing via Policy Enforcement Point}
A policy enforcement point (PEP, e.g.\ Webgate) needs to be able to inspect the
client request. This can be achieved by using an already existing reverse proxy
with a PEP, creating an additional reverse proxy with a PEP, or deploying
Webgate to an existing reverse proxy. There are also some commercial firewall
products available that contain a compatible PEP (additional fees may apply
for add-ons).
Additional PEPs need to be registered in Access Manager. The communication
between Access Manager and the PEP needs to be secured. This can be done in a
general way for all PEPs (of the same type) or by individual certificates for
each PEP instance.
\subsection{Configure Application Domain in Access Manager}
The new application domain can be configured using the user interface or by
definition import. The domain definition contains all resource definitions
along with their authentication and authorization policy.
\begin{Verbatim}[label=Register Application Domain]
<?xml version="1.0" encoding="UTF-8"?>
<PolicyRegRequest>
<serverAddress>http://iamvs.agoracon.at:7001</serverAddress>
<hostIdentifier>iamvs.agoracon.at</hostIdentifier>
<applicationDomainName>RedmineDomain</applicationDomainName>
<protectedAuthnScheme>LDAPScheme</protectedAuthnScheme>
<protectedResourcesList>
<resource>/account/**</resource>
<resource>/news/**</resource>
<resource>/issues/**</resource>
<resource>/projects/**</resource>
<resource>/boards/**</resource>
<resource>/journals/**</resource>
<resource>/imports/**</resource>
<resource>/my/**</resource>
<resource>/users/**</resource>
<resource>/wiki/**</resource>
<resource>/time_entries/**</resource>
<resource>/activity/**</resource>
<resource>/attachment/**</resource>
<resource>/groups/**</resource>
<resource>/trackers/**</resource>
<resource>/admin/**</resource>
<resource>/workflows/**</resource>
<resource>/**</resource>
</protectedResourcesList>
<publicResourcesList>
<resource>/public/**</resource>
</publicResourcesList>
<excludedResourcesList>
<resource>/excluded/**</resource>
</excludedResourcesList>
<hostPortVariationsList>
<hostPortVariations>
<host>redmine.iamvs.agoracon.at</host>
<port>7779</port>
</hostPortVariations>
</hostPortVariationsList>
<rregApplicationDomain>
</rregApplicationDomain>
</PolicyRegRequest>
\end{Verbatim}
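Such a request file is typically imported with the remote registration tool
shipped with Access Manager; the following invocation is a sketch, and the
exact paths and file names depend on the installation:
\begin{Verbatim}[label=In-band remote registration (sketch)]
$ cd $IAM_HOME/oam/server/rreg
$ ./bin/oamreg.sh inband input/RedmineDomainRequest.xml
\end{Verbatim}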
\subsection{Login Process Integration}
With the Access Manager integration the application no longer needs a login
screen of its own. Wherever authentication is necessary, the request
will be authenticated before it reaches the application. The application needs
to take the principal set by Access Manager.
In our example the HTTP header variable named \verb|HTTP_OAM_REMOTE_USER| is
used for passing the user identifier. Using header variables is a common
method for SSO; commercial software products usually support this by
configuration.
A customization for a Rails application is done with three additional lines
(including the comment).
\begin{Verbatim}[label=Changes for Login]
diff --git a/app/controllers/application_controller.rb b/app/controllers/application_controller.rb
index 092c83a..f8741a6 100644
--- a/app/controllers/application_controller.rb
+++ b/app/controllers/application_controller.rb
@@ -103,6 +103,9 @@ class ApplicationController < ActionController::Base
user = (User.active.find(session[:user_id]) rescue nil)
elsif autologin_user = try_to_autologin
user = autologin_user
+ elsif request.env["HTTP_OAM_REMOTE_USER"]
+ # userid passed by oam in http header
+ user = (User.active.find_by_login(request.env["HTTP_OAM_REMOTE_USER"].to_s) rescue nil)
elsif params[:format] == 'atom' && params[:key] && request.get? && accept_rss_auth?
# RSS key authentication does not start a session
user = User.find_by_rss_key(params[:key])
\end{Verbatim}
\subsection{Logout Process Integration}
The following lines are taken from a reverse proxy access log and show the SSO
logout process. In this session two applications were used: Redmine and the
Access Manager oamadmin application.
\begin{Verbatim}[label=HTTP Calls during Logout]
redmine.iamvs.agoracon.at "POST /logout HTTP/1.1" 301 117
redmine.iamvs.agoracon.at "GET /oamsso/logout.html HTTP/1.1" 302 261
access.iamvs.agoracon.at "GET /oam/server/logout HTTP/1.1" 200 3111
redmine.iamvs.agoracon.at "GET /oam_logout_success HTTP/1.1" 200 114
access-admin.iamvs.agoracon.at "GET /oam_logout_success HTTP/1.1" 200 114
access.iamvs.agoracon.at "GET /oam/pages/logout.jsp? HTTP/1.1" 200 1899
\end{Verbatim}
The POST to resource \verb|/logout| is intercepted by the agent and redirected
to the agent default logout page (\verb|/oamsso/logout.html|), followed by a
call to the access server's central logout (\verb|/oam/server/logout|). The
session information is now invalidated on the server side (Coherence) and on
the client side (cookies). A sequence of call-backs to the applications
involved in this session is processed, triggering the application-specific
logouts (\verb|/oam_logout_success|). Eventually the user gets the configured
post-logout page.
\begin{Verbatim}[label=Changes for Logout]
diff --git a/config/routes.rb b/config/routes.rb
index 9652496..bdaf675 100644
--- a/config/routes.rb
+++ b/config/routes.rb
@@ -19,7 +19,10 @@ Rails.application.routes.draw do
root :to => 'welcome#index', :as => 'home'
match 'login', :to => 'account#login', :as => 'signin', :via => [:get, :post]
- match 'logout', :to => 'account#logout', :as => 'signout', :via => [:get, :post]
+ match 'logout' => redirect('/oamsso/logout.html'), as: :signout, via: [:get, :post]
+ match 'oam_logout_success', :to => 'account#logout', :via => [:get, :post]
match 'account/register', :to => 'account#register', :via => [:get, :post], :as => 'register'
match 'account/lost_password', :to => 'account#lost_password', :via => [:get, :post], :as => 'lost_password'
match 'account/activate', :to => 'account#activate', :via => :get
\end{Verbatim}
% -------------------------------------------------------------------------
\chapter{Miscellanies}
\section{Authenticated Request}
An authenticated user requests a resource. Webgate has successfully verified the
request and added the user identifier (\verb|OAM_REMOTE_USER|) and domain
(\verb|OAM_IDENTITY_DOMAIN|). The request is now passed to the upstream system.
\begin{Verbatim}
Hypertext Transfer Protocol
GET /cs/resources/yui/treeview/assets/treeview-core.css HTTP/1.1
[Expert Info (Chat/Sequence): GET /cs/resources/yui/treeview/assets/tre
eview-core.css HTTP/1.1
[Message: GET /cs/resources/yui/treeview/assets/treeview-core.css
HTTP/1.1]
[Severity level: Chat]
[Group: Sequence]
Request Method: GET
Request URI: /cs/resources/yui/treeview/assets/treeview-core.css
Request Version: HTTP/1.1
Host:ucmdmsint.example.com
User-Agent:Mozilla/5.0 (X11; Linux x86_64; rv:27.0) Gecko/20100101 Firefox/
27.0 Iceweasel/27.0a2
Accept:text/css,*/*;q=0.1
Accept-Language:en-US,en;q=0.5
Accept-Encoding:gzip, deflate
DNT:1
Referer:https://ucmdmsint.example.com/cs/idcplg?IdcService=GET_DOC_PAGE&Act
ion=GetTemplatePage&Page=HOME_PAGE
Cookie:IntradocAuth=NTLM; IdcLocale=English-US; IntradocLoginState=1; JSESS
IONID=Q5cFSz0FXpYfHKTyyYvdrnjHGKw9jQLXRw0w0wbsTMj1yRC9m1bm!-197
112114!-1961464539; _WL_AUTHCOOKIE_JSESSIONID=rcC0mwhZjCRvXDuC5
3H6; IdcTimeZone=Europe/Vienna
If-Modified-Since:Thu, 16 Feb 2016 15:28:49 GMT
Cache-Control:max-age=0
OAM_REMOTE_USER:KAPFENHO
OAM_IDENTITY_DOMAIN:OIMIDStore
ECID-Context:1.004vUS6Q5_AFo2wQcEiOPL0003SV00003c;kYjE1ZDLIPIRj3RSj2TPnVPTm
JPPnV9UpPRBoITP_MTQ_NVBXJVS_KVSj4US_LQTdLRTh3RRmLQBZJVS
Connection:Keep-Alive
WL-Proxy-SSL:true
X-Forwarded-For:192.0.2.16
WL-Proxy-Client-IP:192.0.2.132
Proxy-Client-IP:192.0.2.132
WL-Proxy-Client-Port:30979
X-WebLogic-KeepAliveSecs:30
X-WebLogic-Request-ClusterInfo:true
x-weblogic-cluster-hash:dZbfyq4+X4kn1DYr6dYHPPOWqpk
[Full request URI: http://ucmdmsint.example.com/cs/resources/yui/treeview/a
ssets/treeview-core.css]
[HTTP request 2/10]
[Prev request in frame: 62]
[Response in frame: 135]
[Next request in frame: 160]
\end{Verbatim}
\end{document}
% vi:set lbr breakindent:
%!TEX program = pdflatex
% Full chain: pdflatex -> bibtex -> pdflatex -> pdflatex
\documentclass[lang=en,12pt]{elegantpaper}
\usepackage{url}
\usepackage[binary-units=true]{siunitx}
\newcommand{\mysec}[1] {
\SI[per-mode=symbol]{#1}{\second}
}
\title{Project Report of NCS \& OLMP}
\author{WU Yechang}
\institute{11711918}
\date{\today}
\begin{document}
\maketitle
\begin{abstract}
\textbf{NCS} is an evolutionary algorithm which maintains multiple individual search processes in parallel and models the search behaviors by encouraging differences among the probability distributions.
By this means, individual search processes share information and cooperate with each other to search diverse regions of the search space.
\textbf{OLMP} is Optimization-based LMP, an optimization-based approach used to automatically tune the pruning thresholds for layer-wise magnitude-based pruning (LMP).
Concretely, the threshold tuning problem is formulated as a constrained optimization problem, which requires minimizing the size (i.e., the number of connections) of the pruned network, subject to a constraint on the accuracy loss. Then, a powerful derivative-free optimization algorithm is employed to solve this problem.
The relevant code can be found at \href{https://github.com/Wycers/Codelib/tree/master/CS303/lab4-6}{https://github.com/Wycers/Codelib/tree/master/CS303/lab4-6}.
\keywords{NCS, OLMP, parameters}
\end{abstract}
\section{Algorithm description}
\subsection{Main Idea of NCS}
Negatively correlated search, \textbf{NCS}, is a search algorithm that inherits the basic form of the parallel hill-climbing algorithm.
The individuals in the population rely on the Gaussian mutation operator to generate new solutions.
\textbf{NCS} uses $f(x_i)$ to evaluate the fitness of a solution $x_i$ and $Corr(p_i)$ to evaluate the correlation of the search distribution $p_i$ from which $x_i$ is sampled.
For a minimization problem, the $i$-th search process normalizes the gap to the currently known optimal solution.
A smaller $f(x_i)$ indicates that $x_i$ has a better fitness,
and a larger $Corr(p_i)$ indicates that the progeny produced by $x_i$ will lie far away from the individuals generated by the other, locally searching processes.
Therefore, solutions for which $f(x_i)$ is smaller and $Corr(p_i)$ is larger tend to be retained.
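A minimal Python sketch of one NCS generation is shown below; the function
\texttt{corr} (the correlation/diversity measure between search
distributions) and the normalization details are placeholders for the
actual implementation:
\begin{lstlisting}
import numpy as np

def ncs_generation(xs, sigmas, f, corr, lam):
    """One generation of NCS: each process mutates its solution
    and keeps the offspring if it is fit and diverse enough."""
    for i in range(len(xs)):
        # Gaussian mutation around the current solution
        x_new = xs[i] + sigmas[i] * np.random.randn(len(xs[i]))
        # a small normalized f (good fitness) and a large Corr
        # (far from the other processes) favour acceptance
        if f(x_new) / corr(x_new, xs, i) < lam:
            xs[i] = x_new
    return xs
\end{lstlisting}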
\subsection{Applications of NCS}
For problems with a large solution space, \textbf{NCS} can find an approximately optimal solution. For example,
\begin{enumerate}
\item Search for extreme points of a math function.
\item Search for generalized Fermat points.
\item Search for an answer to the travelling salesman problem (\textbf{TSP}).
\end{enumerate}
\subsection{Main Idea of OLMP}
OLMP is an automatic tuning approach based on optimization.
The idea is to transform the threshold tuning problem into a constrained optimization problem (i.e., minimizing the size of the pruned model subject to a constraint on the accuracy loss),
and then use powerful derivative-free optimization algorithms to solve it. Under this premise, the accuracy of the model will not be damaged.
\subsection{Applications of OLMP}
For problems with a large solution space, an approximately optimal solution is obtained.
\begin{enumerate}
\item Apply pruning to a DNN so that the model size can be minimized.
\item Enhance the efficiency of model training.
\item Reduce the barrier to training and deploying DNN models.
\end{enumerate}
\section{Parameters description}
\subsection{$\lambda$}
\subsubsection{Role}
$\lambda$ is a parameter used to tune the trade-off between exploration and exploitation.
Assume that $x_i$ generated a new solution $x_i'$; if $\frac{f(x_i')}{Corr(p_i')} < \lambda$, the new solution $x_i'$ will be accepted.
Otherwise, $x_i'$ will be discarded.
\subsubsection{Effect on final performance}
According to the implementation, the smaller $\lambda$ is, the harder it is for a new solution to be accepted.
\subsubsection{Best range}
For F6 and F12, $\lambda$ works best when it is close to $1$, so I chose it from $[0.5, 1.0]$.
\subsection{r}
\subsubsection{Role}
\textbf{r} is used to tune the step size.
After every epoch of iterations, each $\sigma_i$ is updated according to the suggested 1/5 success rule.
This means that after each \textbf{epoch} of iterations, if the probability of obtaining a better solution is greater than $20\%$,
the search step-size is enlarged (scaled by $r^{-1}$, with $0 < r < 1$).
On the other hand,
if an RLS frequently failed to achieve a better solution in the past iterations,
it might have been stuck in a local optimum. In this case, the search step-size is reduced (scaled by $r$) to help the RLS explore other promising regions of the search space.
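Written out, and assuming $0 < r < 1$ as in the range used later, the per-process update after each epoch is simply:
\begin{lstlisting}
# after every `epoch` iterations, for each search process i:
if success_rate[i] > 0.2:
    sigma[i] = sigma[i] / r   # enlarge the step size
else:
    sigma[i] = sigma[i] * r   # reduce the step size
\end{lstlisting}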
\subsubsection{Effect on final performance}
According to the heuristic rule adopted in NCS, the larger \textbf{r} is, the more ``radically'' the individuals behave.
In other words, with a larger \textbf{r} the step size shrinks more slowly after failures, so when an individual generates a new individual, the new one tends to lie farther from the old one.
\subsubsection{Best range}
For F6 and F12, \textbf{r} works best when it is close to $1$, so I chose it from $[0.5, 1.0]$.
\subsection{epoch}
\subsubsection{Role}
\textbf{Epoch} is the number of iterations in each search period.
As described in the last subsection, after every \textbf{epoch} iterations, each $\sigma_i$ is updated according to the suggested 1/5 success rule.
\subsubsection{Effect on final performance}
Every \textbf{epoch} iterations, new individuals are generated and replace the previous individuals.
The larger \textbf{epoch} is, the longer the search time the individuals have.
So if \textbf{epoch} is larger, individuals will search more deeply around the current region.
\subsubsection{Best range}
In my practical experience, the best range of \textbf{epoch} is $[100, 200]$ for F6 and F12.
\subsection{n}
\subsubsection{Role}
\textbf{n} is the number of regions searched in parallel during the iteration.
In the beginning, \textbf{n} individuals are generated randomly and then iterated for \textbf{epoch} iterations.
\subsubsection{Effect on final performance}
Intuitively speaking, the larger \textbf{n} is, the slower each search iteration is.
Because each iteration generates \textbf{n} individuals, the search procedure covers more regions but costs more time and computational resources.
\subsubsection{Best range}
In my practical experience, the best range of \textbf{n} is $[1, 3]$ for F6 and F12.
For F29, the final result is only related to \textbf{n}, and $n = 92$ gives the best result.
\begin{table}[htbp]
\small
\centering
\caption{Final values and local results \label{tab:reg}}
\begin{tabular}{cccc}
\toprule
& F6 & F12 & OLMP \\
\midrule
\textbf{$\lambda$} & 0.7050851182889523 & 0.7106047788961356 & 0.47548385463535714 \\
\textbf{r} & 0.9534024348847432 & 0.5851014575394479 & 0.9295558997353595 \\
\textbf{epoch} & 117 & 163 & 233 \\
\textbf{n} & 1 & 1 & 92 \\
\textbf{Local Result} & 390.00000000690886 & -459.9999999997802 & 0.9889914106747684 \\
\textbf{Online Result} & 390.00000000692114 & -460 & 0.9889914106747684 \\
\textbf{Running Time} & \mysec{47.52} & \mysec{35.68} & \mysec{68.59} \\
\bottomrule
\end{tabular}%
\end{table}%
\section{Tuning procedure}
I used the \textbf{simulated annealing} algorithm to tune the parameters.
The \textbf{simulated annealing} algorithm uses heating and cooling control strategies to steer the search process away from local extremum points, inspired by the metallurgical annealing process.
As the temperature decreases, the search process shifts from exploration to exploitation.
For F6 and F12:
\begin{lstlisting}
# simulated annealing over the NCS parameters
randomly generate a parameter p
while temperature > pre_defined_temperature:
    for pre_defined_epoch iterations:
        generate new parameter p' from p and the current temperature
        calculate result by calling NCS(p')
        if result is better:
            accept new parameter (set p = p')
        else:
            accept p' with a probability depending on the result
            difference and the current temperature
    decrease temperature
output p
\end{lstlisting}
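The ``randomly accept'' step above presumably follows the usual Metropolis criterion: a new parameter whose result is worse by $\Delta$ is accepted with probability
\[
P_{\mathrm{accept}} = \exp(-\Delta / T),
\]
so that at a high temperature $T$ almost any move is accepted (exploration), while at a low $T$ only improvements survive (exploitation).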
For F12, I searched over all possible $n$, then picked the best result.
\section{Conclusion}
During this lab, I learned NCS in addition to the hill-climbing and simulated annealing algorithms, and gained a better understanding of evolutionary algorithms.
I implemented the simulated annealing algorithm to tune the search process. I ran my program for 3 days and finally got results that were good enough.
\nocite{*}
\bibliography{wpref}
\end{document}
\chapter{Introduction}\label{introduction}
\input{sections/introduction/motivation.tex}
\input{sections/introduction/problem.tex}
\section{Limitations}
Based on the goals and requirements stated in the previous section, the scope of this thesis is to design and implement an application capable of recording breathing data obtained by the Flow sensor over an extended period.
We limit the scope to integrating support for the Flow sensor in Nidra, and exclude testing of already integrated sensor sources (e.g., BITalino). Further, with the provided Flow sensor still under development, we restricted the design to collecting respiration (breathing) data (as opposed to heart-rate or other physiological data).
The application is designed to collect breathing data; we do not perform any analysis to predict or detect sleeping disorders based on the data. However, we provide an interface for patients to record their breathing data and share the data with their researchers/doctors for detection or analysis. In addition, we facilitate an interface for future developers to utilize the data provided by Nidra to perform advanced analysis, examination, or prediction of the disorder.
Finally, the implementation is Android-specific as the previous work performed on the project is designed solely for Android applications.
\section{Research Method}
The work in this thesis is classified as \textit{computing research} with a principle approach of an \textit{engineering method} as described in \cite{Glass_1995}. The engineering method (evolutionary paradigm) is to: (i) observe existing methods, (ii) propose a better solution, (iii) build or develop artifacts\footnote{human-manufactured objects produced during the development}, and (iv) measure and analyze until no further improvements are possible. The report identifies patterns amongst various principle approaches and categorizes the patterns into phases: \textit{(i) informational phase}, \textit{(ii) propositional phase}, \textit{(iii) analytical phase}, and \textit{(iv) evaluation phase}. Below, we give a brief description of each phase and discuss how our work fits into each of them.
\newpage
\subsection{Informational Phase}
The informational phase according to the report is to \textit{"gather or aggregate information via reflection, literature survey, people/organization survey, or poll"}.
In this thesis, we survey previous related work in the field of detecting, analyzing, and diagnosing sleep-related breathing disorders on a mobile device. Based on this, we derive that the application created in this thesis has the same motivation; however, the related work operates with different kinds of measures and instruments in its solutions (e.g., using microphones and accelerometers to provide early detection of sleep apnea). As such, we focused on creating an extensible application that allows future developers to create modules on top of our solution (as illustrated in Figure \ref{fig:nidra_modules}). By allowing this, future developers can expedite the innovation in the research and study of sleep-related breathing disorders, as well as allowing the patients to operate with one application.
\begin{figure}
\centering
\includegraphics[scale=0.9]{images/Nidram2.pdf}
\caption{Nidra with the possibility of adding multiple modules that extend the functionality or provide data enrichment, and integrating support for multiple sensor sources (with the use of the data streams dispatching tool).}
\label{fig:nidra_modules}
\end{figure}
\subsection{Propositional Phase}
The propositional phase according to the report is to \textit{"propose and/or formulate a hypothesis, method or algorithm, model, theory, or solution"}.
The solution proposed in this thesis is to create an application used to record, share, and analyze breathing data collected over an extended period. We want to extend the CESAR project by providing a user interface for the patient to perform these tasks while using the tools that the project facilitates. Mainly, we want to use the data streams dispatching tool in order to manage current and future sensor sources; in doing so, we add support for the Flow sensor. In the end, we wish to provide an application that is used by patients to record their breathing data during sleep and to share the data with researchers/doctors. As such, we aid in collecting breathing data used for early detection of sleep-related breathing disorders (e.g., obstructive sleep apnea) from home.
\subsection{Analytical Phase}
The analytical phase according to the report is to \textit{"analyze and explore a proposition, leading to a demonstration and/or formulation of a principle or theory"}.
With the proposition phase of this thesis, we analyze the tasks of the application. We separate the tasks into concerns (i.e., recording, sharing, analyzing, modules, storage, and presentation) and propose a structure of components, each with various functionalities and design choices. With all concerns combined, we fulfill the goals of this thesis. As a demonstration, we realize the design choices by implementing them as an Android application, called Nidra.
\subsection{Evaluation Phase}
The evaluation phase according to the report is to \textit{"evaluate a proposition or analytic finding by means of experimentation (controlled) or observation (uncontrolled, such as a case study or protocol analysis), perhaps leading to a substantiated model, principle, or theory"}.
Based on the requirements and goals of this thesis, we evaluate the application by conducting experiments. Some of the experiments include participants performing various tasks, such that we can observe the outcomes for participants without prior knowledge or experience of the application. In the end, we evaluate and conclude whether the goals and requirements of this thesis are satisfied.
\section{Thesis Outline}
The thesis is divided into three parts, of which the following list presents a general overview:
\begin{itemize}
\item Part 1: \textbf{Introduction \& Background}
\begin{description}[font=\normalfont\itshape]
\item[Chapter 2: Background] presents the background material necessary for understanding the fundamentals of this thesis. It starts by introducing the CESAR project and the tools provided for data acquisition, as well as a description of the Flow sensor. Finally, it gives an overview of the Android operating system with the information required to understand the structure of the application (Nidra).
\item[Chapter 3: Related Work] presents the related work focusing on creating mobile applications to collect physiological data in order to diagnose sleep apnea, with a brief discussion of how we contribute novelty and improvements over the related work.
\end{description}
\item Part 2: \textbf{Design \& Implementation}
\begin{description}[font=\normalfont\itshape]
\item[Chapter 4: Analysis and High-Level Design] presents the functional requirements of the application, the tasks derived based on the requirements and goals of the thesis, and the tasks separated into concerns, for which we propose an implementation structure that contains components with various functionalities and design choices. In the end, we discuss the data structure, namely the data entities (i.e., record, sample, module, and user), the data format (JSON versus XML), and the structure of the data packets sent from the sensor sources as well as the data packets sent on sharing.
\item[Chapter 5: Implementation] presents the application components of the project (i.e., Nidra, the data streams dispatching module, and the Flow sensor wrapper) along with the interfaces for IPC connectivity. Moreover, it presents the steps and flows taken in order to implement the concerns (i.e., recording, sharing, analyzing, modules, storage, and presentation) by separating the actions and showing how the components in the application interact.
\end{description}
\item Part 3: \textbf{Evaluation \& Conclusion}
\begin{description}[font=\normalfont\itshape]
\item[Chapter 6: Evaluation] presents four experiments conducted in order to evaluate the system requirements of the application. Each experiment has a short description followed up by results, a discussion on improvements and findings, and a conclusion of the experiment. Finally, a conclusion on the system requirements of the thesis.
\item[Chapter 7: Conclusion] presents the summary of the thesis, followed up with contributions that answer the goals defined in the problem statement. Finally, a list of improvements and future work that can be made to the application.
\end{description}
\end{itemize}
\chapter{INTRODUCTION}
\label{chap:introduction}
\section{MESOSCOPIC LIGHT TRANSPORT}
\label{sec:what_field_am_i_in}
When wave interference effects can be disregarded, the diffusion equation satisfactorily describes light propagation in random media\mbox{~\cite{2009_Lagendijk_PT,1999_van_Rossum}}. However, when effects due to self-interference in multiple scattering become appreciable, change in transport behavior leads to failure of the diffusion model and thus to a new phenomenon, Anderson localization (AL)~\cite{1958_Anderson}. The concept of AL is mathematically defined\index{localization} in the context of infinite passive random media~\cite{1983_Frohlich,1988_Lifshits,1989_Dreifus}. For finite systems, signatures of AL are related to the strict mathematical definition by scaling theory~\cite{1979_Anderson,1981_MacKinnon_scaling,2006_Markos}. An example of the qualitative change in transport behavior is provided by the transition between diffusion and AL; this transition can be expected to change the dependence of the average transmission on system length $L$ from $\langle T \rangle \propto \ell_{tmfp}/L$ for diffusion to $\langle T \rangle \propto \exp(-L/\xi)$ for AL (e.g.,~\cite{1999_van_Tiggelen}). Here, $\ell_{tmfp}$ is the transport mean free path, and $\xi$ is the localization length (c.f. Appendix~\ref{sec:lengths} for the definitions of different length scales).
Historically, the concept of AL originated in condensed matter physics from the study of electron transport in disordered conductors on the mesoscopic length scale. This scale refers to a system length $L$ at which quantum wave effects alter transport behavior when compared to classical particle-based predictions. For systems in which the phase coherence length $L_{\phi}$ is greater than $L$, the effect of de~Broglie wave interference on electrons must be accounted for~\cite{2009_Lagendijk_PT,1985_Lee,1988_Webb_Washburn,1991_Altshuler}. However, AL is difficult to observe in transport of electrons due to electron-electron and electron-phonon interactions. This obstacle can be overcome by sample preparation and by measuring transport at low temperatures; more importantly, however, researchers have realized that the concept of AL as self-interference of waves in random media applies to any kind of wave, including electromagnetic waves~\cite{1984_John_prl,1985_Anderson}.
For AL as a phenomenon of electron propagation, conservation of charge implies that the number of electrons is constant~\cite{1991_Altshuler}, whereas for light, there is no such constraint. The number of photons in nonconservative media can decrease due to absorption or increase in the presence of gain. Since actual experiments~\cite{2009_Lagendijk_PT,2000_chabanov_nature,1997_wiersma_nature,1991_Genack} take place in finite nonconservative media, it is important to characterize the nature of transport in these systems and to generalize the concept of AL for such media.
\section{CRITERIA FOR DIFFUSION-LOCALIZATION TRANSITION}
\label{sec:thesis_statement}
One of the goals of this dissertation is to develop a localization criterion (LC) in nonconservative random media. To investigate the transition process, a theoretical model for each transport regime is needed. The diffusion equation analytically describes the diffusion process, but AL cannot be described in that framework. Transition between diffusion and AL resists analytical treatment because the diffusion approximation is made based on the assumption that the wavelength~$\lambda$ is much less than $\ell_{tmfp}$, whereas AL is expected when $k \ell_{tmfp} \approx 1$ in three dimensional (3D) random media~\cite{1960_Ioffe_criterion}. Here, $k$ is equal to $2 \pi/\lambda$. Thus, AL cannot use the same particle-based models as diffusion. Although much work has been done with 3D systems, finite quasi-one-dimensional (quasi-1D) media are sufficiently complex to capture the transition from diffusion to AL.
This work investigates localization criteria in nonconservative random media, as described below, using the numerical models of waveguides outlined in Section~\ref{sec:method_numerical}.
% why quasi-1D?
Interest in quasi-1D systems is driven by experiments~\cite{2006_Sheng} and the feasibility of the numerical model capable of demonstrating AL and diffusion phenomena.
Before establishing an LC, transport regimes in nonconservative media must be defined (see Section~\ref{sec:twod_plot}). With the systemization of transport behavior, an LC describes which behavior can be expected. In experiments with random media~\cite{1999_Cao_RandomLaserPRL,2005_Cao} an LC can determine when lasing is due to strong localization rather than to diffusive random lasing~\cite{2008_Wiersma}. Although this work focuses on the transition in the context of light, the results apply to any self-interference of waves in nonconservative media such as acoustics~\cite{1985_Kirkpatrick,2006_Yamilov_Weaver,2008_van_Tiggelen_Nature}.
To determine whether AL or diffusion (or neither) describes transport of light in passive systems, three regimes are defined. When few scattering events occur in transmission, the ballistic regime is characterized by the average distance between scatterers ($\ell_{scat}$). If a wave encounters a sufficient number of scatterers such that the original direction is completely randomized (see definition of $\ell_{tmfp}$ in Appendix~\ref{sec:lengths}), then diffusive transport behavior is observed. Finally, the localized regime is encountered when the system is longer than the localization length $\xi$. In this case, cumulative scattering leads to coherent self-interference of waves that halts diffusion. A single-parameter can determine which of these three transport regimes a passive experiment is in. The term ``parameter'' refers here to an observable that varies in relation to change in the transport phenomena. For transport of electrons, the ensemble-averaged dimensionless conductance\footnote{Conductance $G=\frac{e^2}{h}Tr(\hat{t}\hat{t}^+)=\frac{e^2}{h}g$~\cite{1988_Stone}} $g$~\cite{1979_Anderson} is the parameter, and for electromagnetic waves the ensemble-averaged transmission $\langle T\rangle$ is equivalent to $g$. Diffusion occurs when this conductance is greater than 1; conductance of less than 1 indicates AL. For passive random media, single-parameter scaling holds that any parameter can determine the applicable transport regime as long as it has one-to-one correspondence with unitless conductance~$g$. Transmission $T$ in photonic systems, the optical counterpart of the electronic conductance~\cite{1988_Stone,1981_Fisher,1981_Soukoulis}, is
\begin{equation}
T = \sum_{a,b} |t_{ab}|^2
\end{equation}
where $t_{ab}$ is the complex transmission amplitude (magnitude and phase) from incident transverse channel $a$ to outgoing channel $b$ of the waveguide. For electronic systems, $g$ is the experimentally accessible quantity, whereas in photonic systems one can also measure the incident-channel-resolved transmission $T_{a}$ and the speckle intensity $T_{ab}$:
\begin{equation}
\begin{gathered}
T_a = \sum_b |t_{ab}|^2 \\
T_{ab} = |t_{ab}|^2
\end{gathered}
\end{equation}
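For concreteness, these quantities follow directly from a field transmission matrix. The short Python sketch below is a toy illustration (the random complex matrix \texttt{t} is merely a stand-in for an actual computed field transmission matrix, and the channel count is an arbitrary assumption), not the simulation code of this work:
\begin{verbatim}
import numpy as np

N = 10                                  # number of transverse channels (assumed)
rng = np.random.default_rng(0)
# Stand-in complex field transmission matrix t_ab for one realization
t = (rng.normal(size=(N, N)) + 1j * rng.normal(size=(N, N))) / np.sqrt(2 * N)

T_ab = np.abs(t) ** 2                   # speckle: channel a -> channel b
T_a = T_ab.sum(axis=1)                  # transmission of incident channel a
T = T_ab.sum()                          # total transmission (analogue of g)
print(T_a.shape, T)
\end{verbatim}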
A nonconservative medium presents an exception to single-parameter scaling because it breaks this one-to-one correspondence~\cite{1994_Freilikher_absorption}. Although a measurement of transmission yields a value, that value does not necessarily correspond to a specific transport regime. Transmission greater than 1 may be due to the presence of gain in localized media~\cite{2004_Yamilov_intensity,2006_Yamilov_conductance}, and transmission less than 1 may be due to absorption in media in the diffusive regime~\cite{2000_chabanov_nature,1998_Brouwer}. Thus, a two-parameter space is required to describe the LC in nonconservative media: one parameter determines the strength of gain or absorption in the medium, and the other determines what transport regime the equivalent passive system would be in. A criterion for characterizing an experiment as diffusive or localized is meaningful only once the specific transport behaviors are well defined.
\section{PASSIVE CRITERIA CURRENTLY AVAILABLE}
\label{sec:passive_criteria}
A number of LCs currently exist for passive media. For example, Thouless~\cite{1977_Thouless} showed that the ratio of the average width $\delta \omega$ of transmission peaks in the spectrum of an open passive system to the average energy level separation $\Delta \omega$ of the closed system yields a unitless number indicating whether the experiment is described by AL or diffusion:
\begin{equation}
\frac{\delta \omega}{\Delta \omega}= g^{(Thouless)}.
\label{eq:Thouless_passive}
\end{equation}
Just as for $g$, AL occurs when $\delta \omega/\Delta \omega$ is less than 1. However, this is not a valid criterion in nonconservative media because the addition of gain also decreases the transmission peak width.
Another approach to finding an LC is to recall that the transition from diffusion to AL implies a breakdown of the applicability of the diffusion description. The self-consistent theory of AL~\cite{1980_Vollhardt_Wolfle} was developed to modify the diffusion equation to account for wave interference. Without self-interference of waves, the diffusion coefficient~$D_0$ is constant throughout the medium. However, when the path of a wave crosses itself and can coherently self-interfere, the diffusion coefficient decreases. Since path loops cannot form near the boundary of a sample, the diffusion coefficient becomes position dependent,~$D(z)$. Thus, the change from a constant~$D_0$ to a position-dependent $D(z)$ signifies the transition to AL. However, this extension of the diffusion description has not previously been shown to fully describe AL for finite systems.
Besides conductance, $D(z)$, and the Thouless criterion, other possible LCs include correlation functions~\cite{2005_Yamilov_correlations} of observables, the inverse participation ratio, and transmission fluctuations. A diversity of criteria facilitates experimental measurement. All the aforementioned criteria are equally valid in passive media due to single-parameter scaling. However, the proposed correlation functions and transmission fluctuations were developed specifically for nonconservative photonic random media. For example, Ref.~\cite{2000_chabanov_nature} presents the ratio var$(T/\langle T \rangle)$ in the context of an experiment with microwaves in waveguides containing disordered absorbing media. However, this ratio may not be useful in media with gain since $\langle T \rangle$ is not well defined: when gain is present, given a sufficient number of disordered realizations, a few will lase, and the average and higher moments of $T$ are ill-defined. To avoid this issue, this dissertation uses conditional statistics to disregard the small number of lasing realizations. A second problem with the var$(T/\langle T \rangle)$ ratio as a criterion is that $T$ diverges as the amount of gain in a medium increases. Section~\ref{sec:te_ratio_candidate} presents an LC that addresses these issues.
\section{\texorpdfstring{$T/{\cal E}$}{T/E} AS DIFFUSION-LOCALIZATION CRITERION}
\label{sec:te_ratio_candidate}
In media with gain, the transmission~$T$ of light theoretically diverges as the gain approaches the random lasing threshold (RLT). (Since a saturation mechanism is model specific, the models here are restricted to gain below the RLT.) To eliminate the divergence, $T$ can be normalized by the energy in the medium,~${\cal E}$. Although both quantities diverge at the RLT, combining the diffusion equation in nonconservative media~\cite{2010_Payne_TE} with conservation of energy shows that the ratio $T/{\cal E}$ approaches a constant. Starting from conservation of energy \mbox{(${\cal E} =\int_0^L{\cal W}(z)dz$)} with respect to flux~$J$,
\begin{equation}
\frac{\partial {\cal W}}{\partial t} + \vec{\nabla} \cdot \vec{J} = \frac{{\cal W} c}{\ell_g} + J_0 \delta(z-z_p)
\label{eq:conservation_flux}
\end{equation}
where $z_p$ is penetration depth, $J_0$ is incident flux, $\ell_g$ is gain length, and $c$ is the speed of light. Assuming a steady state in one dimension,
\begin{equation}
\frac{d J_z}{dz} = \frac{{\cal W} c}{\ell_g} + J_0 \delta(z-z_p).
\label{eq:oned_no_time_JW}
\end{equation}
Both sides are then integrated with respect to $z$ over the length of the medium; with the outgoing fluxes $J_z(L)=T J_0$ at the output and $J_z(0)=-R J_0$ at the input (the incident flux enters through the source term) and the total energy ${\cal E}=\int_0^L {\cal W}(z)\,dz$, this yields the equation for conservation of energy for a nonconservative medium:
\begin{equation}
T + R -1 = {\cal E} \frac{c}{\ell_g J_0}.
\label{eq:conservation_energy_active_medium}
\end{equation}
In the limit that the gain length $\ell_g$ approaches the critical gain length~$\ell_{g_{cr}}$ at the RLT, both $T$ and $R$ diverge, so the constant terms in Eq.~\ref{eq:conservation_energy_active_medium} become negligible. Assuming $T \approx R$,
\begin{equation}
\frac{T}{{\cal E}} = \frac{c}{2 \ell_{g_{cr}}J_0}.
\label{eq:TE_RLT_limit}
\end{equation}
This constant is disorder-specific through $\ell_{g_{cr}}$, so $T/{\cal E}$ must be computed for each realization before taking averages or higher moments.
For gain below the RLT, any deviation of the $\langle T / {\cal E} \rangle$ measured in an experiment from the value predicted by diffusion would be due to wave-interference effects (and would thus constitute a signature of AL). For passive media, the deviation from the diffusion prediction for $\langle T / {\cal E} \rangle$ is related~\cite{2010_Payne_TE} to the well-established~\cite{2008_Cherroret} $D(z)$ based on the self-consistent theory of AL (see Appendix~\ref{sec:appendix_TE_Dz_relation}):
\begin{equation}
\left\langle \frac{ T }{{\cal E}}\right\rangle \approx \frac{1}{J_0} \frac{2 D_0}{L^2} \left(\frac{1}{L} \int_0^L \frac{D_0}{D(z)} dz \right)^{-1}.
\label{eq:TE_related_Dz}
\end{equation}
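As a quick numerical illustration, Eq.~\ref{eq:TE_related_Dz} can be evaluated for an assumed suppression profile of $D(z)$. In the Python sketch below, the profile is a toy assumption chosen only to mimic interference-induced suppression in the interior of the sample; it is not the self-consistent solution:
\begin{verbatim}
import numpy as np

L, D0, J0 = 1.0, 1.0, 1.0
z = np.linspace(0.0, L, 1001)
# Toy profile: D = D0 at the boundaries, suppressed mid-sample (assumed)
D = D0 * (0.3 + 0.7 * np.cosh(4.0 * (z / L - 0.5)) / np.cosh(2.0))

mean_inv = (D0 / D).mean()              # (1/L) * integral of D0/D(z) dz
TE = (1.0 / J0) * (2.0 * D0 / L**2) / mean_inv
print(TE)   # reduces to 2*D0/(J0*L**2) when D(z) = D0 everywhere
\end{verbatim}
The stronger the suppression of $D(z)$, the smaller the predicted $\langle T/{\cal E}\rangle$, consistent with the discussion below.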
Since $\langle T /{\cal E}\rangle$ is related to~$D(z)$, experimentally $\langle T /{\cal E}\rangle$ should behave as $D(z)$ does with respect to $D_0$ for passive media; that is, it should decrease as the self-interference of waves increases. Therefore, $\langle T /{\cal E}\rangle$ appears to be a good LC in nonconservative media since it is measurable, does not diverge in media with gain, and is related to the established passive criterion~$D(z)$.
\section{METHOD OF STUDY OF CRITERIA FOR DIFFUSION-LOCALIZATION TRANSITION}
\label{sec:method_numerical}
%\section{What methods are used by others?}
When P.~W.~Anderson initiated the field of localization due to self-interference of waves, he did so using a new model for solid state transport, the Anderson tight-binding Hamiltonian~\cite{1958_Anderson}, which applies to arbitrary medium size and dimension. For quasi-1D geometry, random matrix theory (RMT)~\cite{1951_Wigner,1997_Beenakker,2009_Beenakker} % origin, review, review
%uses the fact that the total transfer matrix is unitary, but the elements of that matrix are assumed to be random.
%%% get citations from page 103 of Vellekoop's thesis
%Then the effect on various parameters is observed when a few more scatterers are added to the medium using transfer matrices (perturbing the length of the waveguide).
is widely used. However, neither of these approaches is able to describe the electric field (and thus the total energy ${\cal E}$) inside a random medium.
%How will I accomplish the result desired in the thesis statement?
To study the AL phenomenon in nonconservative random media, the present work has developed two numerical models. The first is a one-dimensional (1D) stack of dielectric layers with random widths separated by empty space. This model was developed to compute the transmission ($T$) and the energy stored inside the medium (${\cal E}$) for the candidate criterion $T/{\cal E}$ in nonconservative media~\cite{2010_Payne_TE,2010_Payne_loc_criterion}. The ratio $T/{\cal E}$ was verified to be nondivergent, as expected, even as the amount of gain approaches the lasing threshold. The 1D system was used because AL is known to occur in it while diffusion is not possible; thus, the observed effects cannot be due to diffusion.
% based on work of slab with nonconservative media in diffusive regime
% T/E related to D(z)
% (T_g/E_g)E_p reduces to g in passive media
Since diffusion is not possible in 1D systems, a planar quasi-1D metallic waveguide model with randomly placed scattering potentials was developed to study the simplest diffusion-AL transition and to investigate the other listed criteria ($D(z)$, inverse participation ratio, $T/{\cal E}$). This model is necessary because, even for passive systems, the literature offers no plot of $D(z)$ in the diffusive regime (cf. Fig.~\ref{fig:Dz_passive}).% and based on Maxwell's equations.
\begin{figure}
\vskip -0.5cm
\centerline{
\scalebox{.5}{\includegraphics{pictures/Dz_passive}}}
\vskip -0.5cm
% NOTE: if a short caption is needed for figure list, use
%\caption[short desk]{long desk}
\caption[Position-dependent diffusion coefficient~$D(z)$ as predicted by the self-consistent theory of localization (smooth red curves) and numeric results (rough blue lines) for quasi-1D waveguides with randomly placed passive scattering potentials for varying system length~$L$, constant scatterer density, and width~$W$.]{Position-dependent diffusion coefficient $D(z)$ as predicted by the self-consistent theory of localization (smooth red curves) and numeric results (rough blue lines) for quasi-1D waveguides with randomly placed passive scattering potentials for varying system length~$L$, constant scatterer density, and width~$W$. Very good agreement is found for the ballistic ($L=100 \lambda$), diffusive, and localized ($L=800 \lambda$) regimes. Here $\ell$ is the transport mean free path, and $z_0$ is the penetration depth.}
\label{fig:Dz_passive}
\end{figure}
%Method: How am I numerically modeling these criteria?}
To develop numerical models that can simulate wave transport in nonconservative media for individual realizations of disorder, this work implements the transfer matrix method~\cite{1981_MacKinnon_scaling,1992_Pendry,2003_Kettemann}
% note: when MacKinnon cites tmm, he uses 1981_MacKinnon_scaling and
% MacKinnon A 1994 J. Phys. Condensed Matter 6, 2511
for the entire waveguide. Essentially, the transfer matrix method matches boundary conditions before and after an event in a system where wave modes are quantized. Not only is the quasi-1D geometry experimentally viable~\cite{2009_Genack_PRB}, it also provides a convenient theoretical framework~\cite{1982_Dorokhov_DMPK,1988_Mello_Kumar_DMPK}. Here, waveguides described as ``quasi-1D'' have the following characteristics: (1)~quantized transverse modes due to boundary conditions, expressed as $E(y=0,W;\forall z)=0$ as for metallic edges, (2)~waveguide width $W$ less than $\ell_{tmfp}$ so that no significant transverse propagation occurs, and (3)~aspect ratio ($L:W$) is not fixed (i.e., $W$ is constant when $L$ is increased, with a fixed disorder density). Further, the propagation is confined to two dimensions in order to study a single polarization of electromagnetic radiation.
As shown in Appendix~\ref{sec:appendix_derivation_transfer_matrices_quasi1d}, the differential wave equation
\begin{equation}
\nabla^2 E(\vec{r}) = - \frac{\omega^2}{c^2} E(\vec{r})
\label{eq:wave_equation_electric_field_introduction}
\end{equation}
can be separated into perpendicular and parallel components (resolving wave vector $\vec{k}$ into $k_{\perp}$ and $k_{\parallel}$). Once the electric field solution is found, scattering potentials are introduced, initially as $\delta$ functions. The derivation of the transfer matrix method is \textit{ab initio} based on Maxwell's equations~\cite{1999_Jackson}, and no assumption about transport mean free path is made.
For light waves, transverse wave quantization means that the modes of an electric field and its derivative can be written in the
form of a vector. The translation of that field vector through a dielectric-filled space or past a scattering potential is described by a matrix whose rank depends on the number of transverse modes of the waveguide (cf. Appendix~\ref{sec:appendix_derivation_transfer_matrices_quasi1d}). In 1D, the transfer matrix method takes the initial electric field $E_0$ and its derivative $E_0^{\prime}$ and translates them to the field and its derivative a distance $\Delta x$ away:
\begin{equation}
\left( \begin{array}{cc}
t_{11} & t_{12} \\
t_{21} & t_{22} \\
\end{array} \right)
\left( \begin{array}{c}
E_0 \\
E_0^{\prime}
\end{array} \right)
=
\left( \begin{array}{c}
E_{\Delta x} \\
E_{\Delta x}^{\prime}
\end{array} \right).
\end{equation}
Multiple scattering events are combined as $\hat{T}_{total} = \prod_i \hat{T}_i$. The product describes the effect of the medium on the transport of the incident light. Since the transfer matrices have finite rank, the scattering potentials used are actually a finite summation of Fourier components of the $\delta$ function. Although the purpose of the numerical model is a study of photonic transport in nonconservative media, the resulting electric field magnitude, plotted in Fig.~\ref{fig:electric_field_zoomed}, is a secondary benefit.
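To make the bookkeeping concrete, the following Python sketch implements the 1D procedure just described under simplifying assumptions (unit wavelength, pointlike scatterers of strength \texttt{u}, plane-wave leads); it is a toy illustration, not the quasi-1D production code with self-embedding:
\begin{verbatim}
import numpy as np

k = 2 * np.pi                 # wavenumber for wavelength 1 (assumed)
u = 2.0                       # delta-scatterer strength (assumed)
L_sys = 20.0
rng = np.random.default_rng(1)
z_scat = np.sort(rng.uniform(0.0, L_sys, size=20))

def propagate(dx):
    # Free-space transfer matrix for (E, E') with E'' = -k^2 E; det = 1
    return np.array([[np.cos(k * dx),      np.sin(k * dx) / k],
                     [-k * np.sin(k * dx), np.cos(k * dx)]])

scatter = np.array([[1.0, 0.0], [u, 1.0]])   # E' jumps by u*E at a scatterer

M, z = np.eye(2), 0.0
for zs in z_scat:             # accumulate T_total = product of T_i
    M = scatter @ propagate(zs - z) @ M
    z = zs
M = propagate(L_sys - z) @ M

# Match plane waves: E = exp(ikz) + r exp(-ikz) at input, t exp(ikz) at output
A = np.array([[M[0, 0] - 1j * k * M[0, 1], -1.0],
              [M[1, 0] - 1j * k * M[1, 1], -1j * k]])
b = -np.array([M[0, 0] + 1j * k * M[0, 1],
               M[1, 0] + 1j * k * M[1, 1]])
r, t = np.linalg.solve(A, b)
print("T =", abs(t) ** 2, " R+T =", abs(r) ** 2 + abs(t) ** 2,
      " det =", np.linalg.det(M))   # flux conservation and det checks
\end{verbatim}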
\begin{figure}
\vskip -0.5cm
\centerline{
\scalebox{.47}{\includegraphics{pictures/electric_field_resonant_freq_zoom_normalized_z_box_arrows}}}
\vskip -0.5cm
% NOTE: if a short caption is needed for figure list, use
%\caption[short desk]{long desk}
\caption[Magnitude of electric field inside a quasi-1D waveguide for passive media in the diffusive regime.]{Magnitude of the electric field inside a quasi-1D waveguide for passive media in the diffusive regime. The midsection of the waveguide is shown (from $z/L=80/200$ to $z/L=120/200$) for a resonant frequency (higher than average transmission). The spatially varying field intensity (with continuous-wave incident flux) demonstrates interesting microscopic behavior, even though the system is in the diffusive regime.
\label{fig:electric_field_zoomed}}
% The Poynting vector can plotted
% [don't say this because you aren't including a picture of the Poynting vectors.]
\end{figure}
The transfer matrix method is used in the field of transport~\cite{2007_Froufe-Perez_PRE}, but its application is usually limited either to RMT for perturbative studies or to the diffusive regime. These limitations arise because the multiplication of numerical matrices becomes inaccurate as eigenvalues of the product diverge~\cite{1968_Osedelec}.
% A simple showing of this would be nice
The numerical inaccuracy is detectable since each transfer matrix has determinant unity. % (due to conservation of flux?)
The product of the matrices must retain a determinant of unity since
%citation would be nice here, but most linear algebra books leave it as an exercise to the reader
%\begin{equation}
$\det(\hat{A})\det(\hat{B})=\det(\hat{A}\hat{B})$.
%\end{equation}
A self-embedding technique renormalizes the divergent eigenvalues and makes this approach feasible~\cite{1999_yamilov_selfembed,1976_Bellman_Wing_embedding}.
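The loss of accuracy is easy to demonstrate in a few lines. The toy Python example below (an illustration of the numerical issue only, not of the self-embedding algorithm) repeatedly multiplies determinant-one matrices with eigenvalues $e^{\pm 1}$; the computed determinant of the product drifts away from unity once the divergent eigenvalue exhausts double precision:
\begin{verbatim}
import numpy as np

a = 1.0   # each factor has eigenvalues exp(+a), exp(-a) and det = 1
step = np.array([[np.cosh(a), np.sinh(a)],
                 [np.sinh(a), np.cosh(a)]])

M = np.eye(2)
for n in range(1, 41):
    M = step @ M
    if n % 10 == 0:
        # exact determinant is 1; cancellation error grows ~ exp(2n)*eps
        print(n, np.linalg.det(M) - 1.0)
\end{verbatim}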
%Although self-embedding technique applies to any numerical multiplication of many matrices, it is applied here to waveguides. %1D and planar quasi-1D waveguide geometries.
%In passive media, conservation of energy implies $T+R=1$ which is checked for validity of results.
The reliability of the transfer matrix method with self-embedding is demonstrated by comparing numerical simulation results for the average unitless conductance $\langle g \rangle$ versus the variance var$(g)$ to data from a theoretical supersymmetry-based approach~\cite{2000_Mirlin}. With no fitting parameters, there is very good agreement (cf. Fig.~\ref{fig:Mirlin_supersymmetry_g_varg}). Similarly, the diffusion coefficient from numerical simulation of passive media matches the expected $D(z)$ (cf. Fig.~\ref{fig:Dz_passive}).
\begin{figure}[t]
\vskip -0.5cm
\centerline{
\scalebox{.5}{\includegraphics{pictures/var_g_versus_g_no_closed_channels}}}
\vskip -0.5cm
% NOTE: if a short caption is needed for figure list, use
%\caption[short desk]{long desk}
\caption[Theoretical prediction based on supersymmetric approach for average unitless conductance $g$ versus variance of $g$ for quasi-1D waveguide~\cite{2000_Mirlin} compared to results from numerical simulations described in Section~\ref{sec:method_numerical}.]
{Theoretical prediction based on the supersymmetric approach for average unitless conductance $g$ versus variance of $g$ for a quasi-1D waveguide~\cite{2000_Mirlin} compared to results from the numerical simulations described in Section~\ref{sec:method_numerical}. No fitting parameters are used, and good agreement is found. The factor of $15/2$ accounts for the geometry of the waveguide. $\langle g \rangle$ and var$(g)$ were determined from many realizations of random media for waveguides of two different widths (the number of open channels $N_{open}$ is set by $W$) and varying system length $L$. The supersymmetry-based approach assumes the limit of an infinite number of propagating modes, but $N_{open}$ equal to 10 and 20 is sufficient.\label{fig:Mirlin_supersymmetry_g_varg}}
\end{figure}
\section{OUTLINE OF TRANSPORT REGIMES}
\label{sec:twod_plot}
To guide the study of the extension of the three passive regimes into nonconservative media, a two-parameter diagram (cf.~Fig.~\ref{fig:regime_plot_main}) % Spring semester 2009, Ben and Dr Yamilov
enumerates the types of transport behavior. The first parameter is the system length $L$, which is varied while the disorder density and waveguide width are held constant. The second parameter is the gain or absorption strength. The two-parameter plot is needed to define specific signatures of diffusion and AL. The chapters that follow use the numerical model of waveguides to verify transitions between types of transport and to characterize the behavior of LCs such as the proposed $T/{\cal E}$ in nonconservative random media.
A single-valued parameter such as~$T/{\cal E}$ is useful even in this two-parameter space because it indicates whether the diffusion or the AL description applies to transport. However, not all single-valued LCs are applicable to these systems because most observable parameters diverge as the RLT is approached with increasing gain. Before $T/{\cal E}$ can assign an experiment to the diffusive or the localized side, the behavior on both sides must be defined; currently, no clear definitions of AL or diffusive behavior exist for nonconservative random media.
%\section{what is the plan?}
Figure~\ref{fig:regime_plot_main} describes the types of transport in quasi-1D waveguides with random media; it has the three passive regimes, ballistic (\textbf{B}), diffusive (\textbf{D}), and localized (\textbf{L}), on the horizontal axis and gain (\textbf{G}) or absorption (\textbf{A}) strength on the vertical axis. The two-letter combinations on the plot denote regimes of specific behavior. The passive regime transitions (B/D/L) are characterized by the transport mean free path~$\ell_{tmfp}$ and the localization length~$\xi$, as described in Section~\ref{sec:thesis_statement}. All lengths are normalized by the wavelength~$\lambda$.
\begin{figure}[t]
\vskip -.8cm
% when scalebox=0.65, -2 gives the figure alone on page, -3cm give no margin at the top.
% when scalebox=0.65, -.8 is figure alone
% when scalebox=0.45, -2 gives not enough margin at top; -.5 and -.8 looks good
\centerline{
\scalebox{.65}{ % 0.65 would be largest
\includegraphics{pictures/regimes_plot_main}}}
\vskip -0.5cm
\caption[Various types of transport phenomena denoted by two-letter abbreviations (see text for explanation).]{Various types of transport phenomena denoted by two-letter abbreviations (see text for explanation). Each region is a permutation of the inequality of relevant characteristic lengths. Passive (conservative) transport regimes are on the horizontal axis, assuming constant disorder density and varying system length $L$. Plotted vertically, amounts of absorption or gain (nonconservative media) increase with distance away from the passive system horizontal axis.
%Regime plot for quasi-1D random media. Lines denote transitions between regions of similar transport behavior; each region is denoted by a two-letter abbreviation. See text for explanation.
\label{fig:regime_plot_main}
}
\end{figure}
%It is important to note that characteristic lengths are defined in specific regimes. For example,
%For brevity, only absorption is referred to, but the concepts apply equivalently to gain.
The absorption (gain) rate $\gamma_{a,g}$ is the average number of absorption (gain) events per unit time, where an event refers to a particle being removed (doubled) along a specific path. The absorption (gain) rate is the inverse of the absorption (gain) lifetime,
%\begin{equation}
$\gamma_{a,g} = \frac{1}{\tau_{a,g}}$,
%\end{equation}
where $\tau_{a,g}$ is the average propagation time of the particle before it is absorbed (doubled). The averaging is over many random particle paths. Given a characteristic time $\tau_{a,g}$, the characteristic absorption or gain length is
%\begin{equation}
$\ell_{a,g} = \tau_{a,g} c$,
%\end{equation}
where $c$ is the propagation speed of the particle. This characteristic length is the average distance, measured along the path, prior to absorption (doubling). $\ell_{a,g}$ is determined from the time-dependent diffusion equation in one dimension,
\begin{equation}
D \frac{\partial^2 I}{\partial z^2} = \frac{\partial I}{\partial t},
\label{eq:diffusion_equation_1D}
\end{equation}
to be
% see /svn/bens/lab_notebook/20090422_dr_Yamilov_mtg_regime_change_for_plot.pdf
% equation 37
\begin{equation}
\ell_{a,g}= \left( \frac{d}{\pi^2}\right) \frac{L^2}{\ell_{tmfp}}.
\label{eq:ballistic_gain_abs}
\end{equation}
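This result can be motivated as follows (a sketch of the standard diffusion argument, not a derivation taken from the references): the lowest diffusion mode of a sample of length $L$ decays with lifetime $\tau_D = L^2/(\pi^2 D)$, and with the diffusion coefficient $D = c\,\ell_{tmfp}/d$ the corresponding path length is
\begin{equation*}
c\,\tau_D = \left(\frac{d}{\pi^2}\right)\frac{L^2}{\ell_{tmfp}},
\end{equation*}
which reproduces Eq.~\ref{eq:ballistic_gain_abs} when $\ell_{a,g}=c\,\tau_{a,g}$ is evaluated at $\tau_{a,g}=\tau_D$.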
However, $\ell_{a,g}$ was already defined in the ballistic regime as the average length after which the particle is no longer present in a ballistic system due to absorption (or has been doubled when gain is present). The system length $L$ (how far the particle would have traveled along a ballistic path) should therefore be replaced by a new diffusive-regime length, $\xi_{a,g}$. Eq.~\ref{eq:ballistic_gain_abs} can then be solved for $\xi_{a,g}$:
\begin{equation}
\xi_{a,g} = \sqrt{\frac{\ell_{a,g} \ell_{tmfp}}{d}}.
\label{eq:diffusive_absorption_length}
\end{equation}
Physically, $\xi_{a,g}$ is the average length after which the particle is no longer present in a multiple-scattering system. To distinguish the two absorption (gain) lengths: $\xi_{a,g}$ is measured with respect to the system length $L$, whereas $\ell_{a,g}$ is measured with respect to the path length $L_D$. If $L$ is equal to $L_D$, then no diffusion is occurring and $\ell_{a,g}$ is equal to $\xi_{a,g}$. Usually, the literature does not distinguish between an absorption length measured with respect to the diffusive path length $L_D$ and one measured with respect to the system length $L$. There are two reasons for this ambiguity: first, $L_D$ is experimentally harder to measure than $L$; second, the regime to which the various lengths apply is generally not specified.
For localized systems, it no longer makes sense to measure lengths with respect to path length since wave effects are dominant (i.e., ray optics do not apply). In this regime $\xi_{a,g}$ is used, but it is not defined in terms of $\ell_{a,g}$ as in Eq.~\ref{eq:diffusive_absorption_length}. The transition indicating whether absorption affects AL is set by $\xi_{a} = \xi$ (the horizontal line between $AD_3$ and $AL_3$ in Fig.~\ref{fig:regime_plot_main}). This transition in the diffusive regime is found by applying Eq.~\ref{eq:diffusive_absorption_length} to $\xi_{a,g} = \xi = N_{open} \ell_{tmfp}$ and solving
\begin{equation}
N_{open}^2 \ell_{tmfp}^2 = \frac{\ell_{tmfp}\ell_{a,g}}{d}
\end{equation}
to get $\ell_{a,g} = d N_{open}^2 \ell_{tmfp}$. For the diffusive regime, this line indicates how much absorption (gain) is necessary to distinguish the transport behavior from that of a passive system. The remaining curves in Fig.~\ref{fig:regime_plot_main} are derived from transitions in the density of states rather than from the characteristic lengths.
For passive media, the width of peaks in transmission with respect to frequency ($\delta \omega$ of the Thouless criterion in Eq.~\ref{eq:Thouless_passive}) is inversely proportional to the escape lifetime (the average time until an input leaves the system). To account for absorption or gain, an additional term is needed~\cite{2005_Yamilov_correlations} in the form of a rate:
%\begin{equation}
$\delta \omega +\gamma_{a,g}$.
%\end{equation}
Although the average level spacing $\Delta \omega$ also changes as a function of gain due to the Kramers-Kronig relation~\cite{1999_Jackson}, this perturbation can be disregarded since the amounts of gain and absorption of interest are small.
% boundaries
The Thouless criterion is adapted to nonconservative media by inclusion of the gain (sometimes referred to as negative absorption~\cite{1968_Letokhov}) or absorption rate $\gamma_{a,g}=\mp~c/\ell_{a,g}$:
\begin{equation}
\delta'=\frac{\delta \omega +\gamma_{a,g}}{\Delta \omega};
\label{eq:generalized_thouless}
\end{equation}
it is plotted as the red curve $\delta'=1$. Physically, this boundary indicates whether the width of the quasi-modes or the separation of spectral peaks is larger. An additional boundary introduced by nonconservative media occurs when absorption or gain overcomes the radiative leakage of an average quasi-mode of the system, plotted as the black curve $\pm \gamma_{a,g} = \delta \omega$. %The remaining boundaries are straight lines and are determined by enumerating permutations of characteristic length scale inequalities. For example, in the diffusive regime, $L$, $\xi$, $\ell_{tmfp}$, and $\ell_{a,g}$ form a minimum basis for distinguishing transport behavior.
%Enumerating all permutations of relevant length inequalities, a distinct set of transport behaviors has been found.
% caveat
Although each region is separated by a line in Fig.~\ref{fig:regime_plot_main}, the transition between regimes is actually continuous due to the use of many realizations of randomly placed scatterers. Given the boundaries between each region, two-letter abbreviations are defined for each unique transport behavior.
% transport behavior of each regime
In the ballistic regime $GB_1$, gain below ballistic lasing threshold is not expected to change transport behavior (and similarly for $AB_1$ when $\ell_a < L$). For a small amount of absorption or gain in regions $AD_1$ and $GD_1$, the diffusive transport is also expected to remain similar to passive media. The use of conditional statistics~\cite{2005_Yamilov_correlations} eliminates a small number of lasing media. With sufficient absorption, signatures of diffusion are reduced ($AD_2$) and suppressed ($AD_3$). In contrast, gain enhances fluctuations ($GL_1$) and leads to lasing ($GL_2$) on average for many realizations~\cite{1968_Letokhov}. Transport in region $GD_2$ is the equivalent of ``negative absorption'' in region $AD_2$. The remaining absorption regimes signify transition from distinct spectral peaks and leakage due to radiation ($AL_1$) to distinct spectral peaks with absorption dominating leakage ($AL_2$) to a continuous spectrum due to absorption with weak localization ($AL_3$).
% the hump of AL_2 is not separated because?
%The kink in the curves is the transition from diffusion based equations to Mirlin's projections~\cite{2000_Mirlin} in the localized regime.
% if I only have 10 minutes of the presentation, I will not have time for these regions
\begin{comment}
\begin{figure}
\vskip -0.5cm
\centerline{
\scalebox{.75}{\includegraphics{pictures/regimes_plot_upper}}}
\vskip -0.5cm
%\caption[Upper regime plot (strong absorption) for quasi-1D random media.]{Upper regime plot (strong absorption) for quasi-1D random media. Each region is associated with an inequality of lengths. See text for explanation.}
\label{fig:regime_plot_upper}
\end{figure}
\begin{figure}
\vskip -0.5cm
\centerline{
\scalebox{.75}{\includegraphics{pictures/regimes_plot_lower}}}
\vskip -0.5cm
%\caption{[Lower regime plot (strong gain) for quasi-1D random media.]Lower regime plot (strong gain) for quasi-1D random media. See text for explanation.}
\label{fig:regime_plot_lower}
\end{figure}
\end{comment}
% Note: analytical quasi-1D solution found by~\cite{1994_Beenakker_exact}
%I expect to be successful because I have sufficient tools (numerical model) and the plan is detailed and well-defined. Also, it is not too broad or too narrow.
To verify the boundaries and transport behaviors specified in Fig.~\ref{fig:regime_plot_main}, the numerical model for waveguides with random media is used to measure the criterion $T/{\cal E}$. In addition to determining the applicability of other LC such as $D(z)$, correlation functions, and the inverse participation ratio, this system makes possible the study of myriad other interesting topics. Examples include the effect of closed channels with gain~\cite{2010_Payne_closed}, wave front shaping~\cite{2008_Vellekoop_Mosk} to change transmission or focus field inside the medium, eigenmodes of transmission~\cite{1986_Imry}, and the visualization of Poynting vector field loops. The numerical model developed serves as a robust method for a comprehensive approach to investigating the transition from diffusion to AL for waveguides with nonconservative random media.
In this dissertation, each chapter is either published or in the process of submission to a peer-reviewed journal; thus each chapter has its own abstract, introduction, and conclusion. The first paper, Chapter~\ref{chap:TE_gain}, describes the applicability of the ratio of transmission to energy stored in a random medium as a criterion for localization. Although both of these quantities diverge in the presence of optical gain, their ratio for each random medium does not. This criterion is developed in the context of a diffusive slab and of a numerical model of one-dimensional layers of dielectric material. Since quasi-1D is the lowest dimensionality in which the transition from diffusion to Anderson localization occurs, a description of transport regimes in nonconservative quasi-1D media is needed. The second paper, Chapter~\ref{chap:regimes}, details the development of boundaries between transport regimes in the two-dimensional phase space for random media with gain and absorption. Another complication of the quasi-1D geometry is the inclusion of evanescent channels, which is studied in Chapter~\ref{chap:closed_channels}; the effect of including evanescent channels is found to be equivalent to renormalizing the transport mean free path. The last paper on random media in this dissertation, Chapter~\ref{chap:Dz_absorb}, demonstrates the validity of the position-dependent diffusion coefficient $D(z)$ in the localized regime and in systems with absorption.
The remaining two chapters cover media with correlated disorder. Although random media exhibit unusual behavior, reproducibility is desirable for manufacturing; thus algorithms specifying non-random disorder (deterministic aperiodic systems) are of interest. The Thue-Morse pattern has a singular continuous Fourier spectrum, but this does not directly predict its transport properties. Chapter~\ref{chap:TM_to_TB} maps the two-dimensional Thue-Morse pattern onto the tight-binding model. Chapter~\ref{eq:TM_physics} then covers the anomalous transport properties, i.e.,~the coexistence of localized and extended states, exhibited by the Thue-Morse pattern.
\section{Conclusion}
\label{JM:sec:CONC}
\textit{Julia} set out to offer high-performance computation while providing clean and easily readable syntax, tackling the \textit{two language problem} common
in today's software. As this report shows, most of these promises have been fulfilled. By focusing on essential features for numerical linear algebra in the core installation, it readily provides the necessary
tools to conduct state-of-the-art research. Multiple dispatch (MD) allows for effective reuse and extension of methods and types, leading to a growing number of
packages that form a densely connected ecosystem with specialized scopes but familiar notation.
Many of the provided algorithms reach high performance out of the box or are easily accelerated through accessible code or type transformations via macros. While being fast and offering a versatile collection of algorithms on its own, \textit{Julia} also offers wrapper packages from and to
several languages, enabling easy reuse of other sources and generation of APIs.
While it can be used as a general programming language, most of the recently published research still focuses on classical
fields like numerics, simulation and control, or physics; see e.g.~\cite{JMJarlebring2019, JMForetsEtAl2020, JMKalubaEtAl2021}.\\
Important missing features are support for generating standalone executables and hard real-time guarantees\footnote{To the best knowledge of the author.}. Exceptions, like \cite{JMKoolenDeits2019}, exist but are rare.
In addition, \textit{Julia} is still early in its maturation cycle, which lowers its acceptance rate and userbase; \textit{Python}, for comparison, became prominent around 2010, nearly 20 years after its initial release.
Moving to \textit{Julia} can also be challenging for the many new users who have been trained to code in an OOP paradigm.\\
The code used is available at \href{https://github.com/AlCap23/ScientificComputingSeminar2021}{GitHub}, along with the presentation.
\documentclass[12pt]{beamer}
\usepackage{amsmath}
\usepackage{xcolor}
\usepackage{subfigure}
\usepackage{bbm}
\usepackage{pgfpages}
\usepackage{tikz}
\usepackage{dcolumn}
\usepackage{booktabs}
\newcolumntype{M}[1]{D{.}{.}{1.#1}}
\usetikzlibrary{positioning,shapes,arrows}
\usepackage{pgf,pgfarrows,pgfnodes,pgfautomata,pgfheaps,pgfshade}
\usetheme{Air}
%\pgfpagesuselayout{4 on 1}[letter,border shrink=5mm]
%\pgfpageslogicalpageoptions{1}{border code=\pgfusepath{stroke}}
%\pgfpageslogicalpageoptions{2}{border code=\pgfusepath{stroke}}
%\pgfpageslogicalpageoptions{3}{border code=\pgfusepath{stroke}}
%\pgfpageslogicalpageoptions{4}{border code=\pgfusepath{stroke}}
\DeclareMathOperator*{\argmax}{arg\,max}
\title[ISMIR 2021 tutorial]{ISMIR 2021 Tutorial}
\logo{\pgfputat{\pgfxy(-0.5,7.5)}{\pgfbox[center,base]{\includegraphics[width=1.0cm]{figures/uvic}}}}
\beamertemplatenavigationsymbolsempty
\defbeamertemplate{footline}{author and page number}{%
\usebeamercolor[fg]{page number in head/foot}%
\usebeamerfont{page number in head/foot}%
\hspace{1em}\insertshortauthor\hfill%
\insertpagenumber\,/\,\insertpresentationendpage\kern1em\vskip2pt%
}
\setbeamertemplate{footline}[author and page number]{}
\subtitle[Teaching Music Information Retrieval]{Teaching Music Information Retrieval}
\date[2021]{2021}
\author[G. Tzanetakis]{George Tzanetakis}
\institute[University of Victoria]{University of Victoria}
%\logo{\includegraphics[scale=.25]{unilogo.pdf}}
\begin{document}
\frame{\maketitle} % <-- generate frame with title
\AtBeginSection[]
{
\begin{frame}<beamer>[allowframebreaks]{Table of Contents}
\tableofcontents[currentsection,currentsubsection,
hideothersubsections,
sectionstyle=show/shaded,
]
\end{frame}
}
%% NOTEBOOK EXAMPLES
%% measuring_amplitude *
%% monophonic pitch detection
%% matrix factorization
%% rhythm notation *
%% audiolabs-erlangen.de/resources/MIR/FMP/C3/C3_MusicSynchronization.html
%% THX sound from Steve Toja MIR notebooks
%% Lyrics classification
\section{Introduction}
\begin{frame}{Introduction}
Music Information Retrieval as a research field is now more than 20
years old. I have been involved in teaching MIR in undergraduate,
graduate, and tutorial courses to students from a variety of
disciplines for over 15 years. In this tutorial we will explore
different aspects of teaching MIR and share what I have learned over
the years about how to make the teaching of MIR topics more effective.
\end{frame}
\begin{frame}{Tutorial Structure}
There are 6 modules each with a duration of approximately 25 minutes
followed by 5 minutes of questions. Each unit consists of a set of
slides and in some cases some associated hands-on demonstrations in
the form of Jupyter/Python notebooks. The target audience is anyone
(professor, researcher, postdoc, graduate student) who is interested
in teaching MIR. I assume that participants are familiar with the main
ideas and tasks of MIR.
\end{frame}
\begin{frame}{Modules}
\begin{itemize}
\item Overview and organization
\item Adapting to a target audience and format
\item Online learning
\item Assessment
\item Projects
\item Resources
\end{itemize}
\end{frame}
\begin{frame}{Relevant Background}
\begin{itemize}
\item{Main focus of my research has been Music Information Retrieval (MIR)}
\item{Involved from the early days of the field}
\item{Have published papers in almost every ISMIR conference}
\item{Organized ISMIR in 2006}
\item{Tutorials on various MIR topics in several conferences}
\item{Taught MIR as a 4th year CS course 12 times}
\item{Kadenze MIR program (3 courses) with approximately 3000 participants}
\end{itemize}
\end{frame}
\begin{frame}{Education and Academic Work Experience}
\begin{itemize}
\item{1997 BSc in Computer Science (CS), University of Crete, Greece}
\item{1999 MA in CS, Princeton University, USA}
\item{2002 PhD in CS, Princeton University, USA}
\item{2003 PostDoc in CS, Carnegie Mellon University, USA}
\item{2004 Assistant Professor in CS, Univ. of Victoria, Canada}
\item{2010 Associate Professor in CS, Univ. of Victoria, Canada}
\item{2016 Professor in CS, Univ. of VIctoria, Canada}
\item{2010-2020 Canada Research Chair (Tier II) in Computer Analysis of Audio and Music}
\item{Music theory, saxophone and piano performance, composition,
improvisation both in conservatory and academic settings}
\end{itemize}
\end{frame}
\begin{frame}{Work Experience beyond Academia}
Many internships in research labs throughout studies. Several
consulting jobs while in academia. A few representative examples:
\begin{itemize}
\item{Moodlogic Inc (2000). Designed and developed one of the earliest audio fingerprinting systems (patented) - 100000 users matching to 1.5 million songs}
\item{Teligence Inc (2005). Automatic male/female voice discrimination for voice messages used in popular phone dating sites - processing of 20000+ recordings per day.}
\item{Smule (2015-present) - various MIR related projects}
\end{itemize}
\end{frame}
\begin{frame}{Software - Marsyas}
\begin{itemize}
\item{Music Analysis, Retrieval and Synthesis for Audio Signals}
\item{Open source in C++ with Python Bindings}
\item{Started by me in 1999 - core team approximately 4-5 developers}
\item{Approximately 400 downloads per month}
\item{Many projects in industry and academia}
\item{State-of-the-art performance while frequently orders of
magnitude faster than other systems}
\item{Not actively developed for the last 5-6 years}
\end{itemize}
\end{frame}
\begin{frame}{Visiting Scientist at Google Research}
Six month study leave. Things I worked on (of course as part of larger teams):
\begin{itemize}
\item{Cover Song Detection (applied to every uploaded YouTube video).}
\item{Audio Fingerprinting}
\item{Named inventor on 6 pending US patents related to audio matching and fingerprinting}
\end{itemize}
\end{frame}
\section{Overview - Organization}
\begin{frame}{UVic MIR Course - Learning outcomes}
\begin{itemize}
\item{Basic knowledge of DSP}
\item{Basic knowledge of Machine Learning (ML)}
\item{Basic knowledge of Music Theory}
\item{Familiarity with the basic tasks that have been
explored in MIR research and the algorithms used to solve them}
\item{{\bf Being able to read and understand the majority
of published literature in ISMIR}}
\item{Experience with designing and developing MIR algorithms and
systems}
\end{itemize}
\end{frame}
\begin{frame}{UVic MIR Course - Lectures}
\begin{itemize}
\item{2 lectures 1.5 hours /week}
\item{3 hours of associated homework expected for each lecture}
\item{Total weekly commitment approximately 9-10 hours}
\item{6 assignments, each worth $10\%$ of the final grade and done individually.}
\item{Assignments typically will combine some reading and understanding of MIR literature as well as some programming of MIR algorithms}
\item{1 final group project (2-3 students per group) $40\%$}
\end{itemize}
\end{frame}
\begin{frame}{History of MIR before computers}
How did a listener encounter a new piece of music throughout history?
\begin{itemize}
\item{Live performance}
\item{Music Notation}
\item{Physical recording}
\item{Radio}
\end{itemize}
\end{frame}
\begin{frame}{Brief History of computer MIR}
\begin{itemize}
\item{Pre-history ($<2000$): scattered papers in various communities. Symbolic
processing mostly in digital libraries and information retrieval venues
and audio processing (less explored) mostly in acoustics and DSP venues.}
\item{The birth $2000$: first International symposium on Music Information
Retrieval (ISMIR) with funding from NSF Digital Libraries II initiative
organized by J. Stephen Downie, Tim Crawford, and Don Byrd. First contact between the symbolic and the audio sides.}
\item{2000-2006: Rapid growth of ISMIR}
\item{2006-2014: Slower growth and steady state}
\item{2015-2021: Maturation - robust industry involvement}
\end{itemize}
\end{frame}
\begin{frame}{Teaching Organization}
One of the main challenges of teaching MIR is how to
structure/organize the material to be taught. A good organization
should be comprehensive (i.e., cover most MIR tasks/topics),
incremental (concepts should be introduced gradually in a logical
fashion), flexible (it should be possible to emphasize or skip
different topics), and balanced.
In the following slides I will discuss some possible ways to
organize MIR for teaching purposes based on my extensive experience
and experimentation.
\end{frame}
\begin{frame}{Organize by stages/specificity}
\begin{itemize}
\item{Stages}
\begin{itemize}
\item{Representation/Hearing}
\item{Analysis/Learning}
\item{Interaction/Action}
\end{itemize}
\item{Specificity}
\begin{itemize}
\item{Audio fingerprinting}
\item{Common score performance}
\item{Cover song detection}
\item{Artist identification}
\item{Genre classification}
\item{Recommendation ? }
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}{Organize based on data}
Data sources:
\begin{itemize}
\item{Audio}
\item{Track metadata}
\item{Score}
\item{Lyrics}
\item{Reviews}
\item{Ratings}
\item{Download patterns}
\item{Micro-blogging}
\end{itemize}
\end{frame}
\begin{frame}{MIR Tasks}
\begin{itemize}
\item{Similarity retrieval, playlists, recommendation}
\item{Classification and clustering}
\item{Tag annotation}
\item{Rhythm, melody, chords}
\item{Music transcription and source separation}
\item{Query by humming}
\item{Symbolic MIR}
\item{Segmentation, structure, alignment}
\item{Watermarking, fingerprinting and cover song detection}
\end{itemize}
\end{frame}
\begin{frame}{Audio content analysis - A. Lerch}
\begin{itemize}
\item{Fundamentals (signals, sampling, quantization, convolution, blocking, fourier transform, correlation)}
\item{Instantaneous features (statistical features, spectral features, post-processing, dimensionality reduction)}
\item{Intensity and Loudness}
\item{Tonal Analysis (pitch, monophonic pitch detection, polyphonic pitch tracking, tuning estimation, key recognition, chord detection)}
\item{Temporal Analysis (onset detection, tempo and beat detection, downbeat, rhythm description)}
\item{Audio Alignment (dynamic time warping, audio/score alignment)}
\item{Music Classification (genre, similarity, mood)}
\item{Audio Fingerprinting}
\end{itemize}
\end{frame}
\begin{frame}{Fundamentals of Music Processing - Meinard M\"{u}ller}
\begin{itemize}
\item{Music Representations}
\item{Fourier Analysis of Signals}
\item{Music Synchronization}
\item{Music Structure Analysis}
\item{Chord Recognition}
\item{Tempo and Beat Tracking}
\item{Content-based Audio Retrieval}
\item{Musically Informed Audio Decomposition}
\end{itemize}
\end{frame}
\begin{frame}{Kadenze online MIR program}
\begin{itemize}
\item{Extracting Information from Music Signals}
\item{Machine Learning for MIR}
\item{Music Information Retrieval Systems}
\end{itemize}
\end{frame}
\begin{frame}{Extracting Information from Music Signals}
\begin{itemize}
\item{Time, frequency, and sinusoids}
\item{DFT and Time-Frequency Representations}
\item{Monophonic Pitch Detection}
\item{Audio Feature Extraction}
\item{Rhythmic Analysis}
\end{itemize}
\end{frame}
\begin{frame}{Machine Learning for MIR}
\begin{itemize}
\item{Supervised Learning and Naive Bayes Classification}
\item{Discriminative Classifiers}
\item{Genre Classification}
\item{Emotion Recognition and Regression}
\item{Tags}
\item{Music Visualization}
\end{itemize}
\end{frame}
\begin{frame}{Music Retrieval Systems}
\begin{itemize}
\item Query Retrieval
\item Polyphonic Alignment and Structure Segmentation
\item Chord Detection and Cover Song Identification
\item Transcription and Sound Source Separation
\item Audio Fingerprinting and Watermarking
\end{itemize}
\end{frame}
\begin{frame}{Other topics}
\begin{itemize}
\item{Optical Music Recognition (OMR)}
\item{Symbolic Music Retrieval}
\item{MIR for live music performance}
\item{Computer-assisted music pedagogy}
\item{Computational Ethnomusicology}
\item{MIR for music production}
\item{Natural language processing for MIR}
\end{itemize}
\end{frame}
\begin{frame}{Notebook break I}
An important piece of pedagogical advice is to use multiple alternating
modes of delivery when teaching a concept. Notebooks combine text and
snippets of code structured in cells that can be executed
individually. Originally popularized by Mathematica, they are
nowadays very popular as a way of introducing concepts. For the last
couple of years I have frequently utilized Python notebooks in my
teaching, and throughout this tutorial I will show some representative
examples; a minimal sketch follows on the next slide.
\end{frame}
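\begin{frame}[fragile]{Notebook example: measuring amplitude (sketch)}
A minimal, self-contained sketch of the kind of cell used in the
``measuring amplitude'' notebook; the test signal and frame size are
illustrative assumptions.
\begin{verbatim}
import numpy as np

sr = 22050                             # sample rate (Hz)
t = np.arange(0, 1.0, 1 / sr)
y = 0.5 * np.sin(2 * np.pi * 440 * t)  # a 440 Hz tone

frame = 1024                           # samples per analysis frame
n = len(y) // frame
rms = np.sqrt((y[:n * frame].reshape(n, frame) ** 2).mean(axis=1))
print(rms[:5])                         # ~0.354 = 0.5 / sqrt(2)
\end{verbatim}
\end{frame}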
\begin{frame}{Organization: Discussion/Questions}
Any thoughts/questions/discussion regarding the first module ``Organization''? (5-10 minutes)
\end{frame}
\section{Adapting to a target audience}
\begin{frame}{Adaptation}
Another challenge when teaching MIR is the diversity of the students.
MIR is an interdisciplinary field and that can mean that the students
can come from different disciplines and can have very different backgrounds.
In this module we will go over some strategies for adapting
the material for different audiences and providing supports
to students with different backgrounds.
\end{frame}
\begin{frame}{Interdisciplinary Research}
Inherently inter-disciplinary and cross-disciplinary work. Connecting
theme: making computers better understand music to create more
effective interactions with musicians and listeners.
\begin{columns}
\column{0.5\textwidth}
\begin{itemize}
\item{Music Information Retrieval}
\item{Digital Signal Processing}
\item{Machine Learning}
\item{Human-Computer Interaction}
\item{Software Engineering}
\end{itemize}
\column{0.5\textwidth}
\begin{itemize}
\item{Artificial Intelligence}
\item{Multimedia}
\item{Robotics}
\item{Visualization}
\item{Programming Languages}
\end{itemize}
\end{columns}
\end{frame}
\begin{frame}{EDI}
\begin{itemize}
\item{{\bf Equity} - everyone is treated fairly and equally}
\item{{\bf Diversity} - value differences}
\item{{\bf Inclusion} - everyone feels supported and integrated}
\end{itemize}
As instructors, how can we support EDI?
\end{frame}
\begin{frame}{Some ideas for supporting EDI}
\begin{itemize}
\item{Focus and value diversity - for example go over a list of past projects that are as diverse in terms of topic as possible }
\item{Consent form to share student material with future offerings/students}
\item{Highlight examples of researchers from underrepresented groups}
\item{Encourage student participation}
\item{Model constructive and respectful feedback}
\item{Optional educational supports, different style deliverables}
\item{Interdisciplinary field means everyone will be outside their comfort zone}
\item{Peer mentorship}
\end{itemize}
\end{frame}
\begin{frame}{Backgrounds}
MIR students can come from many different backgrounds. Let's look
at some representative examples and what they might lack:
\begin{itemize}
\item{{\bf Computer Science:} good programming and ML skills, not as comfortable with mathematical notation and numerical computing, no formal music knowledge}
\item{{\bf Electrical and Computer Engineering:} good mathematical notation
and numerical programming, maybe some ML, not as experienced with general programming, no formal music knowledge}
\item{{\bf Music:} little experience with programming, intimidated by mathematical notation, good knowledge of music, writing skills}
\end{itemize}
\end{frame}
\begin{frame}{Music 101}
Some basic knowledge about music notation and theory is useful in
understanding a lot of MIR research.
\begin{itemize}
\item{Absolute and relative encoding of pitch/intervals}
\item{Common western music relative rhythm notation}
\item{Chords/Tonality/Key}
\item{Very briefly scales, harmony, counterpoint, instrumentation}
\item{Excellent tutorial ISMIR 2021: Practical Music Theory for MIR researchers}
\end{itemize}
\end{frame}
\begin{frame}{Programming 101-A}
The biggest challenge with teaching programming to musicians
is to convince them that it is not that hard. There are many
excellent tutorials available for any programming language/environment.
Some thoughts:
\begin{itemize}
\item{Ask the students to change the tutorial examples they encounter to be more about music. For example, instead of finding the maximum of a list of numbers, find the highest note in a melody (see the next slide)}
\item{Music21 is a great environment to introduce Python programming
to music students as they can easily see and hear the results of symbolic music manipulations}
\item{Try to provide skeleton code with increasing amounts of student involvement}
\end{itemize}
\end{frame}
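\begin{frame}[fragile]{Example: highest note in a melody (sketch)}
A minimal sketch of the ``highest note'' exercise using music21; the
melody itself is an illustrative assumption.
\begin{verbatim}
from music21 import note

melody = [note.Note(p) for p in
          ["C4", "E4", "G4", "B3", "D5", "A4"]]

# max over MIDI numbers instead of a plain list of integers
highest = max(melody, key=lambda n: n.pitch.midi)
print(highest.nameWithOctave)   # D5
\end{verbatim}
\end{frame}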
\begin{frame}{Programming 101-B}
\begin{itemize}
\item{Jupyter notebooks provide means of introducing interactivity and programming}
\item{Some music students are familiar with visual programming environments like Max/MSP. This can be leveraged to get them going with text-based programming}
\end{itemize}
\end{frame}
\begin{frame}{Mathematics 101}
Understanding of mathematics is a long process and in many cases
music students or even CS students feel that it is not necessary.
Some thoughts:
\begin{itemize}
\item{Decouple notation from concepts}
\item{Three views: concrete toy example, computer code, mathematical expression (sketched on the next slide)}
\item{Read papers with emphasis on writing and notation not content}
\item{Basic vector/matrix notation from linear algebra, probabilities/stats}
\item{Khan Academy}
\end{itemize}
\end{frame}
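\begin{frame}[fragile]{Three views of one concept (sketch)}
An illustrative example of the ``three views'' idea, applied to the mean:
\begin{itemize}
\item{Concrete toy example: the mean of $2, 4, 6$ is $4$}
\item{Mathematical expression: $\bar{x} = \frac{1}{N}\sum_{n=1}^{N} x_n$}
\end{itemize}
\begin{verbatim}
x = [2, 4, 6]
x_bar = sum(x) / len(x)   # mirrors (1/N) * sum over x_n
print(x_bar)              # 4.0
\end{verbatim}
\end{frame}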
\begin{frame}{Active Learning}
The term {\bf active learning} is used to describe a variety of
approaches that differ from the traditional lecture mode
of delivering courses. Active learning interventions include:
\begin{itemize}
\item{Group problem-solving}
\item{Worksheets completed in class}
\item{Use of polls/clickers}
\item{Studio/workshop course design}
\item{Mock conference/peer review}
\item{Live coding}
\end{itemize}
\end{frame}
\begin{frame}{Class Size}
Class size can have a significant effect on how to deliver a
course. Smaller class sizes ($<$30-40 students) allow more
discussion/interaction and do not require as much preparation for
assessment. Bigger class sizes ($>$40 students) pose different challenges.
Ideas for large class sizes:
\begin{itemize}
\item{Labs can function as smaller classes}
\item{More scaffolding in learning materials}
\item{More standardized assignments}
\item{Organized communication in public forums with themes}
\item{Detailed weekly workplans}
\end{itemize}
\end{frame}
\begin{frame}{Evolving over time}
It is important to constantly evolve both the content and how it is
delivered to reflect the rapid changes in MIR research and the surrounding
software context.
\begin{itemize}
\item{2000 - The birth of Marsyas - self-contained code base for MIR, DSP and ML, C++ from scratch}
\item{2003 - Weka (and SVMs) comes to the scene for ML}
\item{MIREX 2007 - Marsyas submissions (92 citations)}
\item{2010 - sklearn}
\item{2014 - librosa, mir\_eval}
\item{2016 - shift to notebooks for teaching}
\item{2018 - more and more datasets}
\item{2019 - Spleeter}
\end{itemize}
\end{frame}
\begin{frame}{Notebook break II}
Let's look at a notebook for explaining the basics of rhythm notation
to students without a formal music background, and another one explaining
matrix factorization to students without previous linear algebra skills
(a minimal factorization sketch follows on the next slide).
\end{frame}
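\begin{frame}[fragile]{Matrix factorization example (sketch)}
A minimal sketch of non-negative matrix factorization with scikit-learn;
the toy matrix stands in for a magnitude spectrogram.
\begin{verbatim}
import numpy as np
from sklearn.decomposition import NMF

# Toy "spectrogram": two templates active at different times
V = np.array([[1, 1, 0, 0],
              [1, 1, 0, 0],
              [0, 0, 2, 2],
              [0, 0, 2, 2]], dtype=float)

model = NMF(n_components=2, init="random", random_state=0)
W = model.fit_transform(V)    # templates (frequency basis)
H = model.components_         # activations over time
print(np.round(W @ H, 2))     # approximately reconstructs V
\end{verbatim}
\end{frame}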
\begin{frame}{Adaptation: Discussion/Questions}
Any thoughts/questions/discussion regarding the second module ``Adaptation''? (5-10 minutes)
\end{frame}
\section{Online learning}
\begin{frame}{Online learning}
Online learning has been around for a long time. Massive Open Online
Courses (MOOCs) rose to prominence around 2012. Despite initial claims
that they signalled the death of brick-and-mortar university teaching,
they have simply become another mode of learning. Covid and the
resulting transition to remote instruction have renewed interest in
online learning. In this module we will discuss strategies
for effective online teaching of MIR.
\end{frame}
\begin{frame}{MOOC critiques}
``In fact, the absence of serious pedagogy in MOOCs is rather striking,
their essential feature being short, unsophisticated video chunks,
interleaved with online quizzes, and accompanied by social
networking.'' --- Moshe Vardi

Many online learners seek very specific answers to particular
problems they encounter rather than more general learning of concepts.
For example, they might look up the syntax of Python list comprehensions
but not care about the more general ideas of functional programming.
\end{frame}
%% 50
\begin{frame}{My personal favorite online learning experience}
Open Studio is an online learning resource for jazz that I discovered
and have followed regularly throughout the pandemic. It is very different
from a MOOC. Some observations:
\begin{itemize}
\item{As much as I would love to, I can't find the time to practice properly}
\item{Regular live activities - guided practice sessions}
\item{Sense of community}
\item{Good production quality but also informal interaction}
\item{Feeling that everyone is part of a journey}
\item{Regular free social media presence to attract new members}
\end{itemize}
\end{frame}
\begin{frame}{Categories of online learners}
\begin{itemize}
\item{Auditors - watch all videos, few quizzes/exams}
\item{Completers - most videos, most quizzes/exams}
\item{Disengaged - quickly drop the course}
\item{Sampling - occasionally watch specific lectures}
\end{itemize}
\end{frame}
\begin{frame}{Flipped classroom}
Traditionally, students are introduced to new content in class and
then work on assignments and projects independently at home. A flipped
classroom instead introduces the content at home and has students
practice working through the material at school; it is a type of blended
learning. Flipping a classroom requires providing video recordings of
lectures as well as other support material so that students can learn
concepts at their own pace. A common issue is students not viewing the
material. In 2018 I taught MIR completely as a flipped course with 2 hours
of contact per week, in which I answered questions, had discussions,
and did some tutorials. The outcome was mixed: students were much more
engaged and asked lots of questions, but they said they would have
liked some more traditional lecturing.
\end{frame}
\begin{frame}{Recording video}
Video recording is essential for online learning. Unfortunately, the
standards for video quality and production keep rising, driven by the
professional content-creator ecosystem. Some pragmatic advice:
\begin{itemize}
\item{Video recording full lectures can be useful for review but
does not work very well for online learning}
\item{Either edit well or not at all}
\item{Shorter (5-7 minutes) videos of concepts are better}
\item{Change of camera - close up, zoom out}
\item{Alternate talking, hand-writing, live-coding, slides}
\item{Scripting and tele-prompting}
\item{Repeated recording}
\end{itemize}
\end{frame}
\begin{frame}{Research-enriched teaching}
Research-enriched teaching is a term used to describe
university level teaching in which the experience of conducting research is
interleaved with the teaching of concepts.
\begin{itemize}
\item{Present papers by previous students in the course}
\item{Invite previous students as guest lecturers}
\item{Invite industry researchers}
\item{Present industry-led papers}
\item{Connect to commercial products}
\item{Use paper templates for project reports}
\item{Conference style peer reviewing}
\end{itemize}
\end{frame}
\begin{frame}{Scaffolding}
Scaffolding provides multiple supports in the form of worked
examples, incremental increases in difficulty, live demonstrations,
and experimentation to assist with the learning of concepts. It also
refers to breaking a complicated concept into smaller units.
At the extreme it becomes hand holding or spoon feeding.
\begin{itemize}
\item{Show and tell - active coding}
\item{Tap into prior knowledge}
\item{Give time to talk}
\item{Pre-teach vocabulary/notation}
\item{Pause, pause, review, pause}
\end{itemize}
\end{frame}
\begin{frame}{Expanding audience - Initial experiment}
In the Spring of 2014 I started video recording my MIR
lectures (simply doing screencasting and Google Hangouts/YouTube)
and invited external participants to the course. Overall it was
a positive experience despite the low production quality, with
more than 300 external participants engaging with the course
by viewing videos and participating in discussion. No assessment/work
was offered to the external participants.
\end{frame}
\begin{frame}{Expanding audience - Kadenze course}
This experience led to the development of a full online
MIR program consisting of 3 courses offered through Kadenze Inc.
\url{https://www.kadenze.com/programs/music-information-retrieval}
The videos were recorded in the Spring of 2016 but the course was
finally made public in 2020.

{\bf Challenges:} contract agreement with the university, privacy issues,
grading issues.
\end{frame}
\begin{frame}{Notebook break III}
An alternative to video recordings is providing a more complete,
self-contained, detailed notebook. The FMP (Fundamentals of Music
Processing) notebooks are a great example of such a teaching resource.
Let's look at the notebook for music synchronization as an example
(a minimal DTW sketch follows on the next slide).
\end{frame}
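\begin{frame}[fragile]{Notebook break III -- DTW Sketch}
A minimal dynamic time warping sketch for music synchronization,
assuming librosa; the random ``chroma'' features and the 2:1 tempo
ratio are illustrative:
\begin{verbatim}
import numpy as np
import librosa

# Two chroma-like feature sequences; Y is X at half the tempo
rng = np.random.default_rng(1)
X = rng.random((12, 20))      # 12 chroma bins, 20 frames
Y = np.repeat(X, 2, axis=1)   # same content, 40 frames

# DTW finds a warping path aligning frames of X and Y
D, wp = librosa.sequence.dtw(X=X, Y=Y, metric="cosine")
print(wp[::-1][:5])           # first few (X frame, Y frame) pairs
\end{verbatim}
\end{frame}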
\begin{frame}{Online Learning: Discussion/Questions}
Any thoughts/questions/discussion regarding the module ``Online Learning''? (5-10 minutes)
\end{frame}
\section{Assessment}
% degree of difficulty labeling, self-reflection, peer review
% from model solutions to peer critiques, project scaffolding,
%
\begin{frame}{Assessment}
Assessment is a big and unavoidable challenge in teaching.
Ideally, assessment should be comprehensive and properly capture
the knowledge and ability of students in a particular topic. The more
multi-faceted this assessment is, the more likely it is that it will
correlate better with real world performance. It is important to talk
to students about assessment and share how difficult it is. It is also
important to differentiate between assessment and feedback although
obviously they are related.
\end{frame}
\begin{frame}{Assessment Overfitting}
Standardized assessment, especially face-to-face timed exams, can be a
poor indicator of actual knowledge or performance. At the same time,
such exams are less prone to academic integrity violations and are
perceived as more objective and fair. The current trend in education
research is to emphasize learning outcomes and well-defined rubrics,
but there is a danger of putting too much effort into teaching for the
assessment rather than for the knowledge. Motivation, curiosity, and
creativity are not fostered when only material that is going to be on
the exam is covered. I call this assessment overfitting, in analogy to
ML overfitting. It is more likely to happen with a flipped classroom
style of instruction.
\end{frame}
\begin{frame}{Academic Integrity}
Academic integrity violations are increasing with the shift to online
teaching.
\begin{itemize}
\item{Contract cheating}
\item{Public code repositories (GitHub)}
\item{Honest and pragmatic discussion is nearly impossible}
\item{Build trust and respect rather than blame}
\item{Copy but understand - self respect is critical}
\end{itemize}
\end{frame}
\begin{frame}{Degree of Difficulty (my magic bullet)}
An important and fundamental challenge with assessment is calibration
to student ability. If the tasks are too hard or too easy, the result
is frustration. It is important to stress that each student
is unique and has their own story. Degree-of-difficulty labeling
refers to explicitly labeling assessed work (exam questions, assignments,
quizzes) and even reading materials in terms of difficulty.
\begin{itemize}
\item{Basic ($40\%$), Expected ($50\%$), Advanced ($10\%$)}
\end{itemize}
\end{frame}
\begin{frame}{Self-reflection}
A good way to ensure personalized course work is to include
some self-reflection activities. These could take the form
of a blog, a journal, or a discussion board. Reading papers and code
and writing about what was read is another good strategy.
One of the challenges with self-reflection is that it takes more
time to grade and does not scale easily to large classes.
\end{frame}
\begin{frame}{Peer review}
Peer review is a great way to make the students take more ownership of
their work and learn to provide constructive feedback. It can be challenging
to administer in large courses. Also some institutions have restrictions on
using student feedback for grading.
\end{frame}
\begin{frame}{From model solutions to peer exposure and review}
The classic approach for assignments is to provide a model solution.
A better approach is to critique (anonymized) student work and contrast
different possible solutions. It is also useful for students to contrast
their work with that of other students, especially when the assessment is subjective.
\end{frame}
\begin{frame}{Auto-grading}
Auto-grading is trivial for simple questions such as multiple choice
or keyword-matching quizzes. However, it becomes considerably more complex
and challenging as the deliverables get more complicated. A more
complex setup is to have various unit tests for a piece of code
(some shared with students and some withheld). My ideal situation
would be a problem generator that personalizes assignments
to students, followed by more flexible similarity checking. For example,
a monophonic melody could be randomly generated using samples, then
automatic pitch extraction could be applied, followed by comparing the
results with those of a state-of-the-art pitch detector. The similarity
threshold would have to be calibrated, but MIR techniques can be used
for this purpose (a minimal sketch follows on the next slide).
\end{frame}
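\begin{frame}[fragile]{Auto-grading -- Generator Sketch}
A minimal sketch of the personalized-melody idea, assuming numpy and
librosa; the note pool, durations, and the choice of pYIN as the
reference detector are all illustrative assumptions:
\begin{verbatim}
import numpy as np
import librosa

sr = 22050
rng = np.random.default_rng(42)   # per-student seed
midi = rng.choice([60, 62, 64, 67, 69], size=4)
y = np.concatenate([
    0.5 * np.sin(2 * np.pi * librosa.midi_to_hz(m)
                 * np.arange(int(0.5 * sr)) / sr)
    for m in midi])               # 4 half-second sine notes

# Reference pitch track from pYIN; a student submission would
# then be graded by its (calibrated) similarity to f0
f0, voiced, probs = librosa.pyin(
    y, fmin=librosa.note_to_hz("C2"),
    fmax=librosa.note_to_hz("C7"), sr=sr)
\end{verbatim}
\end{frame}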
\begin{frame}{Rubrics and feedback}
A well-defined rubric specifies in detail how a deliverable will be
graded. It can help students, instructors, and teaching assistants
be fair and consistent. If well designed, it can also be used to provide
feedback to the student. It is extremely challenging to design good rubrics.
\begin{frame}{Gamification}
Awards, prizes, badges - recycling of journals, student voting
\end{frame}
\begin{frame}{Notebook break IV}
For assignments, an effective possibility is to base them on existing
notebook/code resources. Let's look at a particular example for
monophonic pitch detection from the Kadenze program.
\end{frame}
\begin{frame}{Assessment: Discussion/Questions}
Any thoughts/questions/discussion regarding the module ``Assessment''? (5-10 minutes)
\end{frame}
\section{Projects}
\begin{frame}{Group projects}
The most interesting (for the students) and most challenging aspect
of teaching a MIR course is the completion of a group project. One of my
most satisfying experiences as a MIR teacher has been to see some of these
projects evolve into successful ISMIR papers. In this module, we will
go over some of the logistics of group projects, look at some examples,
and discuss instructor strategies for making them successful.
\end{frame}
\begin{frame}{Group formation and size}
Group projects are challenging but rewarding. Optimal group size can vary.
In small classes groups of 2 are feasible. Groups of 3--4 students are more
common with larger classes. Bigger groups might be a necessity with really
large classes but almost always create workload-balance tensions. Some ideas:
\begin{itemize}
\item{Random group formation}
\item{Student-led group formation}
\item{Project-based group formation}
\item{Multi-phase with reshuffling}
\item{Pre-defined roles/responsibilities}
\item{Trade-off between detailed specification and creative freedom}
\end{itemize}
\end{frame}
\begin{frame}{Project deliverables}
\begin{itemize}
\item{Design Specification $15\%$}
\item{Progress Report $15\%$}
\item{Final report $20\%$}
\item{Presentation/video $10\%$}
\end{itemize}
\end{frame}
\begin{frame}{Project stages}
At least one of these stages should be non-trivial. With more software
frameworks and datasets available, it is easier to create more sophisticated projects.
\begin{itemize}
\item{Problem specification, data collection and ground truth annotation}
\item{Information extraction and analysis}
\item{Implementation and interaction}
\item{Evaluation}
\end{itemize}
\end{frame}
\begin{frame}{Example projects}
Several projects in the UVic MIR course evolved into ISMIR publications.
Some examples:
\begin{itemize}
\item{Query-by-beatboxing (2006)}
\item{Stereo panning (2008)}
\item{Examining DJ ordering of playlists (2013)}
\item{Curriculum learning for automatic chord recognition (2021)}
\end{itemize}
\end{frame}
\begin{frame}{Notebook break V}
Matrix factorization notebook
\end{frame}
\begin{frame}{Projects: Discussion/Questions}
Any thoughts/questions/discussion regarding the module ``Projects''? (5-10 minutes)
\end{frame}
\section{Resources}
\begin{frame}{Teaching Resources}
As MIR evolves as a research field, there are more and more
resources such as books, overview articles, software frameworks and
tools, and datasets that can be used to support teaching. In this module,
we go over some of my personal favorites. This is by no means a comprehensive
list of resources but rather some representative examples from what I use
in my own teaching with some discussion of what I like about them.
\end{frame}
\begin{frame}{Books}
\begin{itemize}
\item Fundamentals of Music Processing - Meinard M\"uller
\item An Introduction to Audio Content Analysis - Alexander Lerch
\item Music Data Mining - Li, Ogihara, Tzanetakis (editors)
\item Digital Signal Processing Primer - Ken Steiglitz
\item Music Similarity and Retrieval - Peter Knees, Markus Schedl
\end{itemize}
\end{frame}
\begin{frame}{Overview papers}
Good overview papers can be a great resource for MIR students.
A favorite example:
``Automatic chord estimation from audio: A review of the state of the art'', 2014. M McVicar, R Santos-Rodríguez, Y Ni, T De Bie
\end{frame}
\begin{frame}{MIR Software Frameworks}
\begin{itemize}
\item{Essentia}
\item{MIR Toolbox}
\item{librosa}
\item{Marsyas}
\item{Music21}
\end{itemize}
\end{frame}
\begin{frame}{MIR Task Software}
\begin{itemize}
\item{Spleeter}
\item{musicnn}
\item{madmom}
\item{melodia}
\end{itemize}
\end{frame}
\begin{frame}{MIR Tools}
\begin{itemize}
\item{mirdata}
\item{jams}
\item{mir\_eval}
\end{itemize}
\end{frame}
\begin{frame}{MIR GUIs}
\begin{itemize}
\item{Sonic Visualiser/Vamp plugins}
\item{Audacity}
\item{Tony}
\item{DAWs such as Ableton Live, Reaper, Plugins}
\item{Max/MSP, PureData}
\item{Game Engines}
\end{itemize}
\end{frame}
\begin{frame}{Other Software}
\begin{itemize}
\item{Weka}
\item{scikit-learn}
\item{tensorflow}
\item{pytorch}
\item{pandas,numpy,scipy}
\item{Juce}
\end{itemize}
\end{frame}
\begin{frame}{Datasets}
\begin{itemize}
\item{GTZAN (historic)}
\item{FMA (large, medium, small)}
\item{Magnatagatune}
\item{Music4All}
\item{MTG-JAMENDO}
\item{mirdata}
\end{itemize}
\end{frame}
\begin{frame}{Reproducible research}
\begin{itemize}
\item{Provide code, data, plots}
\item{Work on multiple machines and with multiple people}
\item{It takes a lot of effort and time but can be worth it}
\item{Do not and do re-invent the wheel}
\item{Be part of an ecosystem}
\item{Open source license}
\item{Public source repository}
\item{Documentation, tutorials, examples}
\item{Marsyas: 581 citations, librosa: 1251 citations, scikit-learn: 47321, Essentia: 431}
\end{itemize}
\end{frame}
\begin{frame}{Teaching Materials}
\begin{itemize}
\item{\url{https://musicinformationretrieval.com/}}
\item{\url{https://ismir.net/resources/tutorials/}}
\item{\url{https://www.audiolabs-erlangen.de/resources/MIR/FMP/C0/C0.html}}
\item{\url{https://www.audiocontentanalysis.org/}}
\end{itemize}
\end{frame}
\begin{frame}{Environments}
\begin{itemize}
\item{Jupyter notebooks}
\item{Hosted environments: JupyterHub, Google Colab}
\item{Github}
\item{Overleaf}
\end{itemize}
\end{frame}
\begin{frame}{Notebook break VI}
THX Logo Notebook
\end{frame}
\begin{frame}{Resources: Discussion/Questions}
Any thoughts/questions/discussion regarding the module ``Resources''? (5-10 minutes)
\end{frame}
\section{Conclusions}
\begin{frame}{Conclusions}
\begin{itemize}
\item Teaching is a journey, not just a destination
\item Teaching is a garden, not just a journey
\item Experiment and question assumptions
\item Be honest and pragmatic with students
\item Focus on intuition and motivation
\item Teaching is more about pacing and filtering and less about content delivery
\end{itemize}
\end{frame}
\setbeamercolor{bibliography item}{fg=black}
\setbeamercolor*{bibliography entry title}{parent=palette primary}
\bibliographystyle{amsalpha}
\bibliography{dhsi_2014}
\end{document}
% \chapter{Control Network Toolkit}
% \label{ch:controlnettk}
% This chapter covers the first additional boost of tools for \ac{ASP}!
% Control Networks are a data structure that at their core contain image
% features that can be tracked in multiple images. Since these features
% can be tracked in multiple images, they can be triangulated and
% represent a 3D location. This control network can then be used in
% processes such as Bundle Adjustment using tools like
% \texttt{isis\_adjust} and \texttt{jigsaw}.
% \emph{Warning: This toolkit is not finished but this documentation
% hopes to allow some use in its early state.}
% The method of developing a control network with \ac{VW} and \ac{ASP}
% is that we try to track as many 'natural' features and then reduce
% into a control network. This is done in a 6 step process.
% \begin{itemize}
% \item Detect features using a second order filter \emph{(LoG)}.
% \item Describe these features by their surroundings with a tag.
% \item Match tags of interest points between images.
% \item Filter these matches for error using RANSAC.
% \item Reduce matches further for ease of processing and also to assure uniform distribution.
% \item Collect all pair-wise matches and write as Control Network.
% \end{itemize}
% I'm going to provide an example of how to use this software with
% Apollo Metric images. Hopefully you can follow along with whatever
% imagery you happen to have around. Note in the example below we use a
% lot of calls to \texttt{xargs}. That command is helpful for spawning
% multiple processes to work on a pool of jobs. The argument \emph{-P 10}
% states that at most it should have 10 processes running
% simultaneously. Lower that value to the number of cores available on
% your system.
% We are going to start out first by gathering interest points \emph{(or
% features)} and this is accomplished with a tool called
% \texttt{ipfind}. This handy tool is provided by \ac{VW} and is not
% built against ISIS. This means it can only read cube files through a
% library called GDAL. To make sure cube files can be read correctly by
% GDAL, and thus by ipfind, we'll have to convert our input imagery to
% something more reliable like TIFFs.
% \begin{verbatim}
% ISIS 3> isis2std from=INPUT to=OUTPUT.tif format=TIFF
% \end{verbatim}
% I don't like to call isis2std on every file myself. Here are the
% commands I use to do this in parallel.
% \begin{verbatim}
% ISIS 3> echo *.cub | xargs -n1 echo | awk -F "." '{print $1}' |
% xargs -n1 -P 10 -I {} isis2std from={}.cub to={}.tif format=TIFF
% \end{verbatim}
% The directory should now be filled with TIFF format doppelgangers. We
% are ready to perform \texttt{ipfind} which will detect features and
% describe them in one go. The results of each operation will be saved
% in a \texttt{.vwip} file which will later be read during
% matching. There are many algorithms that \texttt{ipfind} can use for
% detection and description but the defaults of OBALoG and SGrad should
% do fine for most situations. Also note that \texttt{ipfind} has a
% debug flag '-d' which can be used to output debug images that show the
% location of all detected features.
% \begin{verbatim}
% > echo *.tif | xargs -n1 -P 10 ipfind --max 10000
% \end{verbatim}
% Notice the options \emph{-{}-max 10000} used for the \texttt{ipfind}
% example. This is to limit the number of detected interest points so
% that the next step doesn't take too long.
% Matching is calculated pairwise. There are many ways to do this, but
% the simplest is just a brute force permutation of all possible
% combinations. Here's how:
% \begin{verbatim}
% > pairlist_all.py *.tif | xargs -n2 -P 10 ipmatch -r homography
% \end{verbatim}
% Alternatively we could just match between images that happen to be
% sequential by name.
% \begin{verbatim}
% > pairlist_seq.py *.tif | xargs -n2 -P 10 ipmatch -r homography
% \end{verbatim}
% Though for large data sets it doesn't seem appropriate to compare all
% images to each other as they physically don't see each other
% anyway. A good way of reducing the matches is by deciding to only
% calculate matches between images that are separated by no more than a
% few degrees. This is what \texttt{pairlist\_degree.py} can
% do. Internally it calls \texttt{camrange} and this can take a
% while. I'd recommend saving the output to a file before sending out to
% ipmatch.
% \begin{verbatim}
% ISIS 3> pairlist_degree.py *cub -a 10 -iext tif > pairs_to_process.lst
% ISIS 3> cat pairs_to_process.lst |
% xargs -n2 -P 10 ipmatch -r homography
% \end{verbatim}
% If you've been reading the output from ipmatch, you may have noticed
% that some pair-wise matches might have more than a 100 matches! This
% is probably overkill for some cases and could potentially choke an
% application further down the process. Also at this point, our interest
% points could be located anywhere. The worst case is that all matched
% features have managed to clump around interesting objects like a
% sparkly crater or a sharp cut rill. We can't enforce that the matched
% features are evenly placed, but we can trim them down to be somewhat
% even.
% Enter stage left, \texttt{reduce\_match}. This utility will thin down
% the matches so that they are evenly distributed. You specify the
% minimum and maximum amount of matches that can exist for a pair of
% images. Setting the maximum trims down excessive matches. The minimum
% however will actually delete match files that have too few matches. The
% filtering function RANSAC that is used to remove outliers does not
% have a graceful failure. When that algorithm fails it will return all
% outliers but usually with the minimum number of matches to solve for
% its fitting functor. Using a homography fitting functor means 8
% matches. We'll go ahead and delete anything 10 and under.
% \begin{verbatim}
% > echo *.match | xargs -n1 -P 10 reduce_match --min 10 --max 100
% \end{verbatim}
% Finally we are ready to collect all pairwise matches into a single
% control network. From the same directory that houses all match files
% and cube files, we will call the command
% \texttt{cnet\_build}. Assuming we want to build an ISIS style control
% network, here's how to perform the last step.
% \begin{verbatim}
% > cnet_build *.cub -t isis -o asp_control
% \end{verbatim}
% Let's go ahead and see how the results turned out in qnet!
% \begin{verbatim}
% ISIS 3> echo *.cub | xargs -n1 echo > cube.lst
% ISIS 3> qnet
% \end{verbatim}
% Inside qnet you'll want to click 'open'. On the first dialog you'll
% pick out the \emph{cube.lst} file we just created. On the second, we
% pick our created control network \emph{asp\_control.net}.
% \begin{center}
% \includegraphics[height=3.7in]{images/cnettk_qnet_screen_400px.png}
% \end{center}
% Occasionally you might find yourself not quite satisfied with results
% of Control Network Toolkit. Don't worry, we won't take it
% personally. However there are still some options available to you. You
% could do manual tie point additions with \texttt{qnet}. However, this
% can be a slow and scary process if you need to load up the entire
% control network. Instead you can just load up the problem images and
% do manual tie pointing. Then afterwards you could merge this smaller
% control network with the large control network you created with
% \texttt{cnet\_merge}.
% \begin{verbatim}
% > cnet_merge large.cnet small.net -o larger.cnet
% \end{verbatim}
% The above command will create an even larger control network which
% contains both \emph{large.cnet} and \emph{small.net}. This is helpful
% for filling in those spots where the automatic control network failed
% to work.
\input{../utils/slide-preamble1.tex}
\input{../utils/slide-preamble2.tex}
\input{../utils/macros.tex}
\bibliography{../bib/references}
\input{../utils/title-info.tex}
\title[Natural Selection]{Evolution by Natural Selection}
% \date{\today}
\date{April 2, 2015}
\begin{document}
\begin{noheadline}
\maketitle
\end{noheadline}
\nopost{
\begin{noheadline}
\begin{frame}[c]
\vspace{-6mm}
\begin{center}
\includegraphics[height=1.3\textheight]{../images/seating-chart.pdf}
\end{center}
\end{frame}
\end{noheadline}
}
\begin{noheadline}
\begin{frame}
\begin{clickerquestion}
\item Which of the following experiments would be the best way to test
the theory of evolution as formulated by Lamarck?
\begin{clickeroptions}
\item August Weismann kept mice in identical conditions in the lab
for 22 generations. Each generation, he cut off their tails.
\item Follow a population of mice over many generations and record
(a) any changes that occur in the environment, and (b) any
corresponding changes in mouse traits.
\item \clickeranswer{Keep a population of mice in cold conditions.
(In response, the mice will draw their tails up to conserve
heat.) Continue for many generations. Measure tail length
each generation.}
\item Keep a population of mice under identical conditions in the
lab. Each generation, only allow the mice with the shortest
tails to breed.
\end{clickeroptions}
\end{clickerquestion}
\wbox{Weismann actually did this! But, \#3 is more Lamarckian, because
the mice are making an effort}
\end{frame}
\end{noheadline}
\section{Evolution by Natural Selection}
\begin{noheadline}
\begin{frame}
\frametitle{Today's issues:}
\tableofcontents
\end{frame}
\end{noheadline}
\begin{frame}
Where do species come from, and how have they come to be so well adapted to
their environments?
\begin{quote}
\ldots a naturalist,
\uncover<2->{reflecting on the mutual affinities of organic beings,}
\uncover<3->{on their embryological relations,}
\uncover<4->{their geographical distribution,}
\uncover<5->{geological succession,}
\uncover<6->{and other such facts, might come to the conclusion that
each species had not been independently created, but had descended
\ldots from other species.}
\uncover<7->{Nevertheless, such a conclusion \ldots would be
unsatisfactory, until it could be shown}
\uncover<8->{\highlight{HOW} the innumerable species inhabiting this
world have been modified \ldots}
\end{quote}
\end{frame}
\begin{frame}[t]
\frametitle{The pattern}
\begin{description}
\item[Evolution:]
\hmask{The change in the characteristics of a population through time}
\end{description}
\note[item]{Pattern: How different from Lamarck? It is NOT progressive}
\end{frame}
\begin{frame}
\frametitle{The process}
Darwin’s four postulates explain why/how evolution occurs
\begin{enumerate}
\item<2-> Individuals within populations are variable
\vspace{1cm}
\item<2-> Some of these variations are passed on to offspring
\vspace{1cm}
\item<2-> Not all individuals produce the same number of offspring
\vspace{1cm}
\item<2-> Individuals with certain heritable traits produce the most
offspring
% \vspace{1cm}
\end{enumerate}
\wbox{For each postulate, find example from finches in the textbook, and
think about how you would test it?}
\note[item]{Process: How different from Lamarck? It is a process of sorting
individuals, not transforming individuals}
\note[item]{\textbf{Emphasize:} individual finches did not change}
\end{frame}
\begin{frame}
Distill the 4 postulates to their essence
\vspace{0.5cm}
Natural selection occurs when:
\vspace{0.5cm}
\begin{enumerate}
\item<2-> Heritable variation
\vspace{0.5cm}
\begin{itemize}
\item<3-> leads to
\end{itemize}
\vspace{0.5cm}
\item<4-> Differential reproductive success
\end{enumerate}
\note[item]{\textbf{Emphasize:} the focus should be on reproduction, not
survival!}
% After 2) emphasize that the focus should be on reproduction, not
% survival; sing It ain’t necessarily so
\vspace{0.5cm}
\uncover<5->{
\highlight{KEY POINT:} Evolution is simply an outcome (the pattern) of this
process
}
\end{frame}
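\begin{frame}[fragile]
\frametitle{A toy simulation of the process}
A minimal sketch (in Python, with made-up numbers) showing that
heritable variation plus differential reproductive success is enough to
shift a trait distribution:
\begin{verbatim}
import random

# Population of heritable trait values (e.g., beak depth in mm)
pop = [random.gauss(10.0, 1.0) for _ in range(500)]

for _ in range(20):  # 20 generations
    # Differential reproductive success: larger trait values
    # are weighted more heavily (fitness, not survival!)
    pop = random.choices(pop, weights=pop, k=500)
    # Offspring resemble parents, with some new variation
    pop = [p + random.gauss(0.0, 0.1) for p in pop]

print(sum(pop) / len(pop))  # mean trait value has increased
\end{verbatim}
No individual changes during the run; the population's distribution does.
\end{frame}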
\begin{frame}
\frametitle{Case history on evolution by natural selection}
\begin{itemize}
\item Work in groups of 3; middle person is the scribe
\item Be sure your names are legible
\item We're here to answer questions
\item When you've completed page 1, enter ``1'' on your clicker and
start the second page
\item TAs will collect the worksheets
\end{itemize}
\end{frame}
\begin{frame}
\begin{enumerate}%[Q 1:]
\item[Q 2.] Heritable variation in AZT resistance?
\vspace{1cm}
\item[Q 3.] Differential reproductive success with respect to AZT resistance?
\vspace{1cm}
\item[Q 4.] AZT resistance present prior to start of therapy?
\vspace{1cm}
\item[Q 5.] Do individual HIV particles change?
\end{enumerate}
\end{frame}
\begin{frame}
\frametitle{You need to be able to do this!}
For any example of change in the characteristics of a population or species
through time, you should be able to explain how it could happen based on:
\begin{enumerate}
\item<2-> \highlight{Heritable variation} in the trait in question, and
\item<3-> \highlight{Differential reproductive success}, based on
variation in the trait in question (in the context of the
\highlight{environment}!)
\end{enumerate}
\note[item]{Most important concept in 180 (and in biology as a whole)}
\end{frame}
\begin{frame}
In biology, how do these terms differ from how they are used in everyday
English?
\begin{description}
\item[Theory:] \ \\
\wbox{``speculation'' or something that's ``just an idea'' versus
``big'' hypothesis---one that is meant to explain a large chunk
of the natural world---some theories turn out to be wrong, some
turn out to be correct}
\item[Fitness:] \ \\
\wbox{Being buff versus reproductive success}
\vspace{1cm}
\item[Adaptation:] \ \\
\wbox{Individuals change in response to conditions versus a
heritable trait that increased fitness in a certain
environment. Everyday use is Lamarckian---``acclimation''}
\end{description}
\end{frame}
\section{Darwin's dilemma}
\begin{frame}
\frametitle{Darwin's dilemma: the problem of variation}
\uncover<1->{
The fact of evolution was accepted in the 1870s--1880s.
}
\bigskip
\uncover<2->{
Natural selection as an evolutionary process was controversial until the 1930s.
}
\begin{enumerate}
\item<3-> Selection will exhaust variation; Why?
\wbox{\footnotesize Advantageous alleles will go to fixation, then no more
variation and no more evolution}
% \vspace{2mm}
\item<4-> Blending inheritance will eliminate new, advantageous variants; why?
\wbox{\footnotesize New, rare advantageous variants will blend with the
common forms of the traits and dilute the advantage---this will
occur generation after generation until the advantageous trait
disappears.}
\end{enumerate}
\uncover<5->{- Darwin died thinking that his theory was in deep trouble}
\uncover<6->{- Solution had been published years before in 1865---Next
class!}
\end{frame}
\end{document}
\section{SOLVE Operator}\ttindex{SOLVE}
SOLVE is an operator for solving one or more simultaneous algebraic
equations. It is used with the syntax:
\begin{verbatim}
SOLVE(EXPRN:algebraic[,VAR:kernel|,VARLIST:list of kernels])
:list.
\end{verbatim}
{\tt EXPRN} is of the form {\tt <expression>} or
\{ {\tt <expression1>},{\tt <expression2>}, \dots \}. Each expression is an
algebraic equation, or is the difference of the two sides of the equation.
The second argument is either a kernel or a list of kernels representing
the unknowns in the system. This argument may be omitted if the number of
distinct, non-constant, top-level kernels equals the number of unknowns,
in which case these kernels are presumed to be the unknowns.
For one equation, {\tt SOLVE}\ttindex{SOLVE} recursively uses
factorization and decomposition, together with the known inverses of
{\tt LOG}, {\tt SIN}, {\tt COS}, {\tt \verb|^|}, {\tt ACOS}, {\tt ASIN}, and
linear, quadratic, cubic, quartic, or binomial factors. Solutions
of equations built with exponentials or logarithms are often
expressed in terms of Lambert's {\tt W} function.\index{Lambert's W}
This function is (partially) implemented in the special functions package.
Linear equations are solved by the multi-step elimination method due to
Bareiss, unless the switch {\tt CRAMER}\ttindex{CRAMER} is on, in which
case Cramer's method is used. The Bareiss method is usually more
efficient unless the system is large and dense.
Non-linear equations are solved using the Groebner basis package.
\index{Groebner} Users should note that this can be quite a
time-consuming process.
{\it Examples:}
\begin{verbatim}
solve(log(sin(x+3))^5 = 8,x);
solve(a*log(sin(x+3))^5 - b, sin(x+3));
solve({a*x+y=3,y=-2},{x,y});
\end{verbatim}
{\tt SOLVE} returns a list of solutions. If there is one unknown, each
solution is an equation for the unknown. If a complete solution was
found, the unknown will appear by itself on the left-hand side of the
equation. On the other hand, if the solve package could not find a
solution, the ``solution'' will be an equation for the unknown in terms
of the operator {\tt ROOT\_OF}\ttindex{ROOT\_OF}. If there
are several unknowns, each solution will be a list of equations for the
unknowns. For example,
\begin{verbatim}
solve(x^2=1,x); -> {X=-1,X=1}
solve(x^7-x^6+x^2=1,x)
6
-> {X=ROOT_OF(X_ + X_ + 1,X_,TAG_1),X=1}
solve({x+3y=7,y-x=1},{x,y}) -> {{X=1,Y=2}}.
\end{verbatim}
The TAG argument is used to uniquely identify those particular solutions.
Solution multiplicities are stored in the global variable {\tt
ROOT\_MULTIPLICITIES} rather than the solution list. The value of this
variable is a list of the multiplicities of the solutions for the last
call of {\tt SOLVE}. \ttindex{SOLVE} For example,
\begin{verbatim}
solve(x^2=2x-1,x); root_multiplicities;
\end{verbatim}
gives the results
\begin{verbatim}
{X=1}
{2}
\end{verbatim}
If you want the multiplicities explicitly displayed, the switch
{\tt MULTIPLICITIES}\ttindex{MULTIPLICITIES} can be turned on. For example
\begin{verbatim}
on multiplicities; solve(x^2=2x-1,x);
\end{verbatim}
yields the result
\begin{verbatim}
{X=1,X=1}
\end{verbatim}
\subsection{Handling of Undetermined Solutions}
When {\tt SOLVE} cannot find a solution to an equation, it normally
returns an equation for the relevant indeterminates in terms of the
operator {\tt ROOT\_OF}.\ttindex{ROOT\_OF} For example, the expression
\begin{verbatim}
solve(cos(x) + log(x),x);
\end{verbatim}
returns the result
\begin{verbatim}
{X=ROOT_OF(COS(X_) + LOG(X_),X_,TAG_1)} .
\end{verbatim}
An expression with a top-level {\tt ROOT\_OF} operator is implicitly a
list with an unknown number of elements (since we don't always know how
many solutions an equation has). If a substitution is made into such an
expression, closed form solutions can emerge. If this occurs, the {\tt
ROOT\_OF} construct is replaced by an operator {\tt ONE\_OF}.\ttindex{ONE\_OF}
At this point it is of course possible to transform the result of the
original {\tt SOLVE} operator expression into a standard {\tt SOLVE}
solution. To effect this, the operator {\tt EXPAND\_CASES}
\ttindex{EXPAND\_CASES} can be used.
The following example shows the use of these facilities:
\extendedmanual{\newpage}
\begin{verbatim}
solve(-a*x^3+a*x^2+x^4-x^3-4*x^2+4,x);
2 3
{X=ROOT_OF(A*X_ - X_ + 4*X_ + 4,X_,TAG_2),X=1}
sub(a=-1,ws);
{X=ONE_OF({2,-1,-2},TAG_2),X=1}
expand_cases ws;
{X=2,X=-1,X=-2,X=1}
\end{verbatim}
\subsection{Solutions of Equations Involving Cubics and Quartics}
Since roots of cubics and quartics can often be very messy, a switch
{\tt FULLROOTS}\ttindex{FULLROOTS} is available, that, when off (the
default), will prevent the production of a result in closed form. The
{\tt ROOT\_OF} construct will be used in this case instead.
In constructing the solutions of cubics and quartics, trigonometrical
forms are used where appropriate. This option is under the control of a
switch {\tt TRIGFORM},\ttindex{TRIGFORM} which is normally on.
The following example illustrates the use of these facilities:
\begin{verbatim}
let xx = solve(x^3+x+1,x);
xx;
3
{X=ROOT_OF(X_ + X_ + 1,X_)}
on fullroots;
xx;
- SQRT(31)*I
ATAN(---------------)
3*SQRT(3)
{X=(I*(SQRT(3)*SIN(-----------------------)
3
\end{verbatim}
\newpage
\begin{verbatim}
- SQRT(31)*I
ATAN(---------------)
3*SQRT(3)
- COS(-----------------------)))/SQRT(3),
3
- SQRT(31)*I
ATAN(---------------)
3*SQRT(3)
X=( - I*(SQRT(3)*SIN(-----------------------)
3
- SQRT(31)*I
ATAN(---------------)
3*SQRT(3)
+ COS(-----------------------)))/SQRT(
3
3),
- SQRT(31)*I
ATAN(---------------)
3*SQRT(3)
2*COS(-----------------------)*I
3
X=----------------------------------}
SQRT(3)
off trigform;
xx;
2/3
{X=( - (SQRT(31) - 3*SQRT(3)) *SQRT(3)*I
2/3 2/3
- (SQRT(31) - 3*SQRT(3)) - 2 *SQRT(3)*I
2/3 1/3 1/3
+ 2 )/(2*(SQRT(31) - 3*SQRT(3)) *6
1/6
*3 ),
2/3
X=((SQRT(31) - 3*SQRT(3)) *SQRT(3)*I
2/3 2/3
- (SQRT(31) - 3*SQRT(3)) + 2 *SQRT(3)*I
2/3 1/3 1/3
+ 2 )/(2*(SQRT(31) - 3*SQRT(3)) *6
1/6
*3 ),
2/3 2/3
(SQRT(31) - 3*SQRT(3)) - 2
X=-------------------------------------}
1/3 1/3 1/6
(SQRT(31) - 3*SQRT(3)) *6 *3
\end{verbatim}
\subsection{Other Options}
If {\tt SOLVESINGULAR}\ttindex{SOLVESINGULAR} is on (the default setting),
degenerate systems such as {\tt x+y=0}, {\tt 2x+2y=0} will be solved by
introducing appropriate arbitrary constants.
The consistent singular equation 0=0 or equations involving functions with
multiple inverses may introduce unique new indeterminant kernels
{\tt ARBCOMPLEX(j)}, or {\tt ARBINT(j)}, ($j$=1,2,...), % {\tt ARBREAL(j)},
representing arbitrary complex or integer numbers respectively. To
automatically select the principal branches, do {\tt off allbranch;} .
\ttindex{ALLBRANCH} To avoid the introduction of new indeterminant kernels
do {\tt OFF ARBVARS}\ttindex{ARBVARS} -- then no equations are generated for the free
variables and their original names are used to express the solution forms.
To suppress solutions of consistent singular equations do
{\tt OFF SOLVESINGULAR}.
To incorporate additional inverse functions do, for example:
\begin{verbatim}
put('sinh,'inverse,'asinh);
put('asinh,'inverse,'sinh);
\end{verbatim}
together with any desired simplification rules such as
\begin{verbatim}
for all x let sinh(asinh(x))=x, asinh(sinh(x))=x;
\end{verbatim}
For completeness, functions with non-unique inverses should be treated as
{\tt \verb|^|}, {\tt SIN}, and {\tt COS} are in the {\tt SOLVE}
\ttindex{SOLVE} module source.
Arguments of {\tt ASIN} and {\tt ACOS} are not checked to ensure that the
absolute value of the real part does not exceed 1; and arguments of
{\tt LOG} are not checked to ensure that the absolute value of the imaginary
part does not exceed $\pi$; but checks (perhaps involving user response
for non-numerical arguments) could be introduced using
{\tt LET}\ttindex{LET} statements for these operators.
\subsection{Parameters and Variable Dependency}
The proper design of a variable sequence
supplied as a second argument to {\tt SOLVE} is important
for the structure of the solution of an equation system.
Any unknown in the system
not in this list is considered totally free. E.g.\ the call
\begin{verbatim}
solve({x=2*z,z=2*y},{z});
\end{verbatim}
produces an empty list as a result because there is no function
$z=z(x,y)$ which fulfills both equations for arbitrary $x$ and $y$ values.
In such a case the share variable {\tt requirements}\ttindex{requirements}
displays a set of restrictions for the parameters of the system:
\begin{verbatim}
requirements;
{x - 4*y}
\end{verbatim}
The non-existence of a formal solution is caused by a
contradiction which disappears only if the parameters
of the initial system are set such that all members
of the requirements list take the value zero.
For a linear system the set is complete: a solution
of the requirements list makes the initial
system solvable. E.g.\ in the above case a substitution
$x=4y$ makes the equation set consistent. For a non-linear
system only one inconsistency is detected. If such a system
has more than one inconsistency, you must reduce them
one after the other.
\footnote{
The difference between linear and non--linear
inconsistent systems is based on the algorithms which
produce this information as a side effect when attempting
to find a formal solution; example:
$solve(\{x=a,x=b,y=c,y=d\},\{x,y\})$ gives a set $\{a-b,c-d\}$
while $solve(\{x^2=a,x^2=b,y^2=c,y^2=d\},\{x,y\})$ leads to $\{a-b\}$.
}
The set shows you also the dependency among the parameters: here
one of $x$ and $y$ is free and a formal solution of the system can be
computed by adding it to the variable list of {\tt solve}.
The requirement set is not unique -- there may be other such sets.
A system with parameters may have a formal solution, e.g.\
\begin{verbatim}
solve({x=a*z+1,0=b*z-y},{z,x});
y a*y + b
{{z=---,x=---------}}
b b
\end{verbatim}
which is not valid for all possible values of the parameters.
The variable {\tt assumptions}\ttindex{assumptions} contains then a list of
restrictions: the solutions are valid only as long
as none of these expressions vanishes. Any zero of one of them
represents a special case that is not covered by the
formal solution. In the above case the value is
\extendedmanual{\newpage}
\begin{verbatim}
assumptions;
{b}
\end{verbatim}
which excludes formally the case $b=0$; obviously this special
parameter value makes the system singular. The set of assumptions
is complete for both linear and non-linear systems.
{\tt SOLVE} rearranges the variable sequence
to reduce the (expected) computing time. This behavior is controlled
by the switch {\tt varopt}\ttindex{varopt}, which is on by default.
If it is turned off, the supplied variable sequence is used
or the system kernel ordering is taken if the variable
list is omitted. The effect is demonstrated by an example:
\begin{verbatim}
s:= {y^3+3x=0,x^2+y^2=1};
solve(s,{y,x});
6 2
{{y=root_of(y_ + 9*y_ - 9,y_),
3
- y
x=-------}}
3
off varopt; solve(s,{y,x});
6 4 2
{{x=root_of(x_ - 3*x_ + 12*x_ - 1,x_),
4 2
x*( - x + 2*x - 10)
y=-----------------------}}
3
\end{verbatim}
In the first case, {\tt solve} forms the solution as a set of
pairs $(y_i,x(y_i))$ because the degree of $x$ is higher --
such a rearrangement makes the internal computation of the Gr\"obner basis
generally faster. For the second case the explicitly given variable sequence
is used such that the solution has now the form $(x_i,y(x_i))$.
Controlling the variable sequence is especially important if
the system has one or more free variables.
As an alternative to turning off {\tt varopt}, a partial dependency among
the variables can be declared using the {\tt depend}\index{depend}
statement: {\tt solve} then rearranges the variable sequence but keeps any
variable ahead of those on which it depends.
\extendedmanual{\newpage}
\begin{verbatim}
on varopt;
s:={a^3+b,b^2+c}$
solve(s,{a,b,c});
3 6
{{a=arbcomplex(1),b= - a ,c= - a }}
depend a,c; depend b,c; solve(s,{a,b,c});
{{c=arbcomplex(2),
6
a=root_of(a_ + c,a_),
3
b= - a }}
\end{verbatim}
Here {\tt solve} is forced to put $c$ after $a$ and after $b$, but
there is no obstacle to interchanging $a$ and $b$.
\clearpage
\pagebreak
{
\centering
\Huge General Summary
\par
}
\paragraph{}
\Large
Aravinth is an interdisciplinary expert with 17+ years of global experience in science, engineering, arts and sustainability, with core expertise in research, development and production of electro-mechanical embedded systems and digital products.
As an engineer, he worked for 2 big corporations in the telecom sector and worked for 8 tech startups in the sector of education, smart home, media, agriculture, energy, healthcare \& water, in Asia \& Europe, where one of the hardware products was sold in 30K+ quantities and one of the software products reached 10M+ users.
As an entrepreneur, he co-founded 7 organisations in the sector of education, sustainability, agriculture, consulting \& engineering services, in Asia \& Europe.
As a scientist, he researched at 4 academic research institutions in the field of wireless technologies, autonomous vehicles, biometrics, humanoid robotics \& machine learning, in Germany.
As an educator, he trained 300+ young innovators and mentored 120+ deep tech startups in the domain of advanced materials, healthcare, agriculture, aerospace, energy, automotive \& biotechnology, globally.
As an advisor, he is affiliated with 23 international organisations and supported several individuals, startups, accelerators, incubators, innovation hubs, corporations, investors, manufacturers, research institutions, universities, government initiatives, art galleries and social enterprises, in more than 50 countries.
As an artist, he has co-created several new media art installations for temporary and permanent exhibitions, which attracted more than 1.5 million visitors, in Germany, Australia \& USA.
As a community builder, he co-created and has been curating several communities for startups, sustainability, education \& art, in Asia, Europe \& Africa.
\clearpage
\index{reference cells}
The following five reference cells are covered by the UFC specification:
the reference \emph{interval},
the reference \emph{triangle},
the reference \emph{quadrilateral},
the reference \emph{tetrahedron} and
the reference \emph{hexahedron} (see Table~\ref{tab:ufc_reference_cells}).
\begin{table}
\linespread{1.2}\selectfont
\begin{center}
\begin{tabular}{|l|c|c|c|}
\hline
Reference cell & Dimension & \#Vertices & \#Facets \\
\hline
\hline
The reference interval & 1 & 2 & 2 \\
\hline
The reference triangle & 2 & 3 & 3 \\
\hline
The reference quadrilateral & 2 & 4 & 4 \\
\hline
The reference tetrahedron & 3 & 4 & 4 \\
\hline
The reference hexahedron & 3 & 8 & 6 \\
\hline
\end{tabular}
\caption{Reference cells covered by the UFC specification.}
\label{tab:ufc_reference_cells}
\end{center}
\end{table}
The UFC specification assumes that each cell in a finite element mesh
is always isomorphic to one of the reference cells.
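For straight-sided simplex cells, the map from the reference cell to a
physical cell is affine: a reference point $X$ is mapped to
$x = v_0 + J X$, where the columns of $J$ are the edge vectors of the
physical cell emanating from its first vertex. The following sketch (in
Python, not part of the UFC interface; the function name and example
coordinates are hypothetical) illustrates this for the reference
triangle:
\begin{verbatim}
import numpy as np

def map_reference_triangle(vertices, X):
    """Map a reference-triangle point X to the physical triangle."""
    v = np.asarray(vertices, dtype=float)
    # Jacobian columns: edges from the first physical vertex
    J = np.column_stack((v[1] - v[0], v[2] - v[0]))
    return v[0] + J @ X

phys = [(0.0, 0.0), (2.0, 0.0), (0.0, 1.0)]
# The reference barycenter maps to the physical centroid
print(map_reference_triangle(phys, np.array([1 / 3, 1 / 3])))
\end{verbatim}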
\section{The reference interval}
\index{interval}
The reference interval is shown in Figure~\ref{fig:interval} and is
defined by its two vertices with coordinates as specified in
Table~\ref{tab:interval,vertices}.
\begin{figure}
\begin{center}
\psfrag{0}{$0$}
\psfrag{1}{$1$}
\includegraphics[width=10cm]{eps/interval.eps}
\caption{The reference interval.}
\label{fig:interval}
\end{center}
\end{figure}
\begin{table}
\linespread{1.2}\selectfont
\begin{center}
\begin{tabular}{|c|c|}
\hline
Vertex & Coordinate \\
\hline
\hline
$v_0$ & $x = 0$ \\
\hline
$v_1$ & $x = 1$ \\
\hline
\end{tabular}
\caption{Vertex coordinates of the reference interval.}
\label{tab:interval,vertices}
\end{center}
\end{table}
\section{The reference triangle}
\index{triangle}
The reference triangle is shown in Figure~\ref{fig:triangle} and is
defined by its three vertices with coordinates as specified in
Table~\ref{tab:triangle,vertices}.
\begin{figure}
\begin{center}
\psfrag{v0}{$(0, 0)$}
\psfrag{v1}{$(1, 0)$}
\psfrag{v2}{$(0, 1)$}
\includegraphics[width=8cm]{eps/triangle.eps}
\caption{The reference triangle.}
\label{fig:triangle}
\end{center}
\end{figure}
\begin{table}
\linespread{1.2}\selectfont
\begin{center}
\begin{tabular}{|c|c|}
\hline
Vertex & Coordinate \\
\hline
\hline
$v_0$ & $x = (0, 0)$ \\
\hline
$v_1$ & $x = (1, 0)$ \\
\hline
$v_2$ & $x = (0, 1)$ \\
\hline
\end{tabular}
\caption{Vertex coordinates of the reference triangle.}
\label{tab:triangle,vertices}
\end{center}
\end{table}
\section{The reference quadrilateral}
\index{quadrilateral}
The reference quadrilateral is shown in Figure~\ref{fig:quadrilateral}
and is defined by its four vertices with coordinates as specified in
Table~\ref{tab:quadrilateral,vertices}.
\begin{figure}
\begin{center}
\psfrag{v0}{$(0, 0)$}
\psfrag{v1}{$(1, 0)$}
\psfrag{v2}{$(1, 1)$}
\psfrag{v3}{$(0, 1)$}
\includegraphics[width=8cm]{eps/quadrilateral.eps}
\caption{The reference quadrilateral.}
\label{fig:quadrilateral}
\end{center}
\end{figure}
\begin{table}
\linespread{1.2}\selectfont
\begin{center}
\begin{tabular}{|c|c|}
\hline
Vertex & Coordinate \\
\hline
\hline
$v_0$ & $x = (0, 0)$ \\
\hline
$v_1$ & $x = (1, 0)$ \\
\hline
$v_2$ & $x = (1, 1)$ \\
\hline
$v_3$ & $x = (0, 1)$ \\
\hline
\end{tabular}
\caption{Vertex coordinates of the reference quadrilateral.}
\label{tab:quadrilateral,vertices}
\end{center}
\end{table}
\section{The reference tetrahedron}
\index{tetrahedron}
The reference tetrahedron is shown in Figure~\ref{fig:tetrahedron} and
is defined by its four vertices with coordinates as specified in
Table~\ref{tab:tetrahedron,vertices}.
\begin{figure}
\begin{center}
\psfrag{v0}{$(0, 0, 0)$}
\psfrag{v1}{$(1, 0, 0)$}
\psfrag{v2}{$(0, 1, 0)$}
\psfrag{v3}{$(0, 0, 1)$}
\includegraphics[width=6cm]{eps/tetrahedron.eps}
\caption{The reference tetrahedron.}
\label{fig:tetrahedron}
\end{center}
\end{figure}
\begin{table}
\linespread{1.2}\selectfont
\begin{center}
\begin{tabular}{|c|c|}
\hline
Vertex & Coordinate \\
\hline
\hline
$v_0$ & $x = (0, 0, 0)$ \\
\hline
$v_1$ & $x = (1, 0, 0)$ \\
\hline
$v_2$ & $x = (0, 1, 0)$ \\
\hline
$v_3$ & $x = (0, 0, 1)$ \\
\hline
\end{tabular}
\caption{Vertex coordinates of the reference tetrahedron.}
\label{tab:tetrahedron,vertices}
\end{center}
\end{table}
\section{The reference hexahedron}
\index{hexahedron}
The reference hexahedron is shown in Figure~\ref{fig:hexahedron} and
is defined by its eight vertices with coordinates as specified in
Table~\ref{tab:hexahedron,vertices}.
\begin{figure}
\linespread{1.2}\selectfont
\begin{center}
\psfrag{v0}{$(0, 0, 0)$}
\psfrag{v1}{$(1, 0, 0)$}
\psfrag{v2}{$(1, 1, 0)$}
\psfrag{v3}{$(0, 1, 0)$}
\psfrag{v4}{$(0, 0, 1)$}
\psfrag{v5}{$(1, 0, 1)$}
\psfrag{v6}{$(1, 1, 1)$}
\psfrag{v7}{$(0, 1, 1)$}
\includegraphics[width=9cm]{eps/hexahedron.eps}
\caption{The reference hexahedron.}
\label{fig:hexahedron}
\end{center}
\end{figure}
\begin{table}
\linespread{1.2}\selectfont
\begin{center}
\begin{tabular}{|c|c|}
\hline
Vertex & Coordinate \\
\hline
\hline
$v_0$ & $x = (0, 0, 0)$ \\
\hline
$v_1$ & $x = (1, 0, 0)$ \\
\hline
$v_2$ & $x = (1, 1, 0)$ \\
\hline
$v_3$ & $x = (0, 1, 0)$ \\
\hline
\end{tabular}
\begin{tabular}{|c|c|}
\hline
Vertex & Coordinate \\
\hline
\hline
$v_4$ & $x = (0, 0, 1)$ \\
\hline
$v_5$ & $x = (1, 0, 1)$ \\
\hline
$v_6$ & $x = (1, 1, 1)$ \\
\hline
$v_7$ & $x = (0, 1, 1)$ \\
\hline
\end{tabular}
\caption{Vertex coordinates of the reference hexahedron.}
\label{tab:hexahedron,vertices}
\end{center}
\end{table}
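For convenience, the vertex coordinates of all the reference cells above may be collected in code. The following is a small illustrative sketch (our own, not part of any interface described in this manual), using plain Java arrays ordered as in the tables above:
\begin{verbatim}
// Illustrative only: reference cell vertex coordinates.
class ReferenceCells {
    static final double[][] INTERVAL      = {{0}, {1}};
    static final double[][] TRIANGLE      = {{0, 0}, {1, 0}, {0, 1}};
    static final double[][] QUADRILATERAL = {{0, 0}, {1, 0},
                                             {1, 1}, {0, 1}};
    static final double[][] TETRAHEDRON   = {{0, 0, 0}, {1, 0, 0},
                                             {0, 1, 0}, {0, 0, 1}};
    static final double[][] HEXAHEDRON    = {{0, 0, 0}, {1, 0, 0},
                                             {1, 1, 0}, {0, 1, 0},
                                             {0, 0, 1}, {1, 0, 1},
                                             {1, 1, 1}, {0, 1, 1}};
}
\end{verbatim}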
\documentclass{llncs}
\def\shownotes{1}
\def\notesinmargins{0}
%\usepackage{fullpage}
\usepackage{mathtools,color,xcolor,hyperref,graphicx,wrapfig,listings,array,xspace}
\usepackage{caption}
\usepackage{subcaption}
\usepackage{tikz}
\usetikzlibrary{arrows}
\usepackage{amsfonts}
% https://tex.stackexchange.com/questions/11719/how-do-i-backcolor-in-verbatim
\usepackage{xcolor}
\usepackage{alltt}
% Compensate for fbox sep:
\newcommand\Hi[2][lightgray]{%
\hspace*{-\fboxsep}%
\colorbox{#1}{#2}%
\hspace*{-\fboxsep}%
}
\ifnum\shownotes=1
\ifnum\notesinmargins=1
\newcommand{\authnote}[2]{\marginpar{\parbox{\marginparwidth}{\tiny %
\textsf{#1 {\textcolor{blue}{notes: #2}}}}}%
\textcolor{blue}{\textbf{\dag}}}
\else
\newcommand{\authnote}[2]{
\textsf{#1 \textcolor{blue}{: #2}}}
\fi
\else
\newcommand{\authnote}[2]{}
\fi
\newcommand{\snote}[1]{{\authnote{\textcolor{red}{Scalahub notes}}{#1}}}
\newcommand{\knote}[1]{{\authnote{\textcolor{green}{kushti notes}}{#1}}}
\newcommand{\ret}{\mathsf{ret}}
\newcommand{\new}{\mathsf{new}}
\newcommand{\hnew}{h_\mathsf{new}}
\newcommand{\old}{\mathsf{old}}
\newcommand{\op}{\mathsf{op}}
\newcommand{\verifier}{\mathcal{V}}
\newcommand{\prover}{\mathcal{P}}
\newcommand{\key}{\mathsf{key}}
\newcommand{\nextkey}{\mathsf{nextKey}}
\newcommand{\node}{\mathsf{t}}
\newcommand{\parent}{\mathsf{p}}
\newcommand{\leaf}{\mathsf{f}}
\newcommand{\vl}{\mathsf{value}}
\newcommand{\balance}{\mathsf{balance}}
\newcommand{\lft}{\mathsf{left}}
\newcommand{\rgt}{\mathsf{right}}
\newcommand{\lbl}{\mathsf{label}}
\newcommand{\direction}{\mathsf{d}}
\newcommand{\oppositedirection}{\bar{\mathsf{d}}}
\newcommand{\found}{\mathsf{found}}
\newcommand{\mypar}[1]{\smallskip\noindent\textbf{#1.}\ \ \ }
\newcommand{\ignore}[1]{}
\newcommand{\langname}{ErgoScript\xspace}
\newcommand{\powname}{Autolykos\xspace}
\newcommand{\poolname}{ErgoPool\xspace}
\newcommand{\mixname}{ErgoMix\xspace}
\newcommand{\lst}[1]{\text{\lstinline[basicstyle={\ttfamily}]$#1$}}
\newcommand{\andnode}{\ensuremath{\mathsf{AND}}}
\newcommand{\ornode}{\ensuremath{\mathsf{OR}}}
\newcommand{\tnode}{\ensuremath{\mathsf{THRESHOLD}}}
\newcommand{\primname}{Commitment of Work\xspace}
\newcommand{\prim}{CoW\xspace}
\newcommand{\GF}{\ensuremath{\mathrm{GF}}}
\begin{document}
\title{Bypassing Non-Outsourceable Proof-of-Work Schemes Using Collateralized Smart Contracts}
\author{Alexander Chepurnoy\inst{1,2}, Amitabh Saxena\inst{1}}
\institute{Ergo Platform \\\email{\{kushti\}@protonmail.ch, \{amitabh123\}@gmail.com} \and
IOHK Research \\\email{\{alex.chepurnoy\}@iohk.io}}
\maketitle
\begin{abstract}
Centralized pools and renting of mining power are considered sources of possible censorship threats and even 51\%
attacks for decentralized cryptocurrencies. Non-outsourceable Proof-of-Work (PoW) schemes have been proposed to tackle these issues. However, folklore suggests that such schemes could potentially be bypassed using escrow mechanisms.
In this work, we propose a concrete example of such a mechanism based on collateralized smart contracts. Our approach allows miners to bypass non-outsourceable PoW schemes
if the underlying blockchain platform supports smart contracts in a sufficiently advanced language. In particular, the language should allow access to the PoW solution.
At a high level, our approach requires the miner to lock some collateral covering the reward amount, protected by a smart contract acting as an escrow. The smart contract allows the pool to collect the collateral as soon as the miner collects any block rewards. We propose two variants of the approach depending on when the collateral is bound to the block solution. Using this, we show how to bypass previously proposed non-outsourceable PoW schemes (with the notable exception of strong non-outsourceable schemes) and how to build mining pools for such schemes.
\end{abstract}
\section{Introduction}
Security of Bitcoin and many other cryptocurrencies relies on so-called Proof-of-Work (PoW) schemes~(also known as scratch-off puzzles), which are mechanisms to reach fast consensus and guarantee immutability of the ledger. Security of such consensus mechanisms is based on the assumption that no single entity controls a large part of the mining power. For example, if a single entity controls
33\%, then it can earn disproportionately more rewards using {\em selfish mining}~\cite{selfish}, and with more than 50\% an adversary can perform double-spending or filter out certain transactions. Mining pools are the primary cause of such concentration of mining power.
However, it is individually more beneficial for miners to join pools, despite the fact that pools are detrimental to the system. Another threat, especially for new cryptocurrencies, is the potential for Goldfinger attacks, in which hosted mining services are used to rent mining power in order to mine~(or attack) a cryptocurrency~\cite{goldfinger}.
Non-outsourceable scratch-off puzzles have been proposed to address these issues~\cite{miller2015nonoutsourceable,daian2017short}. A notable example of a real world implementation of this idea is Ergo~\cite{ergo}, whose PoW, \powname~\cite{autolykos}, is based on~\cite{daian2017short}. The primary technique in such approaches is to discourage pooled mining by tying the rewards to some trapdoor information needed for solution generation. In this work, we describe how to bypass the non-outsourceability of many such schemes, including Ergo's.
More importantly, while our solution bypasses non-outsourceability, which gives the ability to form pools, it retains the {\em censorship resistance} property of non-outsourceable puzzles. That is, a pool cannot conduct 51\% attacks even if it handles more than 51\% of the mining power (see Section~\ref{pool-levels} for details).
The rest of the paper is organized as follows. Section~\ref{background} contains an overview of the current state of affairs in proof-of-work schemes and pooled mining along a high level overview of non-outsourceable puzzles. Section~\ref{bypasstype1} describes one approach for creating mining pools in many types of non-outsourceable puzzles, specifically those of~\cite{daian2017short}. Section~\ref{bypasstype2} describes another approach that covers a wider range of puzzles~\cite{daian2017short,miller2015nonoutsourceable}. We conclude the paper in Section~\ref{conclusion} along with pointers for future research.
\section{Background}
\label{background}
\subsection{Proofs of Work}
We first describe the vanilla proof-of-work (PoW) mechanism used in Bitcoin. In order to generate a block in Bitcoin, a miner collects a number of unconfirmed transactions and organizes them into a Merkle tree.
% All nodes of this tree except the leaves are 32 byte hashes computed from its children.
The digest of this tree, denoted $t$ here, is stored in a section of the block called the {\em block header}, which also includes $h$, the hash of the previous block's header, and $n$, a random string called the nonce. We use the term $m$ to denote the puzzle made of the concatenation of the Merkle tree digest and the hash of the previous block. That is, $m = t\|h$ and
then the header is of the form $m\|n$.
The solution is also determined by another parameter $\lambda > 1$, called the {\em difficulty}. Let $H$ be a collision-resistant
hash function with 256-bit output.
The header $m\|n$ is considered a valid solution
if $H(m\|n) \leq 2^{256}/\lambda$.
A miner repeatedly tries different values of $n$ (and possibly $m$) until a solution is found.
Since $H$ behaves like a random oracle, the probability of finding a solution in one attempt is
$1/\lambda$.
All PoW systems use the above idea of finding a value from a uniform distribution that falls within some narrower range based on the difficulty parameter.
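To make the search procedure concrete, the following minimal sketch (our own illustration with hypothetical names, not taken from any protocol specification) implements the vanilla PoW loop with SHA-256 playing the role of $H$:
\begin{verbatim}
import java.math.BigInteger;
import java.security.MessageDigest;

class PowSketch {
    // Search for a nonce n such that H(m || n) <= 2^256 / lambda.
    static long mine(byte[] m, BigInteger lambda) throws Exception {
        MessageDigest H = MessageDigest.getInstance("SHA-256");
        BigInteger target = BigInteger.TWO.pow(256).divide(lambda);
        for (long n = 0; ; n++) {
            H.reset();
            H.update(m);                                   // m = t || h
            H.update(BigInteger.valueOf(n).toByteArray()); // nonce n
            // interpret the 256-bit digest as a non-negative integer
            if (new BigInteger(1, H.digest()).compareTo(target) <= 0)
                return n;  // success; expected after about lambda tries
        }
    }
}
\end{verbatim}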
\subsection{Pooled Mining}
\label{pool-levels}
Bitcoin allows mining pools, which roughly work as follows. The pool distributes work based on some $m$ that it decides. Each miner tries to find a solution for the given $m$, and any solution found is sent to the network.
A miner actually tries to find a {\em share}, which is like a solution but with reduced difficulty (also decided by the pool). Some of the shares may also be real solutions, which result in valid blocks. A miner gets paid by the number of shares submitted. This is possible because the Bitcoin PoW puzzle is a {\em scratch-off puzzle}~\cite{miller2015nonoutsourceable}, a type of PoW puzzle that can be processed in parallel by multiple non-communicating entities with an appropriate reduction in search time.
The pool generates potential block candidates as if it were solo mining, and then distributes each candidate to its miners for solving; the miners can be considered workers for the pool. The shares have no actual value and are just an accounting tool used by the pool to keep track of the work done by each worker. The key observation with pools is that miners do work for some other entity, who then distributes the rewards back to the workers.
Since the pool selects the transactions that are to be included, this gives the pool greater control over the entire blockchain network. We define this using three levels of (de)centralization that a pool may operate at.
\begin{enumerate}
\item {\em Level 1 (Centralized):} The pool operator defines both $m$ and the reward address. Thus, a pool operator has full control over which transactions are included (censorship) and also carries the risk of losing the rewards.
\item {\em Level 2 (Censorship Resistant):} The pool operator does not define $m$ but collects the rewards. This is resistant to censorship but still carries the risk of losing the rewards. % This is what we will try achieve.
\item {\em Level 3 (Decentralized):} There is no centralized pool operator but rather another decentralized oracle that emulates the pool operator and rewards are automatically given to the participants based on the shares they submitted (see P2Pool~\cite{chesterman2018p2pool} for Bitcoin and SmartPool~\cite{luu2017smartpool} for Ethereum). In P2Pool, this oracle is implemented using another blockchain, while in SmartPool, it is implemented using a smart contract.
\end{enumerate}
The following table summarizes the concepts.\\~\\
\begin{tabular}{|l|l|l|l|}\hline
Pool level & Censorship & Reward theft risk & Example\\\hline
L1 (Centralized) & Yes & Yes & BTC.com\\
L2 (Censorship Resistant) & No & Yes & ErgoPool (this work)\\
L3 (Decentralized) & No & No & SmartPool~\cite{luu2017smartpool}, P2Pool~\cite{chesterman2018p2pool}\\\hline
\end{tabular}
~\\
The primary issue with pools is that they increase the potential for transaction censorship and 51\% attacks. One way to address this issue is to disallow pools entirely. This is what non-outsourceable puzzles aim to achieve, and Ergo is the first practical implementation of such puzzles~\cite{ergo}. Thus, such puzzles are designed to provide the same level of security as an L3 pool.
We, however, note that disallowing pools entirely comes with its own set of problems. For instance, at Ergo launch, the difficulty went up so quickly that miners with single GPUs could not find any blocks in a reasonable time. Since Ergo does not allow pools, such miners had no incentive to continue mining. In fact, this research was motivated by the need to create a mining pool for Ergo. However, we also want our solution to retain the security offered by the lack of pools, that is, resistance to censorship and 51\% attacks.
Our solution is based on the observation that another way to address censorship and 51\% attacks is to have pools operate at levels L2 or L3, where these issues are not present. Thus, not only can we have decentralization in mining, but we can also have all the benefits of pools (such as regular income for miners and, thereby, a stronger network). Our solution is designed for L2 but can also be trivially extended to operate at L1. Additionally, it may be possible to extend it to L3 using approaches similar to SmartPool or P2Pool, which we leave as a topic for further research.
\subsection{Non-Outsourceable Puzzles}
We start with an overview of the (non-)outsourceability definitions expressed in different works in the existing literature, such as
Non-outsourceable Scratch-Off Puzzles~\cite{miller2015nonoutsourceable}, 2-Phase Proof-of-Work (2P-PoW)~\cite{twophase},
PieceWork~\cite{daian2017short}, and Autolykos~\cite{autolykos}. The details of these approaches are described in Sections~\ref{bypasstype1} and~\ref{bypasstype2}. However, at a high level, all these approaches can be broadly classified into two categories.
In the first one~\cite{autolykos,daian2017short,twophase}, which we call \textbf{Type 1}, a PoW scheme is considered
non-outsourceable if it is not possible to iterate over the solution space without knowing some trapdoor information (such as a secret key) corresponding to some public information (such as a public key) contained in the block header, with block rewards locked by that trapdoor information. The reasoning here is that in order to send the reward to a pool's address, each miner must know the secret corresponding to that address. However, a pool does not trust miners and so will not give the secret away to them.
In the other category~\cite{miller2015nonoutsourceable}, called \textbf{Type 2}, a PoW scheme is considered non-outsourceable if for any solved block, a miner can efficiently generate another block with non-negligible probability. The motivation behind this definition is that a miner can get paid for shares
by trying to generate a block that pays the reward to the pool. In case of successful block generation, however, the miner could generate and broadcast another block that sends the reward elsewhere.
We further classify Type 2 into {\em weak} if the identity of the miner stealing the rewards can be ascertained and {\em strong} if the identity remains secret.
At a technical level, both Type 1 and 2 approaches rely on a miner's ability to steal the pool's rewards. The difference lies in the way this occurs. In Type 1 schemes, the miner is able to steal the reward {\em after} the block gets finalized.
In Type 2,
the reward can only be stolen {\em before} a block is finalized.
We note that all Type 2 schemes have an inherent problem that allows malicious actors to flood the network with a large number of valid but distinct solutions, thereby causing network partitions and instability. This causes the network to converge very slowly or results in several forks. Hence, we do not consider Type 2 schemes to be robust in reaching consensus, which makes them impractical in the real world. We call this the {\em forking attack}. Strong Type 2 schemes are even more prone to this attack because there is no fear of detection. % (and penalization).
In this work, we bypass the non-outsourceability of all Type 1 and weak Type 2 schemes assuming that their platforms support
some minimal smart contract capability. The following table summarizes this.
~\\
\begin{tabular}{|l|l|l|l|l|}\hline
Puzzle type & Thief's identity & When rewards stolen & Forking attack & Bypassed \\\hline
1 & revealed & after block acceptance & no & yes \\
2 (weak) & revealed & before block acceptance & yes & yes \\
2 (strong) & secret & before block acceptance & yes & no \\\hline
\end{tabular}
\subsection{Execution Context in Smart Contracts}
\label{context}
To understand how a smart contract can bypass non-outsourceability, we first explain what kind of data a contract
can access.
In PoW currencies, a block contains a compact section called the {\em header}, which is
enough to verify the PoW solution and check integrity of other sections (such as block transactions).
Execution context is what is available to a contract during execution. Considering UTXO-based cryptocurrencies, such as
Bitcoin and Ergo, we can think about the following components of the execution context. At the bare minimum (the first level), the smart contract
should have access to the contents of the UTXO it is locking (i.e., its monetary value and any other data stored in it). At the second level, the smart contract may additionally have access to the spending transaction, that is, all its inputs and outputs. At the third level, the smart contract may have access to block header data in addition to the data at the second level. For example, in Ergo, the last ten block headers and also some parts of the next block header
(which are known in advance before the next block is mined) are also available in the execution context. Finally, at the fourth level, the execution context may contain the entire block with all sibling transactions. Note that since the execution context must fit into random-access memory of commodity hardware, accessing the full blockchain is not a realistic scenario. The following
table summarizes possible execution context components.
~\\
\begin{tabular}{|l|l|l|l|l|l|}\hline
Context level & UTXO & Transaction & Header & Block & Example \\ \hline
C1 & Yes & No & No & No & Bitcoin~\cite{Nak08} \\
C2 & Yes & Yes & No & No & -- \\
C3 & Yes & Yes & Yes & No & Ergo~\cite{ergo} \\
C4 & Yes & Yes & Yes & Yes & -- \\\hline
\end{tabular}
\section{Pooled Mining in Type 1 Puzzles}
\label{bypasstype1}
In a nutshell, Type 1 puzzles use a combination of two approaches. The first approach is to replace the hash function with a digital signature (i.e., use public-key cryptography instead of symmetric key cryptography for obtaining the final solution). The second approach is to tie the public key to the rewards.
\subsection{Using Public-Key Cryptography}
The method requires a randomized signature scheme that is strongly unforgeable under adaptive chosen message attacks (s-UFCMA) and outputs signatures uniformly spread over some range irrespective of how the signer behaves. Schnorr signatures are one such scheme~\cite{Sch91}.
A candidate block header is constructed using transactions as in Bitcoin along with a public key $p$. A valid block header is a candidate block header along with a signature $d$ that verifies with this public key and satisfies the difficulty constraints as before (i.e., is less than a certain value). The difficulty parameter is automatically adjusted as in Bitcoin.
One real-world implementation of this concept is \powname \cite{autolykos}, the PoW algorithm of Ergo~\cite{ergo}. \powname uses a variation of Schnorr signatures~\cite{Sch91}, where the goal of a miner is to output $d$ such that $d < 2^{256}/\lambda$ and $\lambda$ is the difficulty parameter. The value $d$ is to be computed as follows. First compute $r = H(m\|n\|p\|w)$ where $m$ is the transactions digest, $n$ is a nonce, $p$ is a public key (an elliptic curve group element) and $w$ is an ephemeral public key that should never be reused in two different blocks. Let $x$ be the corresponding private key of $w$. Compute $d = xr - s$, where $s$ is the private key corresponding to $p$.
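Although the verification procedure is not spelled out above, note that if one additionally assumes the standard discrete-logarithm convention $p = g^s$ and $w = g^x$ for a fixed generator $g$ (an assumption on our part, consistent with $p$ and $w$ being group elements), then the relation $d = xr - s$ can be checked from public values alone:
\[
g^d = g^{xr - s} = (g^x)^r \, (g^s)^{-1} = w^r p^{-1},
\]
so a verifier can recompute $r = H(m\|n\|p\|w)$ and test this identity together with the condition $d < 2^{256}/\lambda$, without knowing $x$ or $s$.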
\subsection{Tying Public-Key to Rewards}
The second technique in making a Type 1 pool-resistant scheme is to tie the rewards to the public key $p$ contained in the block solution. That is, the platform enforces that any mining rewards are protected by the statement {\em prove knowledge of the secret key corresponding to the public key $p$ (from the block solution)}.
We consider Ergo as an example here. Rather than enforcing this logic within the protocol, Ergo uses smart contracts to enforce it. In particular, this rule is enforced in a so-called \textbf{Emission box}\footnote{A box is just a fancy name for a UTXO. We will use these two terms interchangeably.}, a UTXO which contains all the ergs (Ergo's primary token) that will ever be emitted in rewards. The box is protected by a script that enforces certain conditions on how the rewards must be collected. In particular, it requires that a reward transaction has exactly two outputs, such that the first is another emission box containing the remaining ergs and the second is a box with the miner's reward protected with the following script: {\em prove knowledge of the discrete logarithm (to some fixed base $g$) of group element $p$ AND height is greater than or equal to the box-creation height plus 720}.
This is possible because Ergo's (level C3) context includes the block solution. Note that the requirement of 720 blocks (which is roughly a day) is to prevent spending of rewards until they have almost no chance of being invalidated due to forks.
The above approach ensures that the private key used for finding the block solution is also needed for spending the rewards. Consequently, anyone who finds a block also has the ability to spend those rewards.
If we try to create any standard type of pool, we find that anyone having the ability to find a solution also has the ability to spend (i.e., steal) the reward. In fact, any standard pool must share the same private key among all participants, thereby making it impossible to determine the actual spender. This restriction also applies to decentralized schemes such as P2Pool and SmartPool because they both require that rewards be sent to addresses not under the miner's control.
\subsection{Creating a Mining Pool}
We now describe a pooling strategy for bypassing any Type 1 scheme, provided that the underlying smart contract language supports context level C3 or higher (see Section~\ref{context}). Hence one way to mitigate our method would be to restrict the smart contract language to level C2 or lower. Our concrete implementation uses Ergo as the underlying platform, which supports C3 context.
We will follow the {\em pay-per-share} approach, where the reward is distributed among the miners based on the number of shares they submitted since the last payout. Our pool is designed to operate at centralization level L2, where the pool only collects the rewards but does not select transactions (see Section~\ref{pool-levels}). Hence, it provides resistance against censorship and does not encourage 51\% attacks that are possible at L1. Note that the pool could also operate at L1 by creating miner-specific blocks using pair-wise shared public keys. However, this increases computational load on the pool and overall network usage.
\textbf{Basic variant:} We first describe a basic version that is insecure. We then patch the vulnerability to obtain the full version.
The key observation in our approach is that in a valid share, the reward need not necessarily be sent directly to the pool's address. What is actually necessary is that an amount equivalent to the reward is sent to the pool's address. This simple observation allows us to create a pool with the following rules:
\begin{enumerate}
\item Each miner can send the reward to his own public key $p$, whose secret key only he knows ({\em reward transaction}).
\item The block must also have another transaction sending the same amount as the reward to the pool address ({\em pool transaction}).
\end{enumerate}
%\snote{Maybe make this section less Ergo-specific}
A valid share is a solution to a block with the above structure. A miner can efficiently prove that a share is valid without having to send the entire block to the pool. It can simply send the pool transaction along with a Merkle proof that validates the transaction~\cite{Hearn:2012:BIP}.
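For illustration, verifying such a Merkle proof can be sketched as follows (our own simplified sketch with hypothetical names; Bitcoin itself uses a double application of SHA-256, which we omit for brevity):
\begin{verbatim}
import java.security.MessageDigest;
import java.util.Arrays;

class MerkleProofSketch {
    // Check that txHash is a leaf under root, given the sibling hash
    // at each level and whether that sibling sits on the left.
    static boolean verify(byte[] txHash, byte[][] siblings,
                          boolean[] siblingOnLeft, byte[] root)
            throws Exception {
        MessageDigest H = MessageDigest.getInstance("SHA-256");
        byte[] cur = txHash;
        for (int i = 0; i < siblings.length; i++) {
            H.reset();
            if (siblingOnLeft[i]) { H.update(siblings[i]); H.update(cur); }
            else                  { H.update(cur); H.update(siblings[i]); }
            cur = H.digest();  // parent = H(left || right)
        }
        return Arrays.equals(cur, root);  // root from the block header
    }
}
\end{verbatim}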
A pool operator collects such shares (along with the proofs), and any funds thus received when a block is solved are distributed among the miners using the pay-per-share algorithm. To ensure that miners generate valid blocks, the pool randomly asks miners to provide full blocks corresponding to some of their shares and penalizes those who cannot.
One drawback of this is that each miner must have sufficient collateral to cover the reward amount at any time, even though the reward becomes spendable only after a `cooling-off period' (720 blocks in Ergo). Thus, there is a minimum period during which the collateral is spent but the reward is locked and cannot be used as further collateral.
Therefore, for uninterrupted mining, each miner must keep a reserve of at least two rewards (possibly more, depending on the expected time to find a block).
To overcome this drawback, a pool may provide incentives such as allowing the miner to keep a fraction of the reward (for example, for the current reward of 67.5 ergs in Ergo, the pool may require only 65 ergs to be sent to it).
\textbf{The broadcast attack:} Let Alice be a miner with public key \texttt{alice}. If such a system is used in, say, Bitcoin, then it becomes insecure: once the pool-paying transaction is publicized, anyone (not necessarily Alice) may broadcast it, possibly using it as their own pool transaction.
\textbf{Enhanced variant:} The enhanced protocol mitigates the above attack. This is possible because \langname allows us to use the block solution in the context, using which we can secure the pool transaction as follows. Instead of paying to the pool from an arbitrary box (or boxes), Alice will instead store this collateral in a special box protected by the following script:
\begin{verbatim}
minerPubKey == alice
\end{verbatim}
A box with this script does not require a signature, because the above statement only fixes the miner's public key to \texttt{alice} and does not enforce any other spending condition. Thus, anyone can create a transaction spending this box. However, the transaction is valid only if the block that includes it is mined by Alice.
This ensures that the box can only be spent if and when Alice mines a block.
Alice creates her pool transaction using this box as input and submits her shares and proofs to the pool as before. She need not even use a private channel for this purpose and can broadcast them publicly. This enables the possibility of the L3 decentralization level, which requires public shares~\cite{chesterman2018p2pool,luu2017smartpool} (see Section~\ref{pool-levels}).
The above variant prevents the broadcast attack because knowing the pool transaction does not help the attacker in any way (since anyone can create that transaction without Alice's help). An attacker might try to spend Alice's collateral in a transaction paying to some address other than the pool address. However, Alice will discard such transactions when creating a candidate block and only include her pool paying transaction that spends the collateral. In the worst case, if Alice does not check for others spending her collateral, the mined block will still include her own pool-paying transaction double-spending the same collateral, thereby making the entire block invalid.
\textbf{Full variant:} Observe that the above collateral box is not spendable until Alice actually mines a block. Depending on her hardware and the global hash rate, this may take a very long time, and her funds will be stuck till then. We would like Alice to be able to withdraw her collateral at any time she decides to stop participating in the pool. This can be done as follows. Alice first sets another public key \texttt{aliceWithdraw} that she will use to withdraw the collateral (it is possible to keep \texttt{aliceWithdraw} = \texttt{alice}). The modified script is:
\begin{verbatim}
(minerPubKey == alice) || aliceWithdraw
\end{verbatim}
The first condition, \texttt{minerPubKey == alice}, ensures that when the box is used to fund the pool output, the miner must be Alice, as in the enhanced variant. The second condition, \texttt{aliceWithdraw}, ensures that the coins are not stuck until Alice finds a block, because it allows Alice to withdraw the collateral at any time.
Alice should fund the pool transaction by satisfying only the first condition and never the second; otherwise the broadcast attack becomes possible. The second condition is to be used only for withdrawing collateral.
Note that the above allows anyone to create a transaction spending Alice's collateral box, as long as Alice mines the block containing that transaction. Alice may have more than one collateral box protected by identical scripts. Thus, an attacker may try to spend an Alice box that is not used in the pool funding transaction. Of course, Alice should not include such transactions in her block, but this requires Alice to implement additional checks. An easier solution is for Alice to use another public key, \texttt{aliceLock}, as below, to ensure that only she can create a valid transaction.
\begin{verbatim}
((minerPubKey == alice) && aliceLock) || aliceWithdraw
\end{verbatim}
The above broadcast attack mitigation strategy requires C3 context level (i.e., access to \texttt{minerPubKey}) and will not work in lower levels. One may envisage a hiding strategy at C2 context level, where the pool transaction is not revealed in a share and only a commitment is revealed. The actual transaction is revealed only if a block is found or when a miner later proves to the pool that the shares were correct. However, this is also prone to broadcast attacks as follows. First note that there are two kinds of broadcast attacks. The first is the {\em leak-from-share} attack. The second is the {\em leak-from-orphaned-block} attack, where the transaction is extracted from a mined block that ends up getting orphaned. The hiding strategy mitigates the first attack but not the second.
\textbf{Weak broadcast security:} We can obtain a weaker form of broadcast security at the C2 context level by assuming a trusted pool, as follows. A pool-paying transaction is created as before by spending some arbitrary input and paying to the pool address. The miner sends the shares along with the proofs to the pool over a private channel. The pool is trusted not to misuse the transaction. This addresses the leak-from-share attack. To address the leak-from-orphaned-block attack, the following strategy is used. Assume that the box funding the pool transaction contains a unique identifier of Alice (such as her public key) and a script that enforces any spending transaction to pay the pool. Let us call this {\em Alice's funding box}. The pool then enforces the following rules internally.
\begin{enumerate}
\item Any transaction it receives from Alice's funding box that was not mined by Alice is considered {\em irregular}.
% , and the output of that transaction (paying the pool) as an {\em irregular} output.
\item Any irregular transaction using Alice's funding box is not considered for pool reward disbursement and the funds are refunded back to Alice.
\end{enumerate}
It is possible for everyone to verify that a given pool transaction is irregular if everyone knows Alice's public key. Thus, a pool cannot deny the existence of an irregular transaction. Refunds can also be made verifiable in many ways, such as by requiring the pool to create another funding box for Alice, which can be publicly verified. We can additionally require that the new funding box be created in a transaction that consumes the irregular transaction's output.
\section{Pooled Mining with Type 2 Puzzles}
% \section{10 Blocks}
\label{bypasstype2}
In Type 2 puzzles, a miner can produce~(with non-negligible probability) an alternative block for the same PoW solution~\cite{miller2015nonoutsourceable}.
For concreteness, we will use public key cryptography to illustrate this, as we did for Type 1 puzzles. However, our approach will work for any other implementation of such puzzles.
Recall that a Type 1 puzzle comprises two steps: (1) embedding a public key $p$ in the block header, whose private key is needed for generating the solution, and (2) tying the block rewards to $p$.
A Type 2 puzzle can be considered a variation of a Type 1 puzzle, where Step 1 remains the same but Step 2 is modified so that the block rewards are not tied to $p$ but instead to another public key $a$ that is certified by $p$. In other words, the complete solution is defined using a tuple $(p, a, \textsf{cert}_p(a))$, where $\textsf{cert}_p(a)$ is a signature on $a$ that verifies with $p$.
The rationale behind non-outsourceability is that a cheating miner knowing the private key of $p$ can steal the reward as follows. When claiming shares, the miner behaves correctly. That is, it constructs the block so that rewards go to the pool public key $a$. However, if a real solution is found, the rewards are sent to the miner's public key $a'$ by creating a certificate $\textsf{cert}_p(a')$. Thus, as in Type 1 puzzles, the pool risks losing rewards if it shares secrets with miners.
\textbf{Watermarking:} In the basic Type 2 scheme, a pool can make it possible to identify stolen rewards by publicly fixing a watermark identifying its blocks in advance~\cite{miller2015nonoutsourceable}. A watermark in this context is something that is preserved even if the pool key $a$ is replaced by the miner key $a'$. A few examples are the certifying key $p$ or, say, half the bits of the nonce. If such a watermark is used then it becomes possible to identify the cases when the block rewards are stolen.
\textbf{Strong Type 2 puzzles:} In the above design, it is possible to determine when the rewards are stolen. For instance, using the public key $p$ as a watermark, a pool may declare in advance that for a given $p$, it only considers the pair $(p, a)$ as valid and any other pair $(p, a')$ indicates a theft. The stronger variant of Type 2 puzzles replaces signatures with zero knowledge proofs so that the two cases (block rewards stolen or not) become indistinguishable. Any Type 2 puzzle that is not strong is called {\em weak}.
We describe a smart contract that bypasses both Type 1 and (weak) Type 2 schemes. For the sake of brevity, however, we only describe the Type 2 solution here. Recall that for such schemes, it is possible to detect when a particular watermark is being used in the block. In our approach, this watermark is attached to the miner instead of the pool. Thus, the pool will share pair-wise watermarks with every miner. Similar to the previous approach, we will also require the miner to lock some collateral that can be used by the pool to cover any rewards taken by the miner.
We also require the smart contract language to make available in the execution context the block solutions not only for the current block header but also for the last $L$ block headers prior to the current one.
Then a weak Type 2 scheme can be bypassed as follows. In order to participate in the pool, Alice creates an unspent box that locks collateral with the guard script: {\em payable to pool public key if at least one of the last $L$ headers contains the watermarked solution}. The same solution will also work for Type 1 schemes, because the block header cannot be efficiently altered without also altering the embedded public key. In \langname, for example, this can be implemented as: \texttt{poolPubKey \&\& lastHeaders.exists(h => h.minerPubKey == alice)}.
The method \texttt{exists} of \texttt{lastHeaders} takes as input another method, say $f$, that takes as input a header and outputs a \texttt{Boolean}. The method $f$ is applied to every element of \texttt{lastHeaders}, and the output of \texttt{exists} is the OR of the outputs of $f$. In this case, $f$ outputs true if the miner public key in the header is Alice's public key.
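As an illustration, the semantics of \texttt{exists} can be modelled as follows (a hypothetical Java rendering on our part; the actual semantics are defined by \langname):
\begin{verbatim}
import java.util.function.Predicate;

class Header { byte[] minerPubKey; /* other header fields */ }

class ExistsSketch {
    // Model of lastHeaders.exists(f): the OR of f over all headers.
    static boolean exists(Header[] lastHeaders, Predicate<Header> f) {
        for (Header h : lastHeaders)
            if (f.test(h)) return true;  // short-circuit on a match
        return false;
    }
}
\end{verbatim}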
A miner is permitted to send the reward to any chosen address, since as soon as a valid block is generated, the collateral becomes spendable by the pool. One way the miner could try to save the collateral is to generate $L$ blocks after the one with the watermark (censoring the pool's spending transaction in each of them), but this is unlikely to succeed if $L$ is big enough. In Ergo, for example, $L = 10$, and the chance of generating 11 consecutive blocks is very small~\cite{Nak08}.
Note that the above script locks the collateral until Alice finds a block, which may never happen. Hence, as in the Type 1 case, we need to allow Alice to withdraw the collateral if she desires. However, the solution used in Type 1 (i.e., simply appending `\texttt{|| aliceWithdraw}') will not work here, because the pool does not get the collateral immediately when Alice gets the reward, but rather within the next $L$ blocks. If we allow Alice to withdraw the collateral at any time, then she can withdraw it in the same block as the reward.
One solution would be to allow Alice to withdraw the collateral only after some fixed height $H$, while her participation in the pool using this collateral ends at height $H - L$, after which she must use new collateral. For simplicity, we skip this deadline condition for withdrawing the collateral in case a block is not found for a long time. However, a real-world implementation must consider this.
\section{Conclusion and Further Work}
\label{conclusion}
Non-outsourceable puzzles have been proposed as a possible workaround for attacks that arise due to pool formation in PoW blockchains. Such solutions fall into two broad categories: Type 1, where the reward is directly bound to some trapdoor information used for generating the block solution (and thus, that information is needed while spending), and Type 2, where the reward is indirectly bound to the trapdoor information via a certificate. Type 2 schemes can be further classified into weak, where the identity of the miner is revealed, and strong, where the identity remains hidden.
In this paper we proposed two approaches to bypass the non-outsourceability of Type 1 and weak Type 2
schemes to create mining pools, thereby `breaking' them. Our pools operate at level L2 (censorship resistance), where the pool does not control the transactions to be included in blocks but only collects the rewards (see Section~\ref{pool-levels}). Such pools do not pose the stability threats that L1-level pools do. Although our pools are most efficient when operating at L2, they can operate at L1 simply by having the pool create miner-specific blocks using their public keys.
Note that both L1 and L2 carry the risk of funds loss due to operator compromise. A topic of further investigation is to have the pools operate at L3, where there is no risk of losing funds.
Only strong Type 2 schemes~(where a miner does not provide a block solution in the clear, but rather provides an encrypted solution
along with a zero-knowledge proof of its correctness) remain unbroken. However, it should also be noted that strong schemes are not very practical, as they require a generic zero-knowledge proof system, which imposes a heavy burden on both the prover and the verifier. Thus, such schemes currently have no implementations in the real world. Additionally, we note that Type 2 schemes in their entirety have an inherent weakness that makes them impractical for real-world use: the high possibility of forking attacks.
Both our approaches rely on smart contracts acting as decentralized escrows and require the underlying programming language to allow predicates at context level C3 or higher (i.e., access to the block solution; see Section~\ref{context}). Thus, one way to invalidate our methods would be to restrict the language context to level C2 or lower. Note that even level C2 contracts allow sophisticated applications such as non-interactive mixing, rock-paper-scissors, and even an ICO~\cite{advtutorial}.
Another open issue in mining pools is that of block withholding~\cite{courtois2014subversive}, where the miner tries to attack the pool by submitting valid shares but discarding actual solutions. The need for collateral in our schemes may possibly affect the attacker's strategy. This will be considered in a follow-up work.
\section*{Acknowledgements}
\label{*acknowledgements}
We would like to thank Mohammad Hasan Samadani and the ergopool.io team for building a pool based on this paper and for highlighting real-world issues with regard to high-level scheme descriptions.
\bibliographystyle{unsrt}
\bibliography{main}
\appendix
\end{document}
\subsection{Up-Scaling Computational Efficiency}
The CDM model for this case requires about two orders of magnitude less computational effort than the DEM model (Table \ref{tab:computation}). The CDM simulation uses a number of continuum elements ($29,866$) comparable to that of the DEM simulation ($25,898$) to ensure a fair comparison and adequate convergence. The CDM model efficiency can be improved by applying a Selectively Refined Mesh (SRM), where only the areas with stress concentrations and large stress gradients have a strongly refined mesh. With the SRM, a converged CDM solution is achievable with only $3,577$ elements, leading to another order of magnitude reduction in computational effort.
\begin{table}[!htbp]
\centering
\caption{Comparison of Computational Time for the DNS}
\label{tab:computation}
\begin{tabular}{@{}ccccc@{}}
\toprule
\textbf{Simulation} & \textbf{No. Continuum} & \textbf{Processor} & \textbf{Slope Failure} & \textbf{Computational} \\
\textbf{Type} & \textbf{Elements} & \textbf{Clock Speed} & \textbf{Load} & \textbf{Time} \\ \midrule
DEM & $25,898$ & $2.20 GHz$ & $11.2 MPa$ & $46.5 hr$ \\
CDM & $29,866$ & $1.80 GHz$ & $11.5 MPa$ & $0.65 hr$ \\
CDM - SRM & $3,577$ & $1.80 GHz$ & $11.5 MPa$ & $0.013 hr$ \\ \bottomrule
\end{tabular}
\end{table}
The DEM simulation was run serially on a $2.2 GHz$ CPU while the CDM simulation was run serially on a $1.8 GHz$ CPU. Despite the CDM model having more continuum elements than the DEM model, and the CDM model running on a slower CPU, the computational time decreased from $46.5 hr$ for the DEM simulation to $0.65 hr$ for the CDM simulation. Running the CDM model with a SRM reduces the total computational time to $0.013 hr$ (less than a minute), compared with roughly two days for the DEM simulation. This large increase in computational efficiency with a marginal decrease in model accuracy can be immensely useful for large scale geomechanical problems in NFR.
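For reference, the speedups implied by Table \ref{tab:computation}, ignoring the small difference in processor clock speeds, are
\[
46.5\,hr \,/\, 0.65\,hr \approx 72, \qquad 46.5\,hr \,/\, 0.013\,hr \approx 3600,
\]
i.e., roughly two and three and a half orders of magnitude for the CDM and CDM-SRM models, respectively.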
\chapter{Searching Image Metadata With {\kw igrep}}
\label{chap:igrep}
\indexapi{igrep}
%\section{Overview}
The {\cf igrep} program searches one or more image files for metadata
that matches a string or regular expression.
\section{Using {\cf igrep}}
The {\cf igrep} utility is invoked as follows:
\bigskip
\hspace{0.25in} {\cf igrep} [\emph{options}] \emph{pattern} \emph{filename} ...
\medskip
Where \emph{pattern} is a POSIX.2 regular expression (just like the
Unix/Linux {\cf grep(1)} command), and \emph{filename} (and any
following names) specify images or directories that should be searched.
An image file will ``match'' if any of its metadata values
contain a substring that matches the regular expression. The image
files may be of any format recognized by \product (i.e., for which
\ImageInput plugins are available).
Example:
\begin{code}
$ igrep Jack *.jpg
bar.jpg: Keywords = Carly; Jack
foo.jpg: Keywords = Jack
test7.jpg: ImageDescription = Jack on vacation
\end{code}
% $
\section{{\cf igrep} command-line options}
\apiitem{--help}
Prints usage information to the terminal.
\apiend
\apiitem{-d}
Print directory names as it recurses. This only happens if the {\cf -r}
option is also used.
\apiend
\apiitem{-E}
Interpret the pattern as an extended regular expression (just like
{\cf egrep} or {\cf grep -E}).
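For example, using the files from the earlier example, an extended
expression can match alternative patterns:
\begin{code}
$ igrep -E "Carly|Jack" *.jpg
bar.jpg: Keywords = Carly; Jack
foo.jpg: Keywords = Jack
test7.jpg: ImageDescription = Jack on vacation
\end{code}
% $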
\apiend
\apiitem{-f}
Match the expression against the filename, as well as the metadata
within the file.
\apiend
\apiitem{-i}
Ignore upper/lower case distinctions. Without this flag, the expression
matching will be case-sensitive.
\apiend
\apiitem{-l}
Simply list the matching files by name, suppressing the normal output
that would include the metadata names and values that matched.
For example:
\begin{code}
$ igrep Jack *.jpg
bar.jpg: Keywords = Carly; Jack
foo.jpg: Keywords = Jack
test7.jpg: ImageDescription = Jack on vacation
$ igrep -l Jack *.jpg
bar.jpg
foo.jpg
test7.jpg
\end{code}
\apiend
\apiitem{-r}
Recurse into directories. If this flag is present, any specified files
that are directories will have any image files contained therein
searched for a match (and so on, recursively).
\apiend
\apiitem{-v}
Invert the sense of matching, to select image files that \emph{do not}
match the expression.
\apiend
\chapter{Chapter 6: Solace from the Ice}
From this point in the run onwards flee from all random encounters
\begin{enumerate}
\item Walk to and enter the Inn
\item \cs\ then interact with the shelf (Small glint)
\item \cs\ after the cutscene is no longer black
\item Walk outside and then forward into the Ice Region and enter Icicle Ridge
\item Fast Forward until gaining control
\item Walk to gate and travel to Nine Wood Hills
\item \cs
\item Walk out of Girl's Tearoom, \cs
\item Walk to North Promenade, \cs
\item Hold right + mash circle to skip tutorial
\item Walk towards screen and left and use gate to travel to Nine Wood Hills
\item Go to North Promenade and talk to Chocolatte
\end{enumerate}
\begin{shop}
\textbf{Sell:}
\begin{itemize}
\item Everything
\end{itemize}
\textbf{Buy:}
\begin{itemize}
\item 15x Electro Marble (L1 / RB increases in increments of 10)
\item Confirm purchase
\item 5x Bomb Fragment
\end{itemize}
\end{shop}
\begin{enumerate}[resume]
\item Exit shop and travel to Sylver Park then use gate to return to Icicle Ridge
\item Walk right and trigger sizzle dialogue (Unskippable and unable to fast forward)
\item After sizzle is used, walk forward, down ramp and then do Puzzle.
\end{enumerate}
\begin{puzzle}
\begin{itemize}
\item Down, Left
\item Up, Right
\item Sizzle (Skip Chest)
\item Down, Right
\end{itemize}
\end{puzzle}
\begin{enumerate}[resume]
\item After Puzzle walk to next screen
\item \cs
\end{enumerate}
\begin{battle}[]{White Nakk x2}
\begin{itemize}
\item Auto-attack
\end{itemize}
\end{battle}
\begin{menu}
\textbf{Stack Setup}
\begin{itemize}
\item (2) \textbf{Black Nakk}, Lann, \textit{Empty}
\item (3) Black Chocochick, \textbf{Cockatrice}, Reynn
\end{itemize}
\textbf{Mirage Board}
\begin{itemize}
\item \textbf{Cockatrice:} $\rightarrow$ Sizzle, $\rightarrow$ Fire
\end{itemize}
\end{menu}
\begin{enumerate}[resume]
\item Continue walking along path until reaching Puzzle 2
\end{enumerate}
\begin{puzzle}
\begin{itemize}
\item Up, Right
\item Up, Right
\item Down
\end{itemize}
\end{puzzle}
\begin{enumerate}[resume]
\item Walk right along path and \pickup{squishsparkly}
\item Walk down and trigger battle
\end{enumerate}
\begin{battle}[]{White Nakk x3}
\begin{itemize}
\reynnf Fire
\lannf Red Fang
\end{itemize}
\end{battle}
\begin{enumerate}[resume]
\item Walk down and complete Gimme Golem
\item Do puzzle 3
\end{enumerate}
\begin{puzzle}
\begin{itemize}
\item Down, Left
\item Down, Right
\item Down
\end{itemize}
\end{puzzle}
\begin{enumerate}[resume]
\item Walk to next screen
\item \cs\ when image appears
\item Continue walking along path and do puzzle 4
\end{enumerate}
\begin{puzzle}
\begin{itemize}
\item Up, Left
\item Up, Right
\item Down, Right, Up
\item Sizzle and \pickup{Elixir}
\item Left, Down
\item Left, Up
\item Right, Up
\item Right, Up
\end{itemize}
\end{puzzle}
\begin{enumerate}[resume]
\item Walk to next screen
\item \save
\item Walk to Boss and \cs
\end{enumerate}
\begin{battle}[]{Grandfenrir}
\begin{itemize}
\reynnf Fire White Nakk
\lannf Red Fang White Nakk
\item After 3 White Nakks are dead, both throw bomb fragments at Grandfenrir until dead (5 Total)
\item Suicide Reynn
\item Red Fang last White Nakk
\end{itemize}
\end{battle}
\begin{enumerate}[resume]
\item Make Lann small and run forward and along path to exit Icicle Ridge
\item Run through Ice Region to Saronia
\end{enumerate} | {
"alphanum_fraction": 0.730494427,
"avg_line_length": 27.5511811024,
"ext": "tex",
"hexsha": "f5bf183719e9bcb7c1ae2ce0ff10c5a2137d08b2",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "8045824bbe960721865ddb9c216fe4e2377a2aae",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "HannibalSnekter/Final-Fantasy-Speedruns",
"max_forks_repo_path": "World of Final Fantasy/Chapters/006_Chapter6.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "8045824bbe960721865ddb9c216fe4e2377a2aae",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "HannibalSnekter/Final-Fantasy-Speedruns",
"max_issues_repo_path": "World of Final Fantasy/Chapters/006_Chapter6.tex",
"max_line_length": 99,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "8045824bbe960721865ddb9c216fe4e2377a2aae",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "HannibalSnekter/Final-Fantasy-Speedruns",
"max_stars_repo_path": "World of Final Fantasy/Chapters/006_Chapter6.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1175,
"size": 3499
} |
\chapter{Inter-Procedural Analysis}
Inter-procedural analysis is required to obtain more precise results, since programs commonly contain multiple function calls. It is essential to consider the effect of a function call on the data flow value entering the node. Inter-procedural analysis takes call and return, parameter passing, local variables of the function, return values, and recursion into account. A major issue to be dealt with in inter-procedural analysis is handling calling contexts. Handling concurrency also needs to be supported, which will be discussed in the next chapter. \\
\section{Context Sensitivity}
A context-sensitive analysis is an inter-procedural analysis which analyzes the callee procedure separately for each calling context, whereas a context-insensitive analysis performs the analysis irrespective of the calling context. Context-insensitive analysis over-approximates inter-procedural control flow, which results in imprecision because it takes into account invalid control paths; each function is analyzed once with a single abstract context. Context-sensitive analysis is more precise, as
it considers only the valid inter-procedural control flow. \\
Java is an object-oriented language supporting features like encapsulation and inheritance. Data access is thus indirect, through method calls for each class, so context sensitivity plays a very important role for object-oriented languages.
\section{Call Graph}
Call Graph is graph with nodes and edges in which nodes represent procedures and there is edge from $a$ to $b$ if some call-site at $a$ calls procedure
$b$. Hence this is a static data structure that represents the run-time calling relationships among procedures in program. Soot provides a Spark engine which generates the call graph. VASCO on the other hand returns a much precise call graph, which is generated using liveness based inter-procedural pointer analysis. A thing to note here is that construction of call graph requires inter-procedural analysis and inter-procedural analysis on the other hand requires call graph. An approach to breaking this dependency is to initially approximate the call-graph and in every iteration perform inter-procedural analysis and improve the precision of the call graph.\cite{vasco} \\
A call multi-graph is a directed graph which represents the calling relationships between the procedures in a program, where nodes represent procedures and edges represent procedure calls. A cycle in the call multi-graph denotes recursion. The super graph is another representation, in which each call site is connected to the entry node of the callee procedure, and the exit node of the callee is connected to the return node in the caller procedure. \\
\begin{figure}[h]
\begin{center}
\includegraphics[scale=0.5]{Figures/callgraph.png}
\end{center}
\caption{Example of call multi-graph and super-graph}
\label{fig:call graph}
\end{figure}
Data flow analysis uses a static representation of programs to compute summary information along paths. To ensure safety, all valid paths must be covered; a valid path is one that represents legal control flow. To ensure precision, data flow values should be merged at shared program points without including invalid paths. To ensure efficiency, only those valid paths that yield information affecting the summary information should be covered.
\section{Approaches to Inter-procedural Analysis}
In this section, approaches to performing inter-procedural analysis are discussed. A very simple approach is procedure in-lining, where every procedure call is replaced by the procedure body. This is only applicable when the target of the call is known, i.e., when the call is not made through a pointer and is not virtual. It also handles recursion poorly, as the code size can increase in an unbounded manner. \\
\subsection{Functional approach}
In the functional approach, a summary flow function is computed for each procedure and used as the flow function for calls to that procedure. The summary flow function of a given procedure $r$ is influenced by the summary flow functions of the callees of $r$, not by the callers of $r$. In the presence of loops or recursion, iterative computation is needed until a fixed point is reached. Termination is guaranteed only if the lattice is finite.
\subsection{Call Strings Approach}
This is a general flow- and context-sensitive method. In this approach the call history is stored so that information can be propagated back to the correct point. The call string at a program point is the sequence of unfinished calls reaching that point, starting from the main procedure. The data flow equations are changed so that data flow values are merged only if their contexts (call strings) are the same. At a call node $c_i$, $c_i$ is appended to the call-string value at that point. Similarly, at a return node the last call site $c_i$ is removed, and all other data flow values are blocked. For non-recursive programs the number of call strings is finite; for recursive programs it can be infinite. However, the problem is decidable for finite lattices.
Value-based termination of call strings addresses this by creating equivalence classes: if two call strings have the same data flow value at the start node of a procedure, then they will produce the same data flow value at the return node of the procedure call. Such call strings are grouped into an equivalence class.
A very simple example of the call strings method is shown below.\cite{mtpreport}
\begin{verbatim}
public static void main ()
{
    B x;
    // Call sites are appended to the current call string
    // before making the call; f and g are pointers to
    // nodes in a linked list.
    C1 : x = after(f);
    C2 : x = after(g);
}

B after(B a)  // c1 [a = f]   c2 [a = g]
{
    return a.next;
    // c1 [return f.next]   c2 [return g.next]
}
// c1 [returns the object pointed to by f.next]
// c2 [returns the object pointed to by g.next]
\end{verbatim}
\subsection{Value Context Method}
In this method, a combination of tabulation (the functional approach) and value-based termination (the call strings approach) is adopted. The call strings are partitioned based on the data flow value at the call site, and analysis of the procedure is then performed once for each partition. The method combines the two views of contexts: data flow values at call sites are stored as value contexts, and call strings as calling contexts. Distinct data flow values are maintained for each context of a procedure.\cite{vasco} \\
A value context is defined by a particular data flow value reaching a procedure. It is used to enumerate and store the summary flow function of the procedure as a set of input--output pairs. To compute these pairs, data flow analysis is performed within the procedure for each context (input data flow value); the out value of each context is initialized to the top element. The approach also maintains a context transition table, which restricts the flow of information to inter-procedurally valid paths. Transitions are recorded in the form $((X, c), Y)$, where $X$ represents the calling context, $c$ the call site and $Y$ the callee context. Hence, when the analysis of a callee procedure completes, the context to return to is identified from the context transition table, and propagation along valid paths is guaranteed. \\
When a new call to a procedure is encountered, the context table is consulted to decide whether the procedure needs to be analysed again. If it has already been analysed for this input value, the stored output can be used directly; otherwise a new context is created and the procedure is analysed for this new context.
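A minimal Python-style sketch of this bookkeeping is given below. It is illustrative only: the lattice operations and the intra-procedural analysis are assumed to exist elsewhere, and data flow values are assumed to be hashable.
\begin{verbatim}
# Illustrative sketch of value contexts (all names hypothetical).
class Context:
    def __init__(self, proc, entry_value):
        self.proc = proc
        self.entry = entry_value  # input data flow value for this context
        self.exit = None          # stands in for the lattice top element

contexts = {}      # (proc, entry_value) -> Context
transitions = {}   # (caller_ctx, call_site) -> callee_ctx

def context_for(proc, entry_value, worklist):
    # Reuse an existing context for this (procedure, input value) pair,
    # or create a new one and schedule the procedure for analysis.
    key = (proc, entry_value)
    if key not in contexts:
        contexts[key] = Context(proc, entry_value)
        worklist.append(contexts[key])
    return contexts[key]

def handle_call(caller_ctx, call_site, callee, value_at_call, worklist):
    callee_ctx = context_for(callee, value_at_call, worklist)
    # Record the transition so results return along a valid path.
    transitions[(caller_ctx, call_site)] = callee_ctx
    return callee_ctx.exit   # still the top element if not yet stabilised
\end{verbatim}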
\section{Example of the value context method}
\begin{figure}[h]
\begin{center}
\includegraphics[scale=0.5]{Figures/interproc_example.png}
\end{center}
\caption{Example of value-context based IP analysis}
\label{fig:extreme}
\end{figure}
This is an example of an inter-procedural heap liveness analysis. We wish to find out whether $a$ is live before and after the statement $C_1$. The procedure $p$ is recursive and gets called under two different contexts, $X_1$ and $X_2$, as shown in the context transition diagram. \\
The analysis starts with the initial context $X_0$ at statement $e_1$ in main, with the value $null$. The in value of $n_1$ becomes $a$, as \emph{a.f} is being set to $t$ in that statement. At the next node, $C_1$, we have a call to procedure $p$, so a new value context $X_1$ is created with input value $null$ for $a$, and the transition from $X_0$ to $X_1$ is recorded. In context $X_1$, node $n_3$ is processed, followed by $c_2$, which creates another value context $X_2$ for procedure $p$, again with input value $null$. The transition from $X_1$ to $X_2$ is also recorded in the context transition table. \\
In context $X_2$, node $c_2$ is evaluated. Since a context already exists for the input value $null$, we take its out value, which is still $null$ (the top value of the lattice). Thus, the in values of $C_2$ and $n_2$ are set to $null$ and $a$ respectively. Since $s_2$ does not kill the access path $a$, the output value of context $X_2$ is set to $a$. \\
The callers of $X_2$ are $X_1$ and $X_2$ itself; both contexts are now added to the work-list for processing. The only change for context $X_2$ is at the call statement $c_2$, whose in value is now set to $a$. On evaluating context $X_1$, we similarly obtain its output value $a$. After this step, the statement $C_1$ in context $X_0$ gets the in value $a$, which is killed at $s_1$. \\
%X3,c2 is picked up for processing and this time the correct exit value of X2 i.e u+v- is used and so the out value of X3,c2 becomes a-b+c-. However the values do not change for its successor. Thus no more nodes of X3 need to be expanded. The process continues with X1,c2. It's out value comes out to be a+b-c- as the exit value of X2 is u+v-. X1,n3 is then picked up. The sign of c is found out to be negative and this propagates to the end of the procedure. The exit values of X1 becomes a+b-c-. And now X0,c1 is added to the work list for processing and thus q is found out to be negative. \\
%
Besides terminating the analysis of recursive procedures, value-based contexts also serve as a cache across distinct call sites.
% Compile with:
% xelatex -file-line-error -halt-on-error -shell-escape lyse.tex
\documentclass[a4paper,11pt,titlepage]{article}
\include{header}
\begin{document}
\title{\texttt{lyse}: a data analysis system for process-as-you-go automated data analysis}
\author{Chris Billington}
\maketitle
\tableofcontents
\section{Introduction}\label{sec:concepts}
\lettrine[lines=3]{\texttt{l}}{\texttt{yse}} is a data analysis system which gets \emph{your code} running on experimental data as it is acquired. It is fundamentally based around the ideas of experimental \emph{shots} and analysis \emph{routines}. A shot is one trial of an experiment, and a routine is a \texttt{Python} script, written by you, that does something with the measurement data from one or more shots.
Analysis routines can be either \emph{single-shot} or \emph{multi-shot}. This determines what data and functions are available to your code when it runs. A single-shot routine has access to the data from only one shot, and functions available for saving results only to the hdf5 file for that shot. A multi-shot routine has access to the entire dataset from all the runs that are currently loaded into \texttt{lyse}, and has functions available for saving results to an hdf5 file which does not belong to any of the shots---a file that exists only to save the `meta results'.
Actually things are far less magical than that. The only enforced difference between a single-shot routine and a multi-shot routine is a single variable provided to your code when \texttt{lyse} runs it. Your code runs in a perfectly clean \texttt{Python} environment with this one exception: a variable in the global namespace called \texttt{path}, which is a path to an hdf5 file. If you have told \texttt{lyse} that your routine is a single-shot one, then this path will point to the hdf5 file for the current shot being analysed. On the other hand, if you've told \texttt{lyse} that your routine is a multi-shot one, then it will be the path to an h5 file that has been selected in \texttt{lyse} for saving results to.
The other differences listed above are conventions only\footnote{Though \texttt{lyse}'s design is based around the assumption that you'll follow these conventions most of the time}, and pertain to how you use the API that \texttt{lyse} provides, which will be different depending on what sort of analysis you're doing.
Here's a screenshot of \texttt{lyse}:
\begin{center}
\includegraphics[width=\textwidth]{gui.pdf}
\end{center}
\begin{enumerate}
\item Here's where single-shot routines can be added and removed, with the plus and minus buttons. They will be executed in order on each shot (more on how that works shortly). They can be reordered, or enabled/disabled with the checkboxes on the left. The checkboxes to the right, underneath the plot icons, don't currently do anything, but they are intended to provide control over how plots generated by the analysis routines are displayed and updated.
\item Here is where multi-shot routines can be added or removed. The file selection button at the top allows you to select what hdf5 file multi-shot routines will get given (to which they will save their results).
\item Allows pausing of analysis. By default \texttt{lyse} will run all single-shot routines on a shot when it arrives (either via the HTTP server or after having been added manually). Only after all the shots have been processed will the multi-shot routines be executed. So if you load ten shots in quick succession, the multi-shot routines won't run until all ten have been processed by the single-shot routines. However, most of the time there will be sufficient delay between shots arriving that the multi-shot routines will be executed pretty much every time a new shot arrives.
\item If you want to re-run single-shot analyses on some shots, select them and click this button. They'll then be processed in order.
\item This will rerun all the multi-shot analyses.
\item Here is where shots appear, either having arrived over HTTP or having been added manually via the file browser (by clicking the plus button). Many columns will populate this part of the screen, one for each global and each of the results (as saved by single-shot routines) present in the shots. A high-priority planned feature is to be able to choose exactly which globals and results are displayed; otherwise this display is overwhelming to the point of uselessness. The data displayed here represents the entirety of what is available to multi-shot routines via the API provided by \texttt{lyse}.
\item This is where the output of routines is displayed, errors in red. If you're putting \texttt{print} statements in your analysis code, here is where to look to see them. Likewise if there's an exception and analysis stops, look here to see why.
\end{enumerate}
\section{The \texttt{lyse} API}
\lettrine[lines=3]{S}{o great,} you've got a single filepath. What data analysis could you possibly do with that? It might seem like you still have to do the same amount of work that you would without an analysis system! Whilst that's not quite true, it's intentionally been designed that way so that you can run your code outside \texttt{lyse} with very little modification. Another motivating factor is to minimise the amount of magic black-box behaviour, such that an analysis routine is actually just an ordinary \texttt{Python} script which makes use of an API designed for our purposes. \texttt{lyse} is both a program which executes your code, and an API that your code can call on.
To get started, you'll want to begin your analysis routine with:
\python{listing_1.py}
The \texttt{lyse} module\footnote{importing lyse imports the functions in \texttt{pythonlib/lyse/\_\_init\_\_.py}, whereas the main program is \texttt{pythonlib/lyse/main.pyw}} provides the following one function and two classes:
\begin{description}
\item[\texttt{data(filepath=None, host='localhost')}]. The \texttt{data} function, when called with no arguments, obtains the current dataset from a running instance of \texttt{lyse} on the same computer. It returns a \texttt{pandas} \texttt{DataFrame} with the same rows and columns as you see in the main program of \texttt{lyse}. This is a simple way to get at your data which doesn't require your code to be run from within \texttt{lyse} at all. You can simply open a python interactive session, type \texttt{from lyse import *; df = data()}, and begin pulling out columns and plotting them against each other. Calling \texttt{data()} this way is intended for pulling data for multi-shot analysis, and should be avoided in single-shot mode.
When called with the \texttt{host} argument, the \texttt{data} function instead connects to a running instance of \texttt{lyse} on that computer, downloading its \texttt{DataFrame} over the network. I'm planning on including automatic \texttt{SSH} tunnelling through \texttt{bec.physics} to allow for us to obtain our data from outside the lab subnet without the need for a VPN.
When called with the \texttt{filepath} argument, the \texttt{data} function instead returns a \texttt{pandas Series} object with the globals and results from just the h5 file specified. This is intended for use in single-shot mode, with the filepath being that single global variable that \texttt{lyse} implants into the namespace, as mentioned in Section~\ref{sec:concepts}. (A short illustrative sketch of these calls appears just before the examples.)
\item[\texttt{Run(h5\_path)}]\footnote{There is another argument to this function---\texttt{no\_write=False}---but it is intended for use only internally by \texttt{Sequence}, which instantiates many runs but disables their functions for writing to file.} Sometimes you need more than just the globals and results in single-shot mode. In fact, you cannot produce any results without having access to measurement data---that is, traces and images. \texttt{Run} objects provide methods for obtaining this data from an h5 file. They also provide methods for saving your results back to the same h5 file.
\begin{description}
\item[\texttt{t, V = Run.get\_trace(name)}] Returns an array of times and an array of voltages for an analogue input trace named \texttt{name}, as specified in a call to \texttt{AnalogIn.acquire} in \texttt{labscript}.
\item[\texttt{im = Run.get\_image(orientation,label,image)}] Returns an image (as an array) from the camera with specified orientation (eg \emph{side}, \emph{top}), image label (eg \emph{fluorescence}, \emph{absorption}), and specific image name (eg \emph{OD}, \emph{atoms}, \emph{flat}).
\item[\texttt{Run.save\_result(name,value)}] Saves a single-value result to the hdf5 file. The result will be saved as an attribute to the group \texttt{/results/} \texttt{your\_script's\_filename}, with the attribute \texttt{name}. Results saved in this way will be available to subsequent routines in the \texttt{DataFrame}s and \texttt{Series} returned by \texttt{data()} under the hierarchy \texttt{dataframe[your\_script's\_ filename, your\_result's\_name]}.
\item[\texttt{Run.save\_result\_array(name,data)}] This method saves an array which can be any numpy datatype convertible to hdf5 datatypes (which is pretty much any numpy array, including numpy `record' arrays---those are the ones with named columns). The array will be saved in a dataset under the group \texttt{/results/} \texttt{your\_script's\_filename}, with the dataset's name being \texttt{name}. It will not be accessible alongside globals and single-value results, but can be accessed with the \texttt{get\_result\_array} method.
\item[\texttt{arr = Run.get\_result\_array(group,name)}] This returns a numpy array as saved by the \texttt{save\_result\_array} method. The \texttt{group} argument specifies the name of the group that the result array was saved to within the results group of the hdf5 file. This will then be the filename of the analysis routine which saved the result.
\item[\texttt{Run.set\_group(groupname)}] When running \texttt{Python} in interactive mode, the \texttt{Run} object can't know what filename to use as the hdf5 group name to which results are saved with \texttt{save\_result} and \texttt{save\_result\_array}. So if you try to instantiate a \texttt{Run} object in interactive mode, you'll be prompted to call this method to set what the group name should be instead.
\item[\texttt{t1, V1,...tn, Vn = Run.get\_trace(name\_1,..., name\_n)}]\
A convenience method for getting many traces at once.
\item[\texttt{Run.save\_results(name\_1,value\_1..., name\_n, value\_n)}]\
A convenience method for saving many results at once.
\item[\texttt{arr1,...arrn = Run.get\_result\_arrays(group, name\_1,..., name\_n)}]\
A convenience method for getting many result arrays at once, provided they are within the same group.
\item[\texttt{Run.save\_result\_arrays(name\_1,data\_1..., name\_n, data\_n)}]\
A convenience method for saving many result arrays at once.
\end{description}
\item[\texttt{Sequence(h5\_path,run\_paths)}] A \texttt{Sequence} object represents many runs. It provides methods for getting data from the runs, and for saving the results of multi-shot analyses to the file specified by \texttt{h5\_path}, which should be the filepath that \texttt{lyse} provides to your multi-shot analysis script. \texttt{run\_paths} should be a list of filepaths that you would like to be included in this \texttt{Sequence}. You can pull out these filenames from the \texttt{DataFrame} provided by the \texttt{data()} function with \texttt{df['filepaths']}. You might use this to pass in the filepaths for only a subset of the shots. You can also pass in the entire \texttt{DataFrame} as the \texttt{run\_paths}, and if it contains a column called \texttt{'filepaths'}, then those filepaths will be used.
\begin{description}
\item[\texttt{Sequence.runs}] The sequence object contains a \texttt{Run} object for each of the files in \texttt{run\_paths}. \texttt{Sequence.runs} is a dictionary of these \texttt{Run} objects, keyed by filepath. This dictionary is mainly for internal use by the \texttt{Sequence} object, but is included here in case you really do need to delve into the data from individual shots during a multi-shot routine. All methods of these \texttt{Run} objects that would write to their hdf5 file have been disabled.
\item[\texttt{Sequence.get\_result\_array}] Takes the same arguments as \texttt{Run.get\_result\_array}, and returns a dictionary of result arrays, one for each run, keyed by filepath.
\item[\texttt{Sequence.get\_trace}] Takes the same arguments as \texttt{Run.get\_trace}, and returns a dictionary of $(t, V)$ tuples, one for each run, keyed by filepath.
\end{description}
The \texttt{Sequence} object also has the methods \texttt{save\_result}, \texttt{save\_result\_array}, \texttt{save\_results} and \texttt{save\_result\_arrays}, which work identically to equivalent methods in the \texttt{Run} object\footnote{\texttt{Sequence} is actually a subclass of \texttt{Run}}, the only difference being that you're saving the results to the h5 file associated with the \texttt{Sequence} object, rather than a file associated with a single shot.
\end{description}
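Before the full examples below, here is a minimal, illustrative sketch of the API described above. The trace name \texttt{photodiode} is hypothetical, and the \texttt{data()} call assumes a running \texttt{lyse} instance:
\begin{verbatim}
from lyse import *

# Multi-shot style: grab the current DataFrame from the local lyse instance.
df = data()

# Single-shot style: 'path' is the global variable that lyse injects.
run = Run(path)
t, V = run.get_trace('photodiode')        # hypothetical trace name
run.save_result('peak_voltage', V.max())  # shows up later via data()
\end{verbatim}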
\section{Examples}
\subsection{Single-shot example}
\python{listing_2.py}
\subsection{Multi-shot example}
\python{listing_3.py}
\end{document}
\section{Appendix: The Initial Dynamic Basis}
\label{init-dyn-bas-app}
We\index{76.1} shall indicate components of the initial basis by the subscript 0.
\insertion{\thelibrary}
The initial dynamic basis is $\B_0 = \F_0,\G_0,\E_0$,
where $\F_0 = \emptymap$, $\G_0 = \emptymap$ and $\E_0 = (\SE_0, \TE_0,\VE_0)$,
where $\SE_0 = \emptymap$, $\TE_0$ is shown in Figure~\ref{dynTE0.fig} and
\medskip
$\VE_0 = \{\boxml{=}\mapsto(\boxml{=},\isv),\,\boxml{:=}\mapsto(\boxml{:=},\isv),\,\boxml{Match}\mapsto(\boxml{Match},\ise), \,\boxml{Bind}\mapsto(\boxml{Bind},\ise),$\\
\vrule height0pt width21mm depth 0pt$\boxml{true}\mapsto(\boxml{true},\isc),\,\boxml{false}\mapsto(\boxml{false},\isc),\,$\\
\vrule height0pt width21mm depth 0pt$\boxml{nil}\mapsto(\boxml{nil},\isc),\,
\hbox{\boxml{::}}\mapsto(\hbox{\boxml{::}},\isc),\,
\hbox{\boxml{ref}}\mapsto(\hbox{\boxml{ref}},\isc)\}$.
\deletion{\thelibrary}{
The initial dynamic basis is
\[ \B_0\ =\ \F_0,\G_0,\E_0\]
where
\begin{itemize}
\item $\F_0\ =\ \emptymap$
\item $\G_0\ =\ \emptymap$
\item $\E_0\ =\ \E_0'+\E_0''$
\end{itemize}
$\E_0'$ contains bindings of identifiers to the basic values BasVal and
basic exception names \BasExc; in fact
~$\E_0'\ =\ \SE_0',\VE_0',\EE_0'$~, where:
\begin{itemize}
\item $\SE_0'\ =\ \emptymap$
\item $\VE_0' = \{{\it id}\mapsto{\it id}\ ;\ {\it id}\in\hbox{BasVal}\}
+\{\hbox{\boxml{:=}}\mapsto\hbox{\boxml{:=}}\} +\EE_0'$\\
\vrule height0pt width3mm depth 0pt$+\;\{\boxml{true}\mapsto\boxml{true},\,\boxml{false}\mapsto\boxml{false},\,\boxml{nil}\mapsto\boxml{nil},\,
\hbox{\boxml{::}}\mapsto\hbox{\boxml{::}},\,
\hbox{\boxml{ref}}\mapsto\hbox{\boxml{ref}}\}$
\item $\EE_0'\ =\ \{\id\mapsto\id\ ;\ \id\in$ \BasExc$\}$
\end{itemize}
}
\deletion{\thelibrary}{Note that $\VE_0'$ is the identity function on BasVal; this is because
we have chosen to denote these values by the names of variables
to which they are initially bound.
The semantics of these basic values (most of which are functions)
lies principally in their behaviour under APPLY, which we describe below.
On the other hand the semantics of \boxml{:=} is provided by a special
semantic rule, rule~\ref{assapp-dyn-rule}.
Similarly, $\EE_0'$ is the identity function on \BasExc, the set of
basic exception names, because we have also chosen
these names to be just those exception constructors to which they
are initially bound.
These exceptions are raised by APPLY as described below.
$\E_0''$ contains initial variable bindings which, unlike BasVal, are
definable in ML; it is the result of evaluating
the following declaration in the basis $\F_0,\G_0,\E_0'$. For convenience,
we have also included all basic infix directives in this declaration.
\begin{verbatim}
infix 3 o
infix 4 = <> < > <= >=
infix 5 @
infixr 5 ::
infix 6 + - ^
infix 7 div mod / *
fun (F o G)x = F(G x)
fun nil @ M = M
| (x::L) @ M = x::(L @ M)
fun s ^ s' = implode((explode s) @ (explode s'))
fun map F nil = nil
| map F (x::L) = (F x)::(map F L)
fun rev nil = nil
| rev (x::L) = (rev L) @ [x]
fun not true = false
| not false = true
fun ! (ref x) = x
\end{verbatim}
}
% We now\index{77.1} describe the effect of APPLY upon each value
% $b\in\BasVal$. For special values, we shall normally use $i$, $r$,
% $n$, $s$ to range over integers, reals, numbers (integer or real),
% strings respectively. We also take the liberty of abbreviating
% ``APPLY(\mbox{{\tt abs}}, $r$)'' to ``\mbox{{\tt abs}}($r$)'',
% ``APPLY({\tt mod}, $\{\boxml{1}\mapsto i,\boxml{2}\mapsto d\}$)'' to
% ``$i\ {\tt mod}\ d$'', etc. .
% \begin{itemize}
% \item \verb+~+($n$)~ returns the negation of $n$, or the
% packet ~[{\tt Neg}]~ if the result is out of range.
% \item \mbox{{\tt abs}}($n$)~ returns the absolute value of $n$, or
% the packet ~[{\tt Abs}]~ if the result is out of range.
% \item {\tt floor}($r$)~ returns the largest integer $i$ not greater than $r$;
% it returns the packet ~[{\tt Floor}]~ if $i$ is out of range.
% \item {\tt real}($i$)~ returns the real value equal to $i$.
% \item {\tt sqrt}($r$)~ returns the square root of $r$, or the packet
% ~[{\tt Sqrt}]~ if r is negative.
% \item {\tt sin}($r$)~, {\tt cos}($r$)~ return the result of the appropriate
% trigonometric functions.
% \item {\tt arctan}($r$)~ returns the result of the appropriate
% trigonometric function in the range $\underline{+}\pi/2$.
% \item {\tt exp}($r$)~, {\tt ln}($r$)~ return respectively the exponential
% and the natural logarithm of $r$, or an exception packet
% ~[{\tt Exp}]~ or ~[{\tt Ln}]~ if the result is out of range.
% \item {\tt size}($s$)~ returns the number of characters in $s$.
% \item {\tt chr}($i$)~ returns the character numbered $i$ (see Section~\ref{cr:speccon}) if $i$ is in the interval $[0,255]$, and the packet
% ~[{\tt Chr}]~ otherwise.
% \pagebreak
% \item {\tt ord}($s$)~ returns\index{78.0} the number of the first character
% in $s$ (an integer in the interval $[0,255]$, see Section~\ref{cr:speccon}),
% or the packet ~[{\tt Ord}]~ if $s$ is empty.
% \item {\tt explode}($s$)~ returns the list of characters (as single-character
% strings) of which $s$ consists.
% \item {\tt implode}($L$)~ returns the string formed by concatenating all members
% of the list $L$ of strings.
% \item The arithmetic\index{78.1} functions ~\verb+/+,\verb+*+,\verb-+-,\verb+-+~ all
% return the results of the usual
% arithmetic operations, or exception packets respectively
% [{\tt Quot}], [{\tt Prod}], [{\tt Sum}], [{\tt Diff}]
% if the result is undefined or out of range.
% \item $i\ {\tt mod}\ d$~,~$i\ {\tt div}\ d$~ return integers $r,q$
% (remainder, quotient) determined by the equation $d\times q +r=i$,
% where either $0\leq r<d$ or $d<r\leq 0$. Thus the remainder has the
% same sign as the divisor $d$. The packet [{\tt Mod}] or
% [{\tt Div}] is returned if $d=0$.
% \item The order relations ~\verb+<+,\verb+>+,\verb+<=+,\verb+>=+~ return
% boolean values in accord with their usual meanings.
% \item $v_1$\verb+ = +$v_2$~ returns {\TRUE} or {\FALSE} according as
% the values $v_1$ and $v_2$ are, or are not, identical.
% The type discipline (in particular, the fact that function types
% do not admit equality) ensures that equality is only ever applied
% to special values, nullary constructors, addresses, and values
% built out of such by record formation and constructor application.
% %version 2:\item $v_1$\verb+ = +$v_2$~ returns the boolean value of $v_1=v_2$,
% % where the equality of values (=) is defined recursively as follows:
% % \begin{itemize}
% % \item If $v_1,v_2$ are constants (including nullary constructors) or
% % addresses, then $v_1=v_2$ iff $v_1$ and $v_2$ are identical.
% % \item $(\con_1,v_1)=(\con_2,v_2)$ iff $\con_1,\con_2$ are identical and
% % $v_1=v_2$.
% % \item $r_1=r_2$ (for records $r_1,r_2$) iff $\Dom r_1=\Dom r_2$ and, for
% % each $\lab\in\Dom r_1$, $r_1(\lab)=r_2(\lab)$.
% % \end{itemize}
% % The type discipline (in particular, the fact that function types
% % do not admit equality) makes it unnecessary to specify equality in
% % any other cases.
% \item $v_1$\verb+ <> +$v_2$ returns the opposite boolean value to
% $v_1$\verb+ = +$v_2$.
% \end{itemize}
% It remains to define the effect of APPLY upon basic values concerned with
% input/output; we therefore proceed to describe the ML input/output system.
%
% Input/Output in ML uses the concept of a {\sl stream}. A stream is a finite or
% infinite sequence of characters; if finite, it may or may not be terminated.
% (It may be convenient to think of a special end-of-stream character
% signifying termination, provided one realises that this ``character'' is
% never treated as data).
% Input streams -- or {\sl instreams} --
% are of type ~\INSTREAM~ and will be denoted by ~$is$~;
% output streams -- or {\sl outstreams} -- are of type ~\OUTSTREAM~ and will
% be denoted by ~$os$~. Both these types of stream are {\sl abstract}, in the
% sense that streams may only be manipulated by the functions provided in
% BasVal.
%
% Associated with an instream is a {\sl producer}, normally an I/O device or
% file; similarly an outstream is associated with a {\sl consumer}. After this
% association has been established -- either initially or by the ~{\tt open\_in}~
% or ~{\tt open\_out}~ function -- the stream acts as a vehicle for character
% transmission from producer to program, or from program to consumer.
% The association can be broken by the ~{\tt close\_in}~ or ~{\tt close\_out}~
% function.\index{79.1}
% A closed stream permits no further character transmission; a closed
% instream is equivalent to one which is empty and terminated.
% \pagebreak
%
% There\index{79.1.1} are two streams in BasVal:
% \begin{itemize}
% \item {\tt std\_in}: an instream whose producer is the terminal.
% \item {\tt std\_out}: an outstream whose consumer is the terminal.
% \end{itemize}
% The other basic values concerned with Input/Output are all functional, and
% the effect of APPLY upon each of them given below. We take the
% liberty of abbreviating ``APPLY({\tt open\_in}, $s$)'' to
% ``{\tt open\_in}($s$)''
% etc., and
% we shall use ~$s$~ and ~$n$~ to range over strings and integers
% respectively.
% \begin{itemize}
% \item {\tt open\_in}($s$)~ returns a new instream ~$is$~, whose producer is
% the external file named ~$s$~. It returns exception packet
% \begin{quote}
% [$(${\tt Io},\verb+"Cannot open +$s$\verb+"+$)$]
% \end{quote}
% if file ~$s$~ does not exist or does not provide read access.
% \item {\tt open\_out}($s$)~ returns a new outstream ~$os$~, whose consumer is
% the
% external file named ~$s$~. If file $s$ is non-existent, it is taken to
% be initially empty. \ insertion{\thecommentary}{Any existing contents of the file $s$ are
% lost. The exception packet
% \begin{quote}
% [$(${\tt Io},\boxml{"Cannot open }$s$\boxml{"}$)$]
% \end{quote}
% is returned if write access to the file ~$s$~ is not provided.}
% \item {\tt input}($is,n$)~ returns a string ~$s$~ containing the first $n$
% characters of ~$is$~, also removing them from ~$is$~. If only $k<n$
% characters are available on ~$is$~, then
% \begin{itemize}
% \item If ~$is$~ is terminated after these $k$ characters, the
% returned string
% ~$s$~ contains them alone, and they are removed from ~$is$~.
% \item Otherwise no result is returned until the producer of ~$is$~
% either supplies $n$ characters or terminates the stream.
% \end{itemize}
% \item {\tt lookahead}($is$)~ returns a single-character string ~$s$~ containing
% the next character of ~$is$~, without removing it. If no character is
% available on ~$is$~ then
% \begin{itemize}
% \item If ~$is$~ is closed, the empty string is returned.
% \item Otherwise no result is returned until the producer of ~$is$~
% either supplies a character or closes the stream.
% \end{itemize}
% \item {\tt close\_in}($is$)~ empties and terminates the instream ~$is$~ .
% \item {\tt end\_of\_stream}($is$)~ returns {\TRUE} if
% ~{\tt lookahead}($is$)~ returns the empty string, {\FALSE} otherwise;
% it detects the end of the instream ~$is$~.
% \item {\tt output}($os,s$)~ writes the characters of ~$s$~ to the outstream
% ~$os$~, unless ~$os$~ is closed, in which case it returns the exception
% packet
% \begin{quote}
% [$(${\tt Io},\verb+"Output stream is closed"+$)$]
% \end{quote}
% \item {\tt close\_out}($os$)~ terminates the outstream ~$os$~.\index{79.2}
% \end{itemize}
% %\end{document}
%
% % Constructors
% %\newcommand{\FALSE}{\mbox{\tt false}}
% %\newcommand{\TRUE}{\mbox{\tt true}}
% %\newcommand{\NIL}{\mbox{\tt nil}}
% %\newcommand{\REF}{\mbox{\tt ref}}
% %\newcommand{\UNIT}{\mbox{\tt unit}}
%
% % Basic Values BasVal
% %\newcommand{\STDIN}{\mbox{\tt std\_in}}
% %\newcommand{\STDOUT}{\mbox{\tt std\_out}}
\insertion{\thedatatyperepl}{
\begin{figure}[h]
\begin{center}
\begin{tabular}{|rll|}
\hline
$\tycon$ & $\mapsto$ & $\{\vid_1\mapsto(\V_1,\is_1),\ldots,\vid_n\mapsto(\V_n,\is_n)\}\quad (n\geq0)$\\
\hline
\UNIT & $\mapsto $ & $\emptymap$ \\
\BOOL & $\mapsto $ & $\{\TRUE\mapsto(\TRUE,\isc),
\ \FALSE\mapsto(\FALSE,\isc)\}$\\
\INT & $\mapsto $ & $\{\}$\\
\WORD & $\mapsto $ & $\{\}$\\
\REAL & $\mapsto $ & $\{\}$\\
\STRING & $\mapsto $ & $\{\}$\\
%\UNISTRING & $\mapsto $ & $\{\}$\\
\CHAR & $\mapsto $ & $\{\}$\\
%\UNICHAR & $\mapsto $ & $\{\}$\\
\LIST & $\mapsto $ & $\{\NIL\mapsto(\NIL,\isc),\ml{::}\mapsto(\ml{::},\isc)\}$\\
\REF & $\mapsto $ & $\{\REF\mapsto(\REF,\isc)\}$\\
\EXCN & $\mapsto $ & $\emptymap$\\
\hline
\end{tabular}
\end{center}
\caption{Dynamic $\TE_0$}
\label{dynTE0.fig}
\end{figure}}
\documentclass{goose-article}
\usepackage{lipsum}
% \usepackage{lineno}
% \linenumbers
\title{goose-article: customized \LaTeX-article}
\author[1]{T.W.J.~de~Geus$^{*,}$}
\affil[1]{
Physics Institute,
\'{E}cole Polytechnique F\'{e}d\'{e}rale de Lausanne (EPFL) \nl
Switzerland}
\contact{
$^*$Contact:
\href{mailto:[email protected]}{[email protected]}
\hspace{1mm}--\hspace{1mm}
\href{http://www.geus.me}{www.geus.me}}
\hypersetup{pdfauthor={T.W.J. de Geus}}
\header{goose-article}
\begin{document}
\maketitle
\begin{abstract}
\noindent
\lipsum[1]
\keywords{\LaTeX-style; example}
\end{abstract}
\section{Introduction}
\lipsum[2-4] \citep{Geus10,Geus11,Geus12,Geus13,Geus14,Geus15,Geus16,Geus17}
\begin{equation}
f(x) = x^2
\end{equation}
\lipsum[5-6]
\begin{figure}[htp]
\centering
\includegraphics[width=.5\linewidth]{example-image-a}
\caption{Caption here}
\label{fig:a}
\end{figure}
\section{Body}
\lipsum[7-12] \citet{Geus10,Geus11,Geus12,Geus13,Geus14,Geus15,Geus16,Geus17,Geus19} ...
\begin{figure}[htp]
\centering
\includegraphics[width=.5\linewidth]{example-image-b}
\caption{Caption here}
\label{fig:b}
\end{figure}
\bibliographystyle{unsrtnat_hyperlink}
\bibliography{refs}
\end{document}
\subsubsection{{{qname}}}
\label{{sec:{qname}}}
{comment}
{structure}
As shown in \ref{{fig:{class_name}}}, the \sbol{{{class_name}}} class includes the following properties: {properties}.
\begin{{figure}}[ht]
\begin{{center}}
\includegraphics[scale=0.6]{{sbol_classes/{class_name}}}
\caption[]{{Diagram of the \sbol{{{class_name}}} abstract class and its associated properties}}
\label{{fig:{class_name}}}
\end{{center}}
\end{{figure}}
\begin{itemize}
\item \label{sec:sbol:displayId}
The \sbol{displayId} property is an OPTIONAL identifier with a data type of \paml{string} (and REQUIRED for objects with URL identifiers). This property is intended to be an intermediate between a URI and the \sbol{name} property that is machine-readable, but more human-readable than the full URI of an object.
If set, its \paml{string} value MUST be composed of only alphanumeric or underscore characters and MUST NOT begin with a digit.
\item \label{sec:sbol:name}
The \sbol{name} property is OPTIONAL and has a data type of \paml{string}. This property is intended to be displayed to a human when visualizing an \sbol{Identified} object.
If an \sbol{Identified} object lacks a name, then software tools SHOULD instead display the object's \sbol{displayId} or URI.
\item \label{sec:sbol:description}
The \sbol{description} property is OPTIONAL and has a data type of \paml{string}. This property is intended to contain a more thorough text description of an \sbol{Identified} object.
\item \label{sec:prov:wasDerivedFrom}
The \prov{wasDerivedFrom} property MAY contain any number of \paml{URI}s. This property is defined by the PROV-O ontology and is located in the \url{https://www.w3.org/ns/prov#} namespace.
\item \label{sec:prov:wasGeneratedBy}
The \prov{wasGeneratedBy} property MAY contain any number of \paml{URI}s. This property is defined by the PROV-O ontology and is located in the \url{https://www.w3.org/ns/prov#} namespace.
\item \label{sec:sbol:hasMeasure}
The \sbol{hasMeasure} property MAY contain any number of \paml{URI}s, each of which refers to a \om{Measure} object that describes a measured parameter for this object.
\end{itemize}
\subsubsection{sbol:TopLevel}
\label{sec:sbol:TopLevel}
\sbol{TopLevel} is an abstract class that is extended by any \sbol{Identified} class that can be found at the top level of a PAML or SBOL document or file.
In other words, \sbol{TopLevel} objects are never nested inside of any other object as a child object.
The \sbol{TopLevel} classes defined in PAML are \paml{Protocol} and \paml{Primitive}.
\begin{figure}[ht]
\begin{center}
\includegraphics[scale=0.6]{sbol_classes/toplevel}
\caption[]{Classes that inherit from the \sbol{TopLevel} abstract class.}
\label{uml:toplevel}
\end{center}
\end{figure}
\begin{itemize}
\item \label{sec:sbol:hasNamespace}
The \sbol{hasNamespace} property is REQUIRED and MUST contain a single \paml{URI} that defines the namespace portion of URLs for this object and any child objects.
If the URI for the \sbol{TopLevel} object is a URL, then the URI of the \sbol{hasNamespace} property MUST prefix match that URL.
\item
\label{sec:sbol:hasAttachment}
The \sbol{hasAttachment} property MAY have any number of \paml{URI}s, each referring to an \external{sbol:Attachment} object.
\end{itemize}
\subsubsection{sbol:Component}
\label{sec:sbol:Component}
The \sbol{Component} class represents the structural and/or functional entities of a biological design.
In PAML, this is primarily used to represent the design of experimental samples as combinations of entities such as strains, genetic constructs, media, inducers, etc.
As shown in \ref{uml:component}, the \sbol{Component} class describes a design entity using a number of different properties.
In many PAML usages, however, a \sbol{Component} will simply be used as a pointer to an external description of a material to be manipulated, and the only property required for interpreting PAML will be \sbolmult{type:C}{type}.
\begin{figure}[ht]
\begin{center}
\includegraphics[scale=0.6]{sbol_classes/component}
\caption[]{Diagram of the \sbol{Component} class and its associated properties.}
\label{uml:component}
\end{center}
\end{figure}
\begin{itemize}
\item \label{sec:sbol:type:C}
The \sbolmult{type:C}{type} property MUST have one or more \paml{URI}s specifying the category of biochemical or physical entity (for example DNA, protein, or simple chemical) that a \sbol{Component} object represents.
\item \label{sec:sbol:hasFeature}
The \sbol{hasFeature} property MAY have any number of \paml{URI}s, each referencing a \sbol{Feature} object. Each \sbol{Feature} represents a specific occurrence of a part, subsystem, or other notable aspect within that design, such as an ingredient in the composition of a growth medium.
\\{\em This is not typically required for specifying protocols in PAML.}
\item \label{sec:sbol:role:C}
The \sbolmult{role:C}{role} property MAY have any number of \paml{URI}s, which MUST identify terms from ontologies that are consistent with the \sbolmult{type:C}{type} property of the \sbol{Component}.
\\{\em This is not typically required for specifying protocols in PAML.}
\item \label{sec:sbol:hasSequence:C}
The \sbolmult{hasSequence:C}{hasSequence} property MAY have any number of \paml{URI}s, each referencing a \external{sbol:Sequence} object. These objects define the primary structure or structures of the \sbol{Component}.
\\{\em This is not typically required for specifying protocols in PAML.}
\item \label{sec:sbol:hasConstraint}
The \sbol{hasConstraint} property MAY have any number of \paml{URI}s, each referencing a \external{sbol:Constraint} object.
These objects describe, among other things, any restrictions on the relative, sequence-based positions and/or orientations of the \sbol{Feature} objects contained by the \sbol{Component}, as well as spatial relations such as containment and identity relations.
\\{\em This is not typically required for specifying protocols in PAML.}
\item \label{sec:sbol:hasInteraction}
The \sbol{hasInteraction} property MAY have any number of \paml{URI}s, each referencing an \external{sbol:Interaction} object describing a behavioral relationship between \sbol{Feature}s in the \sbol{Component}.
\\{\em This is not typically required for specifying protocols in PAML.}
\item \label{sec:sbol:hasInterface}
The \sbol{hasInterface} property is OPTIONAL and MAY have a \paml{URI} referencing an \external{sbol:Interface} object that indicates the inputs, outputs, and non-directional points of connection to a \sbol{Component}.
\\{\em This is not typically required for specifying protocols in PAML.}
\item \label{sec:sbol:hasModel}
The \sbol{hasModel} property MAY have any number of \paml{URI}s, each referencing a \external{sbol:Model} object that links the \sbol{Component} to a computational model in any format.
\\{\em This is not typically required for specifying protocols in PAML.}
\end{itemize}
\subsection{Ontology of Units of Measure}
In most cases where a number is needed in PAML, that number is a measure with units associated with it.
The Ontology of Units of Measure (OM) (\url{http://www.ontology-of-units-of-measure.org/resource/om-2}) already defines a data model for representing measures and their associated units.
A subset of OM, already adopted by SBOL, is used for this purpose by PAML as well.
The key class used is \om{Measure}, which associates a number with a unit and a biology-related property.
In most cases, it should be possible to use one of the \external{om:Unit} instances already defined by OM; when this is not possible, an appropriate unit can be defined using \external{om:Unit} and \external{om:Prefix} classes.
\subsubsection{om:Measure} \label{sec:om:Measure}
The purpose of the \om{Measure} class is to link a numerical value to a \external{om:Unit}.
\begin{itemize}
\item \label{sec:om:hasNumericalValue}
The \om{hasNumericalValue} property is REQUIRED and MUST contain a single \paml{float}.
\item \label{sec:om:hasUnit:Measure}
The \ommult{hasUnit:Measure}{hasUnit} property is REQUIRED and MUST contain a \paml{URI} that refers to a \external{om:Unit}.
\item \label{sec:sbol:type:Measure}
The \sbolmult{type:Measure}{type} property MAY contain any number of \paml{URI}s. It is RECOMMENDED that one of these \paml{URI}s identify a term from the Systems Description Parameter branch of the Systems Biology Ontology (SBO) (\url{http://www.ebi.ac.uk/sbo/main/}). This \sbolmult{type:Measure}{type} property was added by SBOL to describe different types of parameters
(for example, rate of reaction is identified by the SBO term \url{http://identifiers.org/SBO:0000612}).
\end{itemize}
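As a non-normative illustration, the following Python snippet uses the generic \texttt{rdflib} library to express a measure of 10 millilitres with the OM terms above; the \texttt{http://example.org/} namespace and the object name are placeholders:
\begin{verbatim}
from rdflib import Graph, Literal, Namespace, RDF
from rdflib.namespace import XSD

OM = Namespace('http://www.ontology-of-units-of-measure.org/resource/om-2/')
EX = Namespace('http://example.org/')   # placeholder namespace

g = Graph()
m = EX['measure1']                      # placeholder identity
g.add((m, RDF.type, OM.Measure))
g.add((m, OM.hasNumericalValue, Literal(10.0, datatype=XSD.float)))
g.add((m, OM.hasUnit, OM.millilitre))   # an om:Unit defined by OM
print(g.serialize(format='turtle'))
\end{verbatim}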
\subsection{Recommended Ontologies for External Terms}
\label{sec:recomm_ontologies}
External ontologies and controlled vocabularies are an integral part of SBOL and thus used by PAML as well. SBOL uses \paml{URI}s to access existing biological information through these resources.
Although RECOMMENDED ontologies have been indicated in relevant sections where possible, other resources providing similar terms can also be used. A summary of these external sources can be found in \ref{tbl:preferred_external_resources}.
The URIs for ontological terms SHOULD come from \url{identifiers.org}. However, it is acceptable to use terms from \url{purl.org} as an alternative, for example when RDF tooling requires URIs to be represented as compliant QNames, and software may convert between these forms as required.
\begin{table}[htp]
\begin{edtable}{tabular}{p{2cm}p{1.5cm}p{5cm}p{6cm}}
\toprule
\textbf{SBOL Entity} & \textbf{Property} & \textbf{Preferred External Resource}
& \textbf{More Information} \\
\midrule
\textbf{Component} & type & SBO (physical entity branch)& \url{http://www.ebi.ac.uk/sbo/main/}\\
& type & SO (nucleic acid topology)& \url{http://www.sequenceontology.org}\\
& role & SO (\textit{DNA} or \textit{RNA}) & \url{http://www.sequenceontology.org} \\
& role & CHEBI (\textit{small molecule}) & \url{https://www.ebi.ac.uk/chebi/} \\
& role & PubChem (\textit{small molecule}) & \url{https://pubchem.ncbi.nlm.nih.gov/} \\
& role & UniProt (\textit{protein}) & \url{https://www.uniprot.org/} \\
& role & NCIT (\textit{samples}) & \url{https://ncithesaurus.nci.nih.gov/} \\
\textbf{om:Measure} & type & SBO (systems description parameters) &
\url{http://www.ebi.ac.uk/sbo/main/} \\
\bottomrule
\end{edtable}
\caption{Preferred external resources from which to draw values for various SBOL properties.}
\label{tbl:preferred_external_resources}
\end{table}
%\documentclass[10pt]{beamer} % aspect ratio 4:3, 128 mm by 96 mm
\documentclass[10pt,aspectratio=169]{beamer} % aspect ratio 16:9, only frames
%\documentclass[10pt,aspectratio=169,notes]{beamer} % aspect ratio 16:9, frames+notes
%\documentclass[10pt,aspectratio=169,notes=only]{beamer} % aspect ratio 16:9, notes only
\usepackage{pgfpages}
%\setbeameroption{show notes}
%\setbeameroption{show notes on second screen=right}
%\setbeameroption{show notes on second screen=bottom} % does not work for animations
%\graphicspath{{../../figures/}}
\graphicspath{{figs/}{figs/longitudinal-wave-png/}}
%\includeonlyframes{frame1,frame2,frame3}
%\includeonlyframes{frame10}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Packages
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usepackage{appendixnumberbeamer}
\usepackage{booktabs}
\usepackage{csvsimple} % for csv read
\usepackage[scale=2]{ccicons}
\usepackage{pgfplots}
\usepackage{xspace}
\usepackage{amsmath}
\usepackage{totcount}
\usepackage{tikz}
\usepackage{bm}
%\usepackage{FiraSans}
\usepackage{mathrsfs} % for Fourier and Laplace symbols
\usepackage{verbatim}
%\usepackage{eulervm} % alternative math fonts
%\usepackage{comment}
\usetikzlibrary{external} % speedup compilation
%\tikzexternalize % activate!
%\usetikzlibrary{shapes,arrows}
% the animations are only supported by some pdf readers (AcrobatReader, PDF-XChange, acroread, and Foxit Reader)
\usepackage{animate}
%\usepackage[draft]{animate}
\usepackage{ifthen}
\newcounter{angle}
\setcounter{angle}{0}
%\usepackage{bibentry}
%\nobibliography*
\usepackage{caption}%
\captionsetup[figure]{labelformat=empty}%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Metropolis theme custom modification file
\input{metropolis_mods.tex}
%\usefonttheme[onlymath]{Serif}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Custom commands
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% matrix command
%\newcommand{\matr}[1]{\mathbf{#1}} % bold upright (Elsevier, Springer)
% metropolis compatible (FiraSans auto replacement)
\newcommand{\matr}[1]{\boldsymbol{#1}}
%\newcommand{\matr}[1]{#1} % pure math version
%\newcommand{\matr}[1]{\bm{#1}} % ISO complying version
% vector command
%\newcommand{\vect}[1]{\mathbf{#1}} % bold upright (Elsevier, Springer)
% metropolis compatible (FiraSans auto replacement)
\newcommand{\vect}[1]{\boldsymbol{#1}}
% bold symbol
\newcommand{\bs}[1]{\boldsymbol{#1}}
% derivative upright command
\DeclareRobustCommand*{\drv}{\mathop{}\!\mathrm{d}}
\newcommand{\ud}{\mathrm{d}}
\newcommand{\myexp}{\mathrm{e}}
%
\newcommand{\themename}{\textbf{\textsc{metropolis}}\xspace}
\renewcommand{\Re}{\operatorname{\mathbb{R}e}}
\renewcommand{\Im}{\operatorname{\mathbb{I}m}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Title page options
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% \date{\today}
\date{}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% option 1
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\title{Fundamentals of elastic wave propagation phenomenon}
\subtitle{Lecture Series}
\author{\textbf{Paweł Kudela} }
% logo align to Institute
\institute{Institute of Fluid Flow Machinery\\Polish Academy of Sciences \\ \vspace{-1.5cm}\flushright \includegraphics[width=4cm]{//odroid-sensors/sensors/MISD_shared/logo/logo_eng_40mm.eps}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\tikzexternalize % activate!
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\maketitle
%%%%%%%%%%%%%%%%%%%%
\note{Welcome. My name is Pawel Kudela.
I am an associate professor at the Institute of Fluid Flow Machinery, Polish Academy of Sciences.
Today I will be talking about waves, with a focus on the elastic wave propagation phenomenon. I will also talk about its usefulness in practical applications.
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% SLIDES
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[allowframebreaks]{Table of contents}
\setbeamertemplate{section in toc}[sections numbered]
\tableofcontents[hideallsubsections]
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Types of wave}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[label=frame2]{Types of wave}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{equation*}
\frac{\partial^2 \Psi(x,t)}{\partial t^2} = \upsilon_p^2 \, \frac{\partial^2 \Psi(x,t)}{\partial x^2}
\end{equation*}
\begin{figure}
\includegraphics[width=0.9\textwidth]{wave_types.png}
\end{figure}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
This is the simplest form of the wave equation. The second partial derivative of a physical quantity with respect to time equals the coefficient \(\upsilon_p\) squared times the second partial derivative with respect to space. The coefficient \(\upsilon_p\) usually refers to the wave velocity.
This equation can describe the problem of a taut string, sound waves, electromagnetic waves and longitudinal waves propagating in a rod.
Of course psi is a different quantity for each phenomenon; for a vibrating taut string it is the displacement, for sound waves it is the pressure.
Electromagnetic waves are created by coupled electric and magnetic fields.
Waves propagating in a rod will be considered in terms of longitudinal displacements.
It is interesting to note that electromagnetic waves do not need a medium to travel.
It is also important to mention that propagating waves transmit energy from one point to another, but without transporting mass.
Propagating waves can also be used for sending information.
}
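% A quick sanity check (sketch): any twice-differentiable profile travelling
% at speed v_p solves the equation above. Put Psi(x,t) = f(x - v_p t); the
% chain rule gives d^2 Psi/dt^2 = v_p^2 f'' and d^2 Psi/dx^2 = f'', so the
% two sides agree for every such f.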
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Types of wave}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Various classifications of elastic waves can be used.
Based on oscillation form:
\begin{itemize}
\item standing waves
\item propagating (progressing) waves
\end{itemize}
Based on analysed physical quantity:
\begin{itemize}
\item electromagnetic waves (Microwaves, X-ray, radio waves, ultraviolet waves)
\item mechanical waves (water waves, ultrasonic waves, elastic waves, guided waves)
\item matter waves
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{\footnotesize
There are so many types of waves that it is easy to get confused.
That is why we will start with the basic principles first and then progress towards more complex problems.
Based on oscillation form we can divide waves into standing waves and propagating (progressing) waves.
An example of a standing wave would be a vibrating guitar string or a structure vibrating at a harmonic frequency.
An example of a propagating wave would be a sound wave or a seismic wave.
Based on the analysed physical quantity we can distinguish electromagnetic waves, mechanical waves and matter waves.
All light waves, such as microwaves, X-rays, radio waves and ultraviolet waves, are examples of electromagnetic waves.
They can propagate through a vacuum.
In contrast to electromagnetic waves, mechanical waves are not capable of transmitting their energy through a vacuum.
Mechanical waves require a medium in order to transport their energy from one location to another. They take the form of oscillations of particles in a medium.
Elastic and guided waves, on which I will elaborate more, belong to the group of propagating mechanical waves.
Matter waves are sometimes called de Broglie matter waves, after the scientist who proposed the then-speculative hypothesis that electrons and other particles of matter can behave like waves.
This is now a central part of the theory of quantum mechanics, being an example of wave-particle duality.
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Types of wave}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Based on frequency, acoustic waves can be divided into:
\begin{itemize}
\item infrasonic waves: 0.001 Hz - 20 Hz (earthquake monitoring)
\item sound waves (audible): 20 Hz - 20 kHz
\item ultrasonic waves (non-audible for humans): above 20 kHz
\begin{itemize}
\item chemistry (sonication, ultrasonication, mixing, production of nanoparticles)
\item food processing (mixing, homogenization, emulsification): 20 kHz - 1 MHz
\item medical applications (e.g. ultrasonic nebulizer) and non-destructive testing: 20 kHz - 2 MHz
\item diagnostics and NDE: 2 MHz - 200 MHz
\end{itemize}
\end{itemize}
\vspace{10mm}
\begin{figure}
\includegraphics[width=0.9\textwidth]{Sound_wave_spectrum.png}
\end{figure}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
Our body's physical sensors respond to oscillations with a well-defined wavelength and frequency. We personally detect two kinds of waves in our environment to help us build a picture of the world we live in: sound and light.
Our detectors for these waves, ears and eyes, have evolved to respond differently to different frequencies, giving us the perception of pitch and color.
As a result, an analysis of signals in terms of sinusoidal oscillations tends to make sense to us.
The voice frequency band ranges from approximately 300 to 3400 Hz.
}
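% Orders of magnitude (illustrative, assuming c ~ 343 m/s in air and
% c ~ 5900 m/s for longitudinal waves in steel):
% 20 kHz in air: lambda = c/f ~ 343/20000 ~ 17 mm;
% 2 MHz in steel: lambda ~ 5900/(2e6) ~ 3 mm.
% This is why MHz-range NDE can resolve millimetre-scale defects.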
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Elastic waves}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{alertblock}{Elastic waves}
Elastic waves are mechanical waves propagating in an elastic medium as an effect of forces associated with volume deformation (compression and extension) and shape deformation (shear) of medium elements.
\end{alertblock}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Basic definitions}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[t]{Travelling line}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{columns}[T]
\begin{column}{0.5\textwidth}
\only<1->{
\begin{equation*}
y=\frac{1}{3} x
\end{equation*}
}
\only<2->{
\begin{equation*}
x \rightarrow x - 6 t
\end{equation*}
}
\only<3->{
\begin{equation*}
y=\frac{1}{3} (x-6 t)
\end{equation*}
}
\only<4->{
\begin{equation*}
\color{logoblue}
t=0 \quad y=\frac{1}{3} x
\end{equation*}
}
\only<5->{
\begin{equation*}
\color{green}
t=1 \quad y=\frac{1}{3} (x-6)
\end{equation*}
}
\end{column}
\begin{column}{0.5\textwidth}
\only<1-2>{
\includegraphics[width=0.9\textwidth]{travelling_line_1.png}
}
\only<3-4>{
\includegraphics[width=0.9\textwidth]{travelling_line_2.png}
}
\only<5-6>{
\includegraphics[width=0.9\textwidth]{travelling_line_3.png}
}
\only<6>{
\begin{flalign*}
&+ \textrm{direction} \quad x \rightarrow x - \upsilon t\\
&- \textrm{direction} \quad x \rightarrow x + \upsilon t
\end{flalign*}
}
\end{column}
\end{columns}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{We will start with a very simple equation: y equals one third of x, which is the equation of a straight line, as shown here.
Let's suppose that I want this line to move,
move it with a speed of 6 meters per second.
All I have to do now is to replace the x in the equation by x minus six t.
Notice the minus sign; it means that our line will go in the plus x direction.
The equation becomes: y equals one third of x minus six t.
And at the time t equal zero you have the original line.
At t equal one the line is parallel and has moved in this direction with the speed of six meters per second.
So it is telling us that if we ever want to move with the speed v in the plus direction, all we have to do is replace x in our equation with x minus v times t,
and if we want to move in the minus x direction we need to replace x with x plus v times t.
}
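% The same recipe works for any curve (sketch with made-up numbers): to move
% the parabola y = x^2 to the right at 4 m/s, replace x with x - 4t, giving
% y = (x - 4t)^2; replacing x with x + 4t moves it to the left instead.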
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[t]{Travelling wave}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{columns}[T]
\begin{column}{0.5\textwidth}
\only<1>{
\begin{equation*}
y=2 \sin (3 x)
\end{equation*}
}
\only<2->{
\begin{equation*}
y=2 \sin (\underbrace{3}_{k} x)
\end{equation*}
}
\only<3->{
\begin{equation*}
x \rightarrow x - 6 t
\end{equation*}
}
\only<4->{
\begin{equation*}
y=2 \sin \left(\underbrace{3}_{k} (x -\underbrace{6}_{\upsilon} t) \right)
\end{equation*}
}
\only<5->{
\begin{equation*}
\color{logoblue}
t=0 \quad y=2 \sin (3 x)
\end{equation*}
}
\only<6->{
\begin{equation*}
\color{green}
t=1 \quad y=2 \sin (3 x -18)
\end{equation*}
}
\end{column}
\begin{column}{0.5\textwidth}
\only<1-2>{
\includegraphics[width=0.9\textwidth]{travelling_sine_1.png}
}
\only<3-5>{
\includegraphics[width=0.9\textwidth]{travelling_sine_2.png}
}
\only<6>{
\includegraphics[width=0.9\textwidth]{travelling_sine_3.png}
}
\only<2-6>{
\begin{equation*}
k=\frac{2 \pi}{\lambda} = 3
\end{equation*}
}
\only<6>{
\begin{flalign*}
&+ \textrm{direction} \quad x \rightarrow x - \upsilon t\\
&- \textrm{direction} \quad x \rightarrow x + \upsilon t
\end{flalign*}
}
\end{column}
\end{columns}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{\footnotesize Now I'm going to change to something which is a real wave:
y equals twice the sine of three x; that's a wave, but it is not moving yet.
And lambda, which we call the wavelength, is in this case two pi over three.
We can also read off lambda as the distance between consecutive wave peaks.
I will introduce a new symbol k, which we call the wavenumber, and k is simply defined as
two pi over lambda, so in our case it is three. If you know this number you can immediately tell what the wavelength is.
Now I want this wave to move.
I want to have a travelling wave.
And I want to move it with the speed of 6 meters per second in the positive direction.
The recipe is very simple.
All I have to do is to replace x by x minus six t.
And now if you look at this equation and plot it a little bit later than time t equal zero, you will see that indeed it has moved in the positive direction.
And it is moving with the speed of 6 meters per second.
So this equation holds all the characteristics of the oscillation.
It holds the amplitude, which is 2.
k holds the information about the wavelength, the factor 6 tells you what the speed is, and the minus sign, which is important, tells you that the wave propagates in the positive direction.
If we want a wave travelling to the left, the minus direction, we have to have a plus sign here.
Peaks of the wave denoted by blue dots are called wave crests and valleys denoted by red dots are called troughs.
}
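% Worked numbers for this slide (sketch): from y = 2 sin(3(x - 6t)) we read off
% amplitude 2 and wavenumber k = 3 rad/m, hence wavelength
% lambda = 2 pi / k = 2 pi / 3 ~ 2.09 m, and speed v = 6 m/s in the +x
% direction (from the minus sign).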
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[t]{Travelling wave - basic definitions}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{columns}[T]
\begin{column}[T]{0.5\textwidth}
\only<1->{
\includegraphics[width=0.9\textwidth]{travelling_wave_string.png}
}
\only<1->{
\begin{equation*}
y=2 \sin \left(\underbrace{3}_{k} (x -\underbrace{6}_{\upsilon} t) \right)
\end{equation*}
}
\only<2->{
\begin{equation*}
y = 2 \sin (\underbrace{3}_{k} x -\underbrace{18}_{\omega} t)
\end{equation*}
}
\only<3>{\url{https://phet.colorado.edu/sims/html/wave-on-a-string/latest/wave-on-a-string_en.html}}
\end{column}
\begin{column}[T]{0.5\textwidth}
\centering
\only<1->{
Period of one oscillation
\begin{equation*}
T=\frac{2 \pi}{\omega}
\end{equation*}
Wavelength
\begin{equation*}
\lambda = \upsilon T = \frac{\upsilon}{f}
\end{equation*}
Frequency
\begin{equation*}
f = \frac{\upsilon}{\lambda}
\end{equation*}
}
\only<3->{
Velocity
\begin{equation*}
\boxed{\upsilon = \frac{\omega}{k}}
\end{equation*}
}
\end{column}
\end{columns}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{We can make such a travelling wave by attaching a string to a wheel rotating with angular frequency omega and radius R.
If the radius is two units it will give us the same amplitude.
The period of one oscillation is two pi over omega.
The wave is going to propagate with velocity v.
Hence, the distance travelled during one period T is the velocity v times T, which gives us the wavelength.
We can also define frequency as the ratio of velocity to wavelength.
And now I can write the equation in a somewhat different form, in which we can distinguish a spatial part and a time part.
This is the wavenumber k and this is now the angular frequency omega.
Then I can find the velocity, which is omega over k.
}
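% Cross-check of the boxed relation (sketch): y = 2 sin(3x - 18t) has
% k = 3 rad/m and omega = 18 rad/s, so T = 2 pi / 18 = pi/9 s,
% f = 1/T = 9/pi Hz, lambda = 2 pi / 3 m, and
% v = omega/k = 18/3 = 6 m/s = f * lambda, as expected.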
% https://www.compadre.org/nexusph/course/Sinusoidal_waves
%https://phet.colorado.edu/sims/html/wave-on-a-string/latest/wave-on-a-string_en.html
% propagating sine wave
% no damping, no end (infinite string under tension, oscilate)
% standing wave
% no damping, fixed end, oscilate
% first harmonic, f1=0.41 Hz
% second harmonic, f2=0.83 Hz
% third harmonic f3=1.24 Hz
% fourth harmonic f4=1.66 Hz
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[t]{Standing waves}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\only<1->{
\begin{alertblock}{Standing waves}
A standing wave (stationary wave) is a wave that remains in a constant position (there is no shifting of the waveform).
\end{alertblock}}
\only<2->{
\begin{columns}[T]
\begin{column}{0.5\textwidth}
\centering
\(\longrightarrow\)
\begin{equation*}
y_1 = y_0 \sin(k x - \omega t)
\end{equation*}
\end{column}
\begin{column}{0.5\textwidth}
\centering
\(\longleftarrow\)
\begin{equation*}
y_2 = y_0 \sin(k x + \omega t)
\end{equation*}
\end{column}
\end{columns}}
\only<3->{
\begin{equation*}
y = y_1 + y_2
\end{equation*}
\begin{equation*}
\sin(\theta_1) + \sin(\theta_2) = 2\, \sin\left( \frac{\theta_1 + \theta_2}{2} \right) \cos\left( \frac{\theta_1 - \theta_2}{2} \right)
\end{equation*}
\begin{equation*}
y = 2\, y_0 \sin(k x) \, \cos(\omega t)
\end{equation*}
}
\only<4->{
from boundary conditions
\begin{equation*}
y = 2\, y_0 \sin\left(\frac{m \pi}{L} x\right) \, \cos(\omega t)
\end{equation*}}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
A standing wave (stationary wave) is a wave that remains in a constant position.
There is no shifting of the waveform; the waveform always goes through the same nodes.
Standing waves can arise due to interference: the sum of two counter-propagating waves of equal amplitude and frequency creates a standing wave.
Standing waves commonly arise when a boundary blocks further propagation of the wave, causing wave reflection and therefore introducing a counter-propagating wave (an example is a guitar string).
Let's suppose that I have a wave travelling in the right direction and another wave which is identical in terms of amplitude, but travelling in the opposite direction.
The plus sign tells me that it is going in this direction.
So if this is a string, the net result is the sum of the two.
By employing a trigonometric manipulation like this we get this equation.
Notice that the amplitude has doubled.
And this is very different from a travelling wave.
Nowhere will you see kx minus omega t:
all the timing information is separate from the spatial information.
}
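% The trigonometric step spelled out (sketch): with theta_1 = kx - omega t and
% theta_2 = kx + omega t, (theta_1+theta_2)/2 = kx and
% (theta_1-theta_2)/2 = -omega t, so
% y_1 + y_2 = 2 y_0 sin(kx) cos(-omega t) = 2 y_0 sin(kx) cos(omega t).
% Nodes sit where sin(kx) = 0, i.e. at x = m pi / k.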
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Standing waves (normal modes)}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}
\animategraphics[controls,autoplay,loop,width=0.8\textwidth]{3}{standing_wave_}{1}{33}
\end{figure}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Basic terminology}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{columns}[T]
\begin{column}{0.5\textwidth}
\begin{figure}
\only<1>{
\includegraphics[width=0.9\textwidth]{Fig_1_7.jpg}
}
\only<2>{
\includegraphics[width=0.9\textwidth]{Fig_1_8.jpg}
}
\end{figure}
\end{column}
\begin{column}{0.5\textwidth}
\begin{itemize}
\item Phase velocity \(c_p=\frac{\omega}{k}\) - velocity of wave crests
\item Group velocity \(c_g = \frac{\drv \omega}{\drv k}\) - velocity of wave packets
\item Wavelength \(\lambda\) - distance between two consecutive crests or two consecutive troughs
\item Wavenumber \(k\)
\item Frequency \(f\)
\end{itemize}
\end{column}
\end{columns}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Wave modulation}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Consider two right-propagating waves of equal amplitude but different frequency and wavenumber
\begin{equation*}
u(x,t)=U_0 \left[ \sin(k_1 x - \omega_1 t) + \sin( k_2 x - \omega_2 t)\right]
\end{equation*}
\begin{equation*}
u(x,t)=2 U_0\, \cos\left(\frac{k_1- k_2}{2} x - \frac{\omega_1-\omega_2}{2} t\right) \, \sin\left(\frac{ k_1 + k_2}{2} x -\frac{ \omega_1 + \omega_2}{2} t \right)
\end{equation*}
\begin{equation*}
u(x,t)=2 U_0\, \color{darkblue}{\underbrace{\cos(\Delta k x - \Delta \omega t)}_{\textrm{Modulation}}} \, \color{red}{\underbrace{\sin( k_0 x - \omega t)}_{\textrm{Carrier wave}}}
\end{equation*}
\begin{itemize}
\item Beating phenomenon
\item Propagation of a wave packet (assembly of waves)
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
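% A concrete instance (illustrative numbers): two tones at f_1 = 100 Hz and
% f_2 = 104 Hz give a carrier at (f_1+f_2)/2 = 102 Hz and an envelope at
% (f_2-f_1)/2 = 2 Hz; the ear hears |f_1-f_2| = 4 beats per second, because
% the loudness peaks twice per envelope cycle.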
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Wave modulation}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}
\includegraphics[width=0.9\textwidth]{Fig_1_9.jpg}
\end{figure}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Wave modulation}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
The propagation speed of the modulating wave defines the propagation speed of the wave packet
\begin{equation*}
\Delta k x - \Delta \omega t = const \quad \rightarrow \quad x = \frac{\Delta \omega}{\Delta k} t + const
\end{equation*}
\begin{equation*}
c_g = \frac{\Delta \omega}{\Delta k}
\end{equation*}
In the limit \(\Delta \omega \rightarrow 0, \Delta k \rightarrow 0\)
\begin{equation*}
\hspace{2.8cm}\boxed{c_g = \frac{\drv \omega}{\drv k}} \quad \textrm{\color{red}{Group velocity}}
\end{equation*}
\begin{itemize}
\item \makebox[3.2cm][l]{non-dispersive media} \(c_g = c_p\)
\item \makebox[3.2cm][l]{dispersive media} \(c_g \neq c_p \)
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
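% A classic dispersive example (sketch, standard deep-water result): gravity
% waves on deep water obey omega = sqrt(g k), so c_p = omega/k = sqrt(g/k),
% while c_g = d(omega)/dk = (1/2) sqrt(g/k) = c_p / 2: the packet travels at
% half the speed of the individual crests.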
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Group vs phase velocity (1)}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}
\includegraphics[width=0.5\textwidth]{group_vs_phase_velocity.png}
\end{figure}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
phase velocity - the slope of the straight line connecting the origin with a point (k, omega) on the dispersion curve
group velocity - the slope of the tangent to the dispersion curve at that point
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Group vs phase velocity (2)}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{columns}[T]
\begin{column}{0.5\textwidth}
\begin{figure}
\animategraphics[autoplay,loop,width=0.6\textwidth]{5}{wave_phase_equal_group_velocity_}{1}{50}
\caption{\(c_g = c_p\)}
\end{figure}
\begin{figure}
\animategraphics[autoplay,loop,width=0.6\textwidth]{5}{wave_opposite_group_phase_velocity_}{1}{50}
\caption{\(c_g>0,\,c_p<0\)}
\end{figure}
\end{column}
\begin{column}{0.5\textwidth}
\begin{figure}
\animategraphics[autoplay,loop,width=0.6\textwidth]{5}{wave_phase_faster_than_group_velocity_}{1}{50}
\caption{\(c_p > c_g\)}
\end{figure}
\begin{figure}
\animategraphics[autoplay,loop,width=0.6\textwidth]{5}{wave_phase_slower_than_group_velocity_}{1}{50}
\caption{\(c_p < c_g\)}
\end{figure}
\end{column}
\end{columns}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
For positive group velocity and negative phase velocity the wave is dancing like Michael Jackson. It is like his feet are going to the left whereas his body is going to the right.
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{What is wave dispersion?}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Dispersion curves (1)}
\begin{alertblock}{Definition}
A \textbf{dispersion relation} relates the wavelength $\lambda$ or wavenumber $k$ of a wave to its frequency $\omega$.\\
\begin{equation*}
\boxed{k=k(\omega)} \quad \textrm{or} \quad \boxed{\omega = \omega(k)}
\end{equation*}
\vspace{10pt}
$k(\omega)$ $[\frac{\mathrm{rad}}{\mathrm{m}}] \quad k(f)$ $[\frac{1}{\mathrm{m}}]$
\end{alertblock}
\begin{block}{Phase velocity}
\begin{equation*}
c_p = \frac{\omega}{k} = const\quad \textrm{non-dispersive}
\end{equation*}
\end{block}
\begin{block}{Group velocity}
\begin{equation*}
c_g = \frac{\drv \omega}{\drv k} = const \quad \textrm{non-dispersive}
\end{equation*}
\end{block}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
What is the phenomenon of dispersion?
A dispersion relation is the relationship between the wavenumber (or wavelength) and frequency.
I usually use the wavenumber and the linear frequency, hence the unit of the wavenumber is one over a meter.
The condition necessary to have a non-dispersive wave is that the phase velocity must be constant.
It implies that the group velocity is also constant and actually equal to the phase velocity.
}
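% Units cross-check (sketch): with k = 2 pi / lambda [rad/m] the phase speed
% is c_p = omega/k; with the "linear" wavenumber k(f) = 1/lambda [1/m] the
% same speed reads c_p = f / k(f), since
% omega/k = (2 pi f)/(2 pi / lambda) = f * lambda.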
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Dispersion curves (2)}
\begin{figure}
\includegraphics[width=0.8\textwidth]{linear_dispersion.png}
\end{figure}
A nondispersive medium is characterized by:
\begin{itemize}
\item A linear dispersion relation
\item A phase velocity that is constant at all frequencies
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Dispersion effect (1)}
\begin{columns}[T]
\column{0.5\textwidth}
\begin{figure}
\includegraphics[width=0.7\textwidth]{dispersion/excitation_narrow_time.png}
\includegraphics[width=0.7\textwidth]{dispersion/excitation_wide_time.png}
\end{figure}
\column{0.5\textwidth}
\newcommand{\modelname}{dispersion_effect}
\begin{figure}
\includegraphics[width=0.7\textwidth]{dispersion/excitation_narrow_frequency.png}
\includegraphics[width=0.7\textwidth]{dispersion/excitation_wide_frequency.png}
\end{figure}
\end{columns}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
To demonstrate to you what the phenomenon of wave dispersion reveals, I have prepared an example.
Consider two signals: one with a 200 kHz carrier frequency that looks like this and the other with a 20 kHz carrier frequency that looks like this.
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Dispersion effect (2)}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{columns}[T]
\column{0.5\textwidth}
\only<1>{
\begin{figure}
\includegraphics[width=0.8\textwidth]{dispersion/A0_dispersion_less_dispersive.png}
\caption{Less dispersive region}
\end{figure}
\column{0.5\textwidth}
\newcommand{\modelname}{dispersion_effect}
\begin{figure}
\includegraphics[width=0.8\textwidth]{dispersion/A0_dispersion_dispersive.png}
\caption{Dispersive region}
\end{figure}
}
\only<2>{
\begin{figure}
\includegraphics[width=0.8\textwidth]{dispersion/A0_phase_velocity_less_dispersive.png}
\caption{Less dispersive region}
\end{figure}
\column{0.5\textwidth}
\newcommand{\modelname}{dispersion_effect}
\begin{figure}
\includegraphics[width=0.8\textwidth]{dispersion/A0_phase_velocity_dispersive.png}
\caption{Dispersive region}
\end{figure}
}
\only<3>{
\begin{figure}
\includegraphics[width=0.8\textwidth]{dispersion/A0_group_velocity_less_dispersive.png}
\caption{Less dispersive region}
\end{figure}
\column{0.5\textwidth}
\begin{figure}
\includegraphics[width=0.8\textwidth]{dispersion/A0_group_velocity_dispersive.png}
\caption{Dispersive region}
\end{figure}
}
\end{columns}
\begin{center}
\only<2>{\alert{Phase velocities}}
\only<3>{\alert{Group velocities}}
\end{center}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
Let's superimpose the frequency spectra on the dispersion curves.
The spectrum on the left is in a region with less dispersion than the spectrum on the right.
You can see that the frequencies of our signal on the left-hand side are on an almost linear portion of the dispersion curves, whereas the frequencies of the signal on the right-hand side are on a nonlinear portion of the dispersion curve.
This effect is easier to explain by taking into account phase velocities.
The flatter the curve, the smaller the wave dispersion.
The same is true for group velocities.
So if we want to avoid or alleviate the effect of dispersion, we need to use a narrow frequency band excitation signal with a carrier frequency which corresponds to a flat region of the dispersion curves expressed as a relationship between wave velocity and frequency.
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Dispersion effect (3)}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}
\animategraphics[autoplay,loop,width=0.8\textwidth]{1}{dispersion/dispersion_effect_less_dispersive_L_}{1}{11}
\end{figure}
\vspace{-5mm}
\begin{figure}
\animategraphics[autoplay,loop,width=0.8\textwidth]{1}{dispersion/dispersion_effect_dispersive_L_}{1}{11}
\end{figure}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
The dispersion effect is visible in this animation.
The wave shape at the top does not change much with the propagation distance, while the wave at the bottom is dispersive and deforms with the propagation distance.
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{1D wave propagation}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[t]{Longitudinal waves in rod (1)}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}
\includegraphics[width=0.8\textwidth]{rod_segment_with_loads.png}
\caption{Segment of a rod}
\end{figure}
\begin{columns}[T]
\begin{column}{0.5\textwidth}
\(E\) Young's modulus\\
\(\rho\) mass density\\
\(\eta\) viscous damping per unit volume\\
\(q(x,t)\) external force per unit length\\
\end{column}
\begin{column}{0.5\textwidth}
According to Newton's second law of motion:
\begin{equation*}
\sum F_x = m a_x
\end{equation*}
\end{column}
\end{columns}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[t]{Longitudinal waves in rod (2)}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}
\includegraphics[width=0.8\textwidth]{rod_segment_with_loads.png}
\end{figure}
\begin{columns}[T]
\begin{column}{0.5\textwidth}
Following the assumption of only one displacement \(u(x)\), the axial strain is given by:
\begin{equation*}
\varepsilon_{xx} = \frac{\partial u}{\partial x}
\end{equation*}
\end{column}
\begin{column}{0.5\textwidth}
Considering 1D form of Hooke's law (linear elastic material):
\begin{equation*}
\sigma_{xx} = E \varepsilon_{xx}
\end{equation*}
\end{column}
\end{columns}
Resultant axial force:
\begin{equation*}
F=\int_A \sigma_{xx} \drv A = EA \frac{\partial u}{\partial x}
\end{equation*}
\begin{equation*}
-F + (F + \Delta F) + q \Delta x - \eta A \Delta x \dot{u} = \rho A \Delta x \ddot{u}
\end{equation*}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[t]{Longitudinal waves in rod (3)}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\only<1->{
If \(\Delta\) quantities are small
\begin{equation*}
\frac{\partial F}{\partial x} = \rho A \frac{\partial^2 u}{\partial t^2} + \eta A \frac{\partial u}{\partial t} - q
\end{equation*}
Substitute
\begin{equation*}
F= EA \frac{\partial u}{\partial x}
\end{equation*}
\begin{equation*}
\frac{\partial }{\partial x} \left[ EA \frac{\partial u}{\partial x} \right]= \rho A \frac{\partial^2 u}{\partial t^2} + \eta A \frac{\partial u}{\partial t} - q
\end{equation*}
}
\only<2->{
In the special case of uniform properties and no damping, the homogeneous equation is:
\begin{equation*}
c_0^2 \frac{\partial^2 u(x,t)}{\partial x^2} - \frac{\partial^2 u(x,t)}{\partial t^2} = 0, \quad c_0 = \sqrt{\frac{EA}{\rho A}}
\end{equation*}
D'Alembert general solution
\begin{equation*}
u(x,t) = f(x-c_0 t) + g(x+c_0 t)
\end{equation*}
}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
The d'Alembert solution does not hold for more general rods and therefore will not be pursued any further. The homogeneous equation can also be solved by using separation of variables. But we will consider a more general case on the next slides.
}
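% Typical magnitude (illustrative, assuming E ~ 210 GPa and rho ~ 7850 kg/m^3
% for steel): c_0 = sqrt(E/rho) ~ sqrt(210e9/7850) ~ 5.2 km/s, so a pulse
% traverses a 1 m steel rod in roughly 0.2 ms.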
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[t]{Elementary rod theory - spectral analysis (1)}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{equation*}
\frac{\partial }{\partial x} \left[ EA \frac{\partial u}{\partial x} \right] - \rho A \frac{\partial^2 u}{\partial t^2} - \eta A \frac{\partial u}{\partial t} = - q
\end{equation*}
\(\xrightarrow{\mathcal{F}} \)
\begin{equation*}
\frac{\drv}{\drv x}\left[EA \frac{\drv \hat{u}}{\drv x}\right] + \omega^2 \rho A \hat{u} - i \omega \eta A \hat{u} = -\hat{q}
\end{equation*}
Assumption: \(EA\) is not changing along the rod\\
Homogeneous equation (without external force):
\begin{equation*}
EA \frac{\drv^2 \hat{u}}{\drv x^2} + \left(\omega^2 \rho A - i \omega \eta A \right)\hat{u} = 0
\end{equation*}
Assume solution in the form:
\begin{equation*}
\hat{u}(x) = \matr{A} \myexp^{-i k_1 x} + \matr{B} \myexp^{+i k_1 x}
\end{equation*}
\(\matr{A}, \matr{B}\) undetermined amplitudes at each frequency
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[t]{Elementary rod theory - spectral analysis (2)}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\only<1->{
Spectrum relation:
\begin{columns}[T]
\begin{column}{0.5\textwidth}
\centering
damped case:
\begin{equation*}
k_1 = \sqrt{\frac{\omega^2 \rho A - i \omega \eta A}{EA}}
\end{equation*}
\begin{equation*}
c_p = \frac{\omega}{k} = \frac{\omega}{\sqrt{\frac{\omega^2 \rho A - i \omega \eta A}{EA}}}
\end{equation*}
\end{column}
\begin{column}{0.5\textwidth}
\centering
undamped case:
\begin{equation*}
k_1 = \sqrt{\frac{\omega^2 \rho A}{EA}}
\end{equation*}
\begin{equation*}
c_p = \frac{\omega}{k} = \sqrt{\frac{EA}{\rho A}} = c_0, \quad c_g = \frac{\drv \omega}{\drv k} = \sqrt{\frac{EA}{\rho A}} = c_0
\end{equation*}
\end{column}
\end{columns}
}
\only<2->{
\(\xrightarrow{\mathcal{F}^{-1}} \)
\begin{equation*}
u(x,t) = \sum \matr{A} \myexp^{-i(k_1 x - \omega t)} + \sum \matr{B} \myexp^{+i (k_1 x +\omega t)}
\end{equation*}
}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[t]{Elementary rod theory - spectral analysis (3)}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}
\includegraphics[width=0.6\textwidth]{elastic_vs_viscoelastic_rod_Doyle.png}
\caption{Comparison of elastic and viscoelastic materials in terms of spectrum relations for rod}
\end{figure}
{\scriptsize
\begin{biblio}{Source}
\biblioref{Doyle J.F.}{1997}{ Wave Propagation in Structures: Spectral Analysis Using Fast Discrete Fourier Transforms}{Springer}
\end{biblio}}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[t]{Spectral rod element}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}
\includegraphics[width=0.8\textwidth]{spectral_rod.png}
\caption{Spectral rod element along with throw-off element}
\end{figure}
General longitudinal displacement
\begin{equation*}
\hat{u}(x) = \matr{A} \myexp^{-i k_1 x} +\matr{B} \myexp^{-i k_1 (L-x)}
\end{equation*}
\( \matr{A}, \matr{B}\) determined from boundary conditions
Nodal displacements (degrees of freedom):
\begin{equation*}
\hat{u}(0) = \hat{u}_1 = \matr{A} + \matr{B} \myexp^{- i k_1 L} \quad \hat{u}(L) = \hat{u}_2 = \matr{A} \myexp^{- i k_1 L} + \matr{B}
\end{equation*}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
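% Verification of the nodal values (sketch): substituting x = 0 into
% u(x) = A exp(-i k_1 x) + B exp(-i k_1 (L - x)) gives
% u(0) = A + B exp(-i k_1 L), and x = L gives u(L) = A exp(-i k_1 L) + B,
% exactly the two equations on the slide.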
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[t]{Spectral rod element: shape functions}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}
\includegraphics[width=0.8\textwidth]{spectral_rod.png}
\end{figure}
\begin{equation*}
\hat{u}(x) = \hat{N}_1(x) \hat{u}_1 + \hat{N}_2(x) \hat{u}_2
\end{equation*}
\begin{columns}[T]
\begin{column}{0.5\textwidth}
\begin{align*}
&\hat{N}_1(x) = \left[ \myexp^{-i k _1 x} - \myexp^{-i k_1 (2L-x)}\right]/\Delta\\
&\hat{N}_2(x) = \left[ -\myexp^{-i k _1(L+x) } + \myexp^{-i k_1 (L-x)}\right]/\Delta\\
&\Delta = 1-\myexp^{-i 2 k_1 L}
\end{align*}
\end{column}
\begin{column}{0.5\textwidth}
\begin{align*}
&\hat{N}_1(x) = \csc(k L) \sin\left(k (L-x)\right)\\
&\hat{N}_2(x) = \csc(k L) \sin(k x)
\end{align*}
\end{column}
\end{columns}
For throw-off element
\begin{equation*}
\hat{N}_1(x) = \myexp^{-i k x}
\end{equation*}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
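% Quick check that the shape functions interpolate (sketch): at x = 0,
% N_1(0) = (1 - exp(-2 i k_1 L))/Delta = 1 and N_2(0) = 0; at x = L,
% N_1(L) = (exp(-i k_1 L) - exp(-i k_1 L))/Delta = 0 and N_2(L) = 1,
% so u(0) = u_1 and u(L) = u_2, as required of nodal shape functions.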
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[t]{Spectral rod element: dynamic stiffness matrix}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{equation*}
F= EA \frac{\partial u}{\partial x}, \quad \hat{u}(x) = \hat{N}_1(x) \hat{u}_1 + \hat{N}_2(x) \hat{u}_2
\end{equation*}
\begin{align*}
& \hat{F}_1 = -\hat{F}(0) = -\hat{E}\hat{A} \left( \frac{\partial N_1(0)}{\partial x} \hat{u}_1 + \frac{\partial N_2(0)}{\partial x} \hat{u}_2 \right)\\
& \hat{F}_2 = +\hat{F}(L) = +\hat{E}\hat{A} \left( \frac{\partial N_1(L)}{\partial x} \hat{u}_1 + \frac{\partial N_2(L)}{\partial x} \hat{u}_2 \right)
\end{align*}
\begin{equation*}
\left\{
\begin{array}{c}
\hat{F}_1 \\
\hat{F}_2
\end{array}
\right\} = \hat{E}\hat{A}
\left[
\begin{array}{rr}
- \frac{\partial N_1(0)}{\partial x} & - \frac{\partial N_2(0)}{\partial x} \\
\frac{\partial N_1(L)}{\partial x} & \frac{\partial N_2(L)}{\partial x}
\end{array}
\right]
\left\{ \begin{array}{l}\hat{u}_1 \\ \hat{u}_2\end{array}\right\} =
\left[ \begin{array}{ll} \hat{k}_{11}^e & \hat{k}_{12}^e\\
\hat{k}_{21}^e & \hat{k}_{22}^e \end{array}\right]
\left\{ \begin{array}{l} \hat{u}_1 \\ \hat{u}_2\end{array}\right\}
\end{equation*}
\begin{equation*}
\begin{array}{ll}
\hat{k}_{11}^e = k \hat{E}\hat{A} \cot (k L), \quad &\hat{k}_{12}^e = -k \hat{E}\hat{A} \csc (k L)\\
\hat{k}_{21}^e =-k \hat{E}\hat{A} \csc (k L), \quad &\hat{k}_{22}^e = k \hat{E}\hat{A} \cot (k L)
\end{array}
\end{equation*}
throw-off \(\hat{k}^e = \hat{E} \hat{A} i k \)
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
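% Sample entry derived from the sinusoidal shape functions (sketch):
% N_1(x) = csc(kL) sin(k(L - x)), so dN_1/dx at x = 0 equals
% -k csc(kL) cos(kL) = -k cot(kL), and hence
% k_11 = -EA dN_1(0)/dx = k EA cot(kL), matching the slide.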
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Measured waves in a rod}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{columns}[T]
\begin{column}{0.5\textwidth}
\begin{figure}
\animategraphics[autoplay,loop,width=1.05\textwidth]{5}{animation-rod-laser-sync/along_}{1}{201}
\end{figure}
\end{column}
\begin{column}{0.5\textwidth}
\begin{figure}
\animategraphics[autoplay,loop,width=0.9\textwidth]{5}{animation-rod-laser-sync/across_}{1}{201}
\end{figure}
\end{column}
\end{columns}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Higher order rod theories}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}
\only<1>{
\includegraphics[width=0.8\textwidth]{Fig_4_1.png}
}
\only<2>{
\includegraphics[width=0.8\textwidth]{Fig_4_2.png}
}
\only<3>{
\includegraphics[width=0.8\textwidth]{Fig_4_3.png}
}
\only<4>{
\includegraphics[width=0.8\textwidth]{Fig_4_4.png}
}
\end{figure}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Guided waves}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Ultrasonic testing vs guided wave testing (1)}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}
\includegraphics[width=0.6\textwidth]{1280px-UT_vs_GWT.jpg}
\caption{source: wikipedia}
\end{figure}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
In the upper figure, bulk waves are introduced into the structure by the transducer head.
They propagate through the thickness of the inspected element.
Based on the characteristics of bulk waves, such as the velocity and amplitude of the reflected signal, we can assess only a small area, but with quite high precision regarding defect occurrence. Then we need to move the transducer head to inspect another part of the structure.
In the lower figure, waves are guided by the shape of the structural element.
Hence the name: guided waves.
You can imagine that this is a pipe transporting, for example, gas or oil, and there is a device in the form of a ring of transducers which excites elastic waves.
A similar ring of transducers can be placed far away to register the incoming waves.
If there is any anomaly, a change of waveform, it would mean that there is damage between the transducer rings.
In this way the inspected area is very large.
Actuators and sensors can be placed permanently and the structure can be monitored online.
However, the precision of inspection can be lower than in the case of ultrasonic testing.
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Ultrasonic testing vs guided wave testing (2)}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\alert{Bulk waves} exist in infinite homogeneous bodies and propagate indefinitely without being interrupted by boundaries or interfaces.
These waves can be decomposed into infinite plane waves propagating along arbitrary directions within the solid.
\alert{Guided waves} are those waves that require a boundary for their existence, such as surface waves, Lamb waves, and interface waves.
\vspace{5mm}
\begin{columns}[T]
\begin{column}{0.5\textwidth}
\textbf{Ultrasonic waves}
\begin{itemize}
\item Frequency range: 2 MHz - 200 MHz
\item Wavelength \(\lambda \ll h\), where \(h\) is the thickness
\item shorter wavelengths
\end{itemize}
\end{column}
\begin{column}{0.5\textwidth}
\textbf{Guided waves}
\begin{itemize}
\item Typical frequency range: 10 kHz - 1 MHz
\item Wavelength \(\lambda > h\), where \(h\) is the thickness
\item longer wavelengths
\end{itemize}
\end{column}
\end{columns}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Waves used in non-destructive testing}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Types of elastic wave, depending on the particle motion:
\begin{itemize}
\item \alert{The longitudinal wave} is a compressional wave in which the particle motion is in the same direction as the propagation of the wave
\item \alert{The shear wave} is a wave motion in which the particle motion is perpendicular to the direction of the propagation
\item \alert{Surface (Rayleigh) waves} have an elliptical particle motion and travel across the surface of a material. Their velocity is approximately 90\% of the shear wave velocity of the material and their depth of penetration is approximately equal to one
wavelength
\item \alert{Plate (Lamb) waves} have a complex vibration occurring in materials where the thickness is less than the wavelength of the elastic wave introduced into them.
\end{itemize}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
The most common methods of ultrasonic examination utilize either
longitudinal waves or shear waves. Other forms of elastic wave propagation exist,
including surface waves and Lamb waves.
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Waves used in non-destructive testing}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}
\animategraphics[controls,autoplay,loop,width=0.5\textwidth]{5}{frame}{1}{74}
\caption{\alert{Longitudinal wave} - plane pressure pulse wave (source: wikipedia)}
\end{figure}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Waves used in non-destructive testing}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}
\includegraphics[width=0.5\textwidth]{Fig_1_1.jpg}
\caption{\alert{Shear horizontal wave} }
\end{figure}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Waves used in non-destructive testing}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}
\includegraphics[width=0.5\textwidth]{Fig_1_3.jpg}
\caption{\alert{Rayleigh waves} }
\end{figure}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Waves used in non-destructive testing}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}
\includegraphics[width=0.5\textwidth]{Fig_1_4.jpg}
\caption{\alert{Love waves} (surface seismic waves) named after Augustus Edward Hough Love }
\end{figure}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Waves used in non-destructive testing}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{columns}[T]
\begin{column}{0.5\textwidth}
\begin{figure}
\includegraphics[width=0.9\textwidth]{Fig_1_5.jpg}
\caption{Fundamental symmetric, S0, \alert{Lamb wave} mode }
\end{figure}
\end{column}
\begin{column}{0.5\textwidth}
\begin{figure}
\includegraphics[width=0.9\textwidth]{Fig_1_6.jpg}
\caption{Fundamental antisymmetric, A0, \alert{Lamb wave} mode }
\end{figure}
\end{column}
\end{columns}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Lamb waves}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{alertblock}{Lamb waves}
Lamb waves are plane waves propagating in thin plates.\\
Shear vertical waves in conjunction with longitudinal P waves interact with the plate surfaces, resulting in a complex wave mechanism which leads to the creation of Lamb waves.
\end{alertblock}
Horace Lamb discovered this type of wave in 1917.
He derived the theory and the dispersion relations.
\begin{columns}[T]
\begin{column}{0.5\textwidth}
\centering
symmetric modes
\begin{equation*}
\frac{\tan(q h)}{\tan(p h)} = -\frac{4 k^2 p q}{\left(q^2 - k^2\right)^2}
\end{equation*}
\end{column}
\begin{column}{0.5\textwidth}
\centering
antisymmetric modes
\begin{equation*}
\frac{\tan(q h)}{\tan(p h)} = -\frac{\left(q^2 - k^2\right)^2}{4 k^2 p q}
\end{equation*}
\end{column}
\end{columns}
\centering
\(q=q(\omega,k), \quad p=p(\omega,k) \)
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
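% Meaning of p and q in the Rayleigh-Lamb equations above (standard notation,
% cf. Rose 1999): p^2 = omega^2/c_L^2 - k^2 and q^2 = omega^2/c_T^2 - k^2,
% where c_L and c_T are the bulk longitudinal and shear velocities and h is
% the half-thickness of the plate. Each root k(omega) of these transcendental
% equations is one Lamb mode.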
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Dispersion curves of Lamb waves}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}
\only<1>{
\includegraphics[width=0.8\textwidth]{Fig_1_12.png}
}
\only<2>{
\includegraphics[width=0.8\textwidth]{Fig_1_13.png}
}
\end{figure}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\section{Challenges in simulations of elastic wave propagation phenomenon}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Excitation and measurement techniques}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Piezoelectricity}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{columns}[T]
\begin{column}{0.5\textwidth}
\begin{figure}
\animategraphics[autoplay,loop,width=0.7\textwidth]{3}{piezoelectricity_effect/piezoelectricity_effect_}{1}{25}
\caption{Source: \url{https://commons.wikimedia.org/wiki/File:SchemaPiezo.gif }}
\end{figure}
\end{column}
\begin{column}{0.5\textwidth}
Piezoelectricity is the electric charge that accumulates in certain solid materials (such as crystals and certain ceramics) in response to applied mechanical stress. \\
Lead zirconate titanate,\\
Pb[Zr\textsubscript{x}Ti\textsubscript{1-x}]O\textsubscript{3},\\
more commonly known as PZT, is the most commonly used piezoelectric ceramic today
\end{column}
\end{columns}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}[t]{SLDV measurements}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{columns}[T]
\column{0.5\textwidth}
\begin{figure}
\includegraphics[width=0.8\textwidth]{wibrometr-laserowy-1d_small-description.png}
\end{figure}
\column{0.5\textwidth}
\begin{enumerate}
\item Signal generator: TTI 1241
\item Amplifier: Piezo Systems EPA-104-230 $\pm$200 Vp
\item Specimen
\item Scanning head: Polytec PSV-400
\item DAQ system: Polytec
\end{enumerate}
\end{columns}
{\small
The vibrometer allows measurements of vibration velocities in the range 0.01~$\mu$m/s $-$ 10 m/s, for frequencies from DC up to 1.5~MHz, and for measuring distances from 40~cm up to dozens of meters. The scanning resolution of 0.002$^{\circ}$ makes it possible to define about 300\,000 points in the laser working area.}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
% https://www.youtube.com/watch?v=o0qkmG_S4QY
\note{
This picture shows the elements of the setup for Lamb wave propagation measurement: a signal generator, an amplifier, the specimen, the laser vibrometer head and analogue-to-digital converters.
The laser vibrometer measures the vibration velocity along the laser beam.
Theoretically, vibration measurements up to 1.5 MHz are possible.
In practice this depends on many parameters, such as the vibration amplitude, the type of decoder, the distance of the laser from the sample, the attenuation of the material, etc.
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Assignment}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Assignment}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Answer the following questions:
\begin{enumerate}
\item What is the difference between wave propagation and vibrations?
\item What is the difference between bulk waves and Lamb waves?
\end{enumerate}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{References}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{biblio}{Recommended books}
\biblioref{Rose J.L.}{1999}{ Ultrasonic Waves in Solid Media}{Cambridge University Press}
\biblioref{Doyle J.F.}{1997}{ Wave Propagation in Structures: Spectral Analysis Using Fast Discrete Fourier Transforms}{Springer}
\biblioref{Ostachowicz W., Kudela P., Krawczuk M., Zak A.}{2012}{ Guided Waves in Structures for SHM: The Time Domain Spectral Element Method}{Wiley}
\end{biblio}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
{\setbeamercolor{palette primary}{fg=black, bg=white}
\begin{frame}[standout]
Thank you for your attention!\\ \vspace{12pt}
Questions?\\ \vspace{12pt}
\url{[email protected]}
\end{frame}
}
\note{Thank you for your attention!
I hope that you have learnt something from my presentation.
So, if you have any questions please drop me a line.
See you next time!}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% END OF SLIDES
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\end{document}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Empty}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{frame}{Empty 2-columns}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{columns}[T]
\begin{column}{0.5\textwidth}
\end{column}
\begin{column}{0.5\textwidth}
\end{column}
\end{columns}
\end{frame}
%%%%%%%%%%%%%%%%%%%%
\note{
}
\subsection{Second and Other Derivatives}
If $f$ is a differentiable function then its derivative $f'$ is also a function and so we can take the derivative of $f'$.
The new function, denoted by $f''$, is called the \dfont{second derivative} of $f$, since it is the derivative of the derivative of $f$.
The following symbols represent the second derivative:
$$f''(x)=y''=\frac{d^2y}{dx^2}=\frac{d}{dx}\left(\frac{dy}{dx}\right).$$
We can continue this process to get the third derivative of $f$.
In general, the \dfont{$n$th derivative} of $f$ is denoted by $f^{(n)}$ and is obtained from $f$ by differentiating $n$ times.
If $y=f(x)$, then we write:
$$y^{(n)}=f^{(n)}(x)=\frac{d^ny}{dx^n}.$$
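For example, if $f(x)=x^5$, then
$$f'(x)=5x^4,\qquad f''(x)=20x^3,\qquad f'''(x)=60x^2,\qquad f^{(4)}(x)=120x.$$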
"alphanum_fraction": 0.6873198847,
"avg_line_length": 57.8333333333,
"ext": "tex",
"hexsha": "0dc0501daf4774ad948f2722b4c618d01d8ee542",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "7d0110b6bc4ba42a6b911729420e1406296d6964",
"max_forks_repo_licenses": [
"CC0-1.0"
],
"max_forks_repo_name": "TimAlderson/OpenCalc",
"max_forks_repo_path": "4-derivatives/4-3-2-second-other-derivatives.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "7d0110b6bc4ba42a6b911729420e1406296d6964",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC0-1.0"
],
"max_issues_repo_name": "TimAlderson/OpenCalc",
"max_issues_repo_path": "4-derivatives/4-3-2-second-other-derivatives.tex",
"max_line_length": 136,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "7d0110b6bc4ba42a6b911729420e1406296d6964",
"max_stars_repo_licenses": [
"CC0-1.0"
],
"max_stars_repo_name": "TimAlderson/OpenCalc",
"max_stars_repo_path": "4-derivatives/4-3-2-second-other-derivatives.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 221,
"size": 694
} |
\chapter{Basic Dialogue Processing}
\label{Chapter:BasicDialogue}
\author{Manny Rayner}
\section{Using dialogue mode}
\label{Section:DialogueMode}
\section{A toy dialogue system}
\label{Section:ToyDialogue}
\section{Writing an input manager}
\label{Section:InputManager}
\section{Writing a dialogue manager}
\label{Section:DialogueManager}
\section{Writing an output manager}
\label{Section:OutputManager}
\section{Config files for dialogue systems}
\label{Section:DialogueConfig}
"alphanum_fraction": 0.8053278689,
"avg_line_length": 21.2173913043,
"ext": "tex",
"hexsha": "f067eed729789fb7169870ab45dfbbb560032433",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "5c3e5013a3048da7d68a8a43476ad84d3ea4bb47",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "TeamSPoon/logicmoo_nlu",
"max_forks_repo_path": "ext/regulus/doc/Cookbook/basic-dialogue.tex",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "44025b6e389e2f2f7d86b46c1301cab0604bba26",
"max_issues_repo_issues_event_max_datetime": "2020-02-02T13:12:34.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-02-02T13:12:34.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "logicmoo/old_logicmoo_workspace",
"max_issues_repo_path": "pack/logicmoo_nlu/prolog/regulus/doc/Cookbook/basic-dialogue.tex",
"max_line_length": 43,
"max_stars_count": 6,
"max_stars_repo_head_hexsha": "5c3e5013a3048da7d68a8a43476ad84d3ea4bb47",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "TeamSPoon/logicmoo_nlu",
"max_stars_repo_path": "ext/regulus/doc/Cookbook/basic-dialogue.tex",
"max_stars_repo_stars_event_max_datetime": "2020-02-28T19:30:28.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-01-27T12:08:02.000Z",
"num_tokens": 119,
"size": 488
} |
\documentclass[12pt]{amsart}
\usepackage{geometry} % see geometry.pdf on how to lay out the page. There's lots.
\geometry{a4paper} % or letter or a5paper or ... etc
\usepackage[T1]{fontenc}
\usepackage[latin9]{inputenc}
\usepackage{amsmath}
\usepackage{amsaddr}
\usepackage{dirtytalk}
\usepackage{float}
\usepackage{listings}
\usepackage{hyperref}
\usepackage{enumerate}
\usepackage{color}
\definecolor{codegreen}{rgb}{0,0.6,0}
\definecolor{codegray}{rgb}{0.5,0.5,0.5}
\definecolor{stringcolor}{rgb}{0.7,0.23,0.36}
\definecolor{backcolour}{rgb}{0.95,0.95,0.92}
\definecolor{keycolor}{rgb}{0.007,0.01,1.0}
\definecolor{itemcolor}{rgb}{0.01,0.0,0.49}
\hypersetup{
colorlinks=true,
linkcolor=blue,
filecolor=blue,
urlcolor=blue,
}
\lstdefinestyle{mystyle}{
%backgroundcolor=\color{backcolour},
commentstyle=\color{codegreen},
keywordstyle=\color{keycolor},
numberstyle=\tiny\color{codegray},
stringstyle=\color{stringcolor},
basicstyle=\footnotesize,
breakatwhitespace=false,
breaklines=true,
captionpos=b,
keepspaces=true,
numbers=left,
numbersep=5pt,
showspaces=false,
showstringspaces=false,
showtabs=false,
tabsize=2
}
\lstset{style=mystyle}
\lstdefinelanguage{Swift}{
keywords={associatedtype, class, deinit, enum, extension, func, import, init, inout, internal, let, operator, private, protocol, public, static, struct, subscript, typealias, var, break, case, continue, default, defer, do, else, fallthrough, for, guard, if, in, repeat, return, switch, where, while, as, catch, dynamicType, false, is, nil, rethrows, super, self, Self, throw, throws, true, try, associativity, convenience, dynamic, didSet, final, get, infix, indirect, lazy, left, mutating, none, nonmutating, optional, override, postfix, precedence, prefix, Protocol, required, right, set, Type, unowned, weak, willSet},
ndkeywords={class, export, boolean, throw, implements, import, this},
sensitive=false,
comment=[l]{//},
morecomment=[s]{/*}{*/},
morestring=[b]',
morestring=[b]"
}
\lstset{emph={Int,count,abs,repeating,Array}, emphstyle=\color{itemcolor}}
\title{Week 03}
\date{\today}
\lstset{style=mystyle}
%%% BEGIN DOCUMENT
\begin{document}
\maketitle
\section{Preparation for Assignment}
If, and \textit{only if}, you can truthfully affirm each statement below are you ready to start the assignment.
\subsection {Reading Comprehension Self-Check}
\begin{itemize}
\item I know that \textit{brute force} is a straightforward approach to solving a problem, usually directly based on the problem statement and definitions of the concepts involved.
\item I understand that a first application of the brute-force approach often
results in an algorithm that can be improved with a modest amount of effort.
\item I know \textit{why} it is \textbf{false} to say that a strength of the brute-force
approach is subpar algorithmic efficiency.
\item I understand that \textit{exhaustive search} is a brute-force approach to combinatorial problems that suggests generating each and every combinatorial object of the problem, selecting those of them that satisfy all the constraints, and then finding a desired object.
\item I know why it is \textbf{false} to say that exhaustive search is practical for all but very small instances of problems it can be applied to.
\item I have reviewed Appendix A specifically focusing on understanding the formulas and rules for summations.
\item I have studied and pondered these common algorithms and understand why they have the time efficiencies they do:
\begin{itemize}
\item Finding the maximum element in an array.
\item Determining the uniqueness of all elements of an array.
\item Multiplying two $n$-by-$n$ matrices.
\item Converting a base 10 number to binary.
\end{itemize}
\end{itemize}
\subsection{Memory Self-Check}
\subsubsection{Algorithm Efficiency Calculation}
Create brute force algorithms for each of these three situations:
\begin{enumerate}
\item Computing $a^n$, where $a$ is positive and $n$ is a nonnegative integer.
\item Computing $n!$.
\item Sequential search.
\end{enumerate}
Determine $\mathcal{O}$, $\Omega$, $\Theta$, input size, and time efficiency for each algorithm.
\section{Week 02 Exercises}
\subsection{ Exercise 1 on page 102}
\subsection{Exercise 8 on page 103}
\subsection{Exercise 6 on page 114}
\subsection{Exercise 4 on page 128}
\subsection{Exercise 8 on page 129}
\subsection{Find the Door}
You are facing a wall that stretches infinitely in both directions. There is a door in the wall, but you know neither how far away nor in which direction. You can see the door only when you are right next to it. Design and write code for an algorithm that enables you to reach the door by walking at most $\mathcal{O}(n)$ steps where $n$ is the (unknown to you) number of steps between your initial position and the door. (Hint: walk alternately right and left going each time exponentially farther from your initial position.)
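One possible shape for such an algorithm is sketched below (the \texttt{isDoor} closure is a hypothetical stand-in for ``look at the current position''; sweeping out to $+1, -2, +4, -8, \ldots$ walks at most a constant multiple of $n$ steps in total, since the sweep lengths form a geometric series):
\begin{lstlisting}[language=Swift]
// Doubling sweep: walk out to +1, back past the start to -2, out to +4, ...
// Each sweep checks every cell between the start and its turnaround point,
// so the door is found during the first sweep long enough to reach it.
func findDoor(isDoor: (Int) -> Bool) -> Int {
    if isDoor(0) { return 0 }
    var reach = 1   // how far the current sweep goes
    var sign = 1    // current direction: +1 right, -1 left
    while true {
        // visit every cell from the start out to sign * reach
        for cell in stride(from: sign, through: sign * reach, by: sign) {
            if isDoor(cell) { return cell }
        }
        sign = -sign   // turn around ...
        reach *= 2     // ... and sweep twice as far next time
    }
}
\end{lstlisting}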
\section{Week 02 Problems}
\subsection{Exercise 14 on page 103}
\subsection{Exercise 5 on page 129} Make sure you do a rigorous mathematical proof.
\subsection{Exercise 6 on page 121} Write code for this algorithm.
\end{document}
\documentclass{howto}
\usepackage{ltxmarkup}
\usepackage{times}
\usepackage{distutils}
% $Id$
\title{Distributing Python Modules}
\author{Greg Ward}
\authoraddress{E-mail: \email{[email protected]}}
\makeindex
\begin{document}
\maketitle
\begin{abstract}
\noindent
This document describes the Python Distribution Utilities
(``Distutils'') from the module developer's point-of-view, describing
how to use the Distutils to make Python modules and extensions easily
available to a wider audience with very little overhead for
build/release/install mechanics.
\end{abstract}
\tableofcontents
\section{Introduction}
\label{intro}
In the past, Python module developers have not had much infrastructure
support for distributing modules, nor have Python users had much support
for installing and maintaining third-party modules. With the
introduction of the Python Distribution Utilities (Distutils for short)
in Python 1.6, this situation should start to improve.
This document only covers using the Distutils to distribute your Python
modules. Using the Distutils does not tie you to Python 1.6, though:
the Distutils work just fine with Python 1.5.2, and it is reasonable
(and expected to become commonplace) for users of Python 1.5.2 to
download and install the Distutils separately before they can install
your modules. Python 1.6 (or later) users, of course, won't have to add
anything to their Python installation in order to use the Distutils to
install third-party modules.
This document concentrates on the role of developer/distributor: if
you're looking for information on installing Python modules, you
should refer to the \citetitle[../inst/inst.html]{Installing Python
Modules} manual.
\section{Concepts \& Terminology}
\label{concepts}
Using the Distutils is quite simple, both for module developers and for
users/administrators installing third-party modules. As a developer,
your responsibilities (apart from writing solid, well-documented and
well-tested code, of course!) are:
\begin{itemize}
\item write a setup script (\file{setup.py} by convention)
\item (optional) write a setup configuration file
\item create a source distribution
\item (optional) create one or more built (binary) distributions
\end{itemize}
Each of these tasks is covered in this document.
Not all module developers have access to a multitude of platforms, so
it's not always feasible to expect them to create a multitude of built
distributions. It is hoped that a class of intermediaries, called
\emph{packagers}, will arise to address this need. Packagers will take
source distributions released by module developers, build them on one or
more platforms, and release the resulting built distributions. Thus,
users on the most popular platforms will be able to install most popular
Python module distributions in the most natural way for their platform,
without having to run a single setup script or compile a line of code.
\subsection{A simple example}
\label{simple-example}
The setup script is usually quite simple, although since it's written in
Python, there are no arbitrary limits to what you can do with
it.\footnote{But be careful about putting arbitrarily expensive
operations in your setup script; unlike, say, Autoconf-style configure
scripts, the setup script may be run multiple times in the course of
building and installing your module distribution. If you need to
insert potentially expensive processing steps into the Distutils
chain, see section~\ref{extending} on extending the Distutils.} If
all you want to do is distribute a module called \module{foo}, contained
in a file \file{foo.py}, then your setup script can be as little as
this:
\begin{verbatim}
from distutils.core import setup
setup (name = "foo",
version = "1.0",
py_modules = ["foo"])
\end{verbatim}
Some observations:
\begin{itemize}
\item most information that you supply to the Distutils is supplied as
keyword arguments to the \function{setup()} function
\item those keyword arguments fall into two categories: package
meta-data (name, version number) and information about what's in the
package (a list of pure Python modules, in this case)
\item modules are specified by module name, not filename (the same will
hold true for packages and extensions)
\item it's recommended that you supply a little more meta-data, in
particular your name, email address and a URL for the project
(see section~\ref{setup-script} for an example)
\end{itemize}
To create a source distribution for this module, you would create a
setup script, \file{setup.py}, containing the above code, and run:
\begin{verbatim}
python setup.py sdist
\end{verbatim}
which will create an archive file (e.g., tarball on Unix, zip file on
Windows) containing your setup script, \file{setup.py}, and your module,
\file{foo.py}. The archive file will be named \file{foo-1.0.tar.gz} (or
\file{.zip}), and will unpack into a directory \file{foo-1.0}.
If an end-user wishes to install your \module{foo} module, all she has
to do is download \file{foo-1.0.tar.gz} (or \file{.zip}), unpack it,
and---from the \file{foo-1.0} directory---run
\begin{verbatim}
python setup.py install
\end{verbatim}
which will ultimately copy \file{foo.py} to the appropriate directory
for third-party modules in their Python installation.
This simple example demonstrates some fundamental concepts of the
Distutils: first, both developers and installers have the same basic
user interface, i.e. the setup script. The difference is which
Distutils \emph{commands} they use: the \command{sdist} command is
almost exclusively for module developers, while \command{install} is
more often for installers (although most developers will want to install
their own code occasionally).
If you want to make things really easy for your users, you can create
one or more built distributions for them. For instance, if you are
running on a Windows machine, and want to make things easy for other
Windows users, you can create an executable installer (the most
appropriate type of built distribution for this platform) with the
\command{bdist\_wininst} command. For example:
\begin{verbatim}
python setup.py bdist_wininst
\end{verbatim}
will create an executable installer, \file{foo-1.0.win32.exe}, in the
current directory.
\XXX{not implemented yet}
(Another way to create executable installers for Windows is with the
\command{bdist\_wise} command, which uses Wise---the commercial
installer-generator used to create Python's own installer---to create
the installer. Wise-based installers are more appropriate for large,
industrial-strength applications that need the full capabilities of a
``real'' installer. \command{bdist\_wininst} creates a self-extracting
zip file with a minimal user interface, which is enough for small- to
medium-sized module collections. You'll need to have version XXX of
Wise installed on your system for the \command{bdist\_wise} command to
work; it's available from \url{http://foo/bar/baz}.)
Currently (Distutils 0.9.2), the only other useful built
distribution format is RPM, implemented by the \command{bdist\_rpm}
command. For example, the following command will create an RPM file
called \file{foo-1.0.noarch.rpm}:
\begin{verbatim}
python setup.py bdist_rpm
\end{verbatim}
(This uses the \command{rpm} command, so it has to be run on an RPM-based
system such as Red Hat Linux, SuSE Linux, or Mandrake Linux.)
You can find out what distribution formats are available at any time by
running
\begin{verbatim}
python setup.py bdist --help-formats
\end{verbatim}
\subsection{General Python terminology}
\label{python-terms}
If you're reading this document, you probably have a good idea of what
modules, extensions, and so forth are. Nevertheless, just to be sure
that everyone is operating from a common starting point, we offer the
following glossary of common Python terms:
\begin{description}
\item[module] the basic unit of code reusability in Python: a block of
code imported by some other code. Three types of modules concern us
here: pure Python modules, extension modules, and packages.
\item[pure Python module] a module written in Python and contained in a
single \file{.py} file (and possibly associated \file{.pyc} and/or
\file{.pyo} files). Sometimes referred to as a ``pure module.''
\item[extension module] a module written in the low-level language of
the Python implementation: C/C++ for CPython, Java for JPython.
Typically contained in a single dynamically loadable pre-compiled
file, e.g. a shared object (\file{.so}) file for CPython extensions on
Unix, a DLL (given the \file{.pyd} extension) for CPython extensions
on Windows, or a Java class file for JPython extensions. (Note that
currently, the Distutils only handles C/C++ extensions for CPython.)
\item[package] a module that contains other modules; typically contained
in a directory in the filesystem and distinguished from other
directories by the presence of a file \file{\_\_init\_\_.py}.
\item[root package] the root of the hierarchy of packages. (This isn't
really a package, since it doesn't have an \file{\_\_init\_\_.py}
file. But we have to call it something.) The vast majority of the
standard library is in the root package, as are many small, standalone
third-party modules that don't belong to a larger module collection.
Unlike regular packages, modules in the root package can be found in
many directories: in fact, every directory listed in \code{sys.path}
can contribute modules to the root package.
\end{description}
\subsection{Distutils-specific terminology}
\label{distutils-term}
The following terms apply more specifically to the domain of
distributing Python modules using the Distutils:
\begin{description}
\item[module distribution] a collection of Python modules distributed
together as a single downloadable resource and meant to be installed
\emph{en masse}. Examples of some well-known module distributions are
Numeric Python, PyXML, PIL (the Python Imaging Library), or
mxDateTime. (This would be called a \emph{package}, except that term
is already taken in the Python context: a single module distribution
may contain zero, one, or many Python packages.)
\item[pure module distribution] a module distribution that contains only
pure Python modules and packages. Sometimes referred to as a ``pure
distribution.''
\item[non-pure module distribution] a module distribution that contains
at least one extension module. Sometimes referred to as a ``non-pure
distribution.''
\item[distribution root] the top-level directory of your source tree (or
source distribution); the directory where \file{setup.py} exists and
is run from
\end{description}
\section{Writing the Setup Script}
\label{setup-script}
The setup script is the centre of all activity in building,
distributing, and installing modules using the Distutils. The main
purpose of the setup script is to describe your module distribution to
the Distutils, so that the various commands that operate on your modules
do the right thing. As we saw in section~\ref{simple-example} above,
the setup script consists mainly of a call to \function{setup()}, and
most information supplied to the Distutils by the module developer is
supplied as keyword arguments to \function{setup()}.
Here's a slightly more involved example, which we'll follow for the next
couple of sections: the Distutils' own setup script. (Keep in mind that
although the Distutils are included with Python 1.6 and later, they also
have an independent existence so that Python 1.5.2 users can use them to
install other module distributions. The Distutils' own setup script,
shown here, is used to install the package into Python 1.5.2.)
\begin{verbatim}
#!/usr/bin/env python
from distutils.core import setup
setup (name = "Distutils",
version = "1.0",
description = "Python Distribution Utilities",
author = "Greg Ward",
author_email = "[email protected]",
url = "http://www.python.org/sigs/distutils-sig/",
packages = ['distutils', 'distutils.command'],
)
\end{verbatim}
There are only two differences between this and the trivial one-file
distribution presented in section~\ref{simple-example}: more
meta-data, and the specification of pure Python modules by package,
rather than by module. This is important since the Distutils consist of
a couple of dozen modules split into (so far) two packages; an explicit
list of every module would be tedious to generate and difficult to
maintain.
Note that any pathnames (files or directories) supplied in the setup
script should be written using the Unix convention, i.e.
slash-separated. The Distutils will take care of converting this
platform-neutral representation into whatever is appropriate on your
current platform before actually using the pathname. This makes your
setup script portable across operating systems, which of course is one
of the major goals of the Distutils. In this spirit, all pathnames in
this document are slash-separated (Mac OS programmers should keep in
mind that the \emph{absence} of a leading slash indicates a relative
path, the opposite of the Mac OS convention with colons).
\subsection{Listing whole packages}
\label{listing-packages}
The \option{packages} option tells the Distutils to process (build,
distribute, install, etc.) all pure Python modules found in each package
mentioned in the \option{packages} list. In order to do this, of
course, there has to be a correspondence between package names and
directories in the filesystem. The default correspondence is the most
obvious one, i.e. package \module{distutils} is found in the directory
\file{distutils} relative to the distribution root. Thus, when you say
\code{packages = ['foo']} in your setup script, you are promising that
the Distutils will find a file \file{foo/\_\_init\_\_.py} (which might
be spelled differently on your system, but you get the idea) relative to
the directory where your setup script lives. (If you break this
promise, the Distutils will issue a warning but process the broken
package anyways.)
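Concretely, under the default correspondence, the distribution root for
\code{packages = ['foo']} might look like this (the module
\file{bar.py} is purely illustrative):
\begin{verbatim}
setup.py
foo/__init__.py
foo/bar.py
\end{verbatim}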
If you use a different convention to lay out your source directory,
that's no problem: you just have to supply the \option{package\_dir}
option to tell the Distutils about your convention. For example, say
you keep all Python source under \file{lib}, so that modules in the
``root package'' (i.e., not in any package at all) are right in
\file{lib}, modules in the \module{foo} package are in \file{lib/foo},
and so forth. Then you would put
\begin{verbatim}
package_dir = {'': 'lib'}
\end{verbatim}
in your setup script. (The keys to this dictionary are package names,
and an empty package name stands for the root package. The values are
directory names relative to your distribution root.) In this case, when
you say \code{packages = ['foo']}, you are promising that the file
\file{lib/foo/\_\_init\_\_.py} exists.
Another possible convention is to put the \module{foo} package right in
\file{lib}, the \module{foo.bar} package in \file{lib/bar}, etc. This
would be written in the setup script as
\begin{verbatim}
package_dir = {'foo': 'lib'}
\end{verbatim}
A \code{\var{package}: \var{dir}} entry in the \option{package\_dir}
dictionary implicitly applies to all packages below \var{package}, so
the \module{foo.bar} case is automatically handled here. In this
example, having \code{packages = ['foo', 'foo.bar']} tells the Distutils
to look for \file{lib/\_\_init\_\_.py} and
\file{lib/bar/\_\_init\_\_.py}. (Keep in mind that although
\option{package\_dir} applies recursively, you must explicitly list all
packages in \option{packages}: the Distutils will \emph{not} recursively
scan your source tree looking for any directory with an
\file{\_\_init\_\_.py} file.)
\subsection{Listing individual modules}
\label{listing-modules}
For a small module distribution, you might prefer to list all modules
rather than listing packages---especially in the case of a single module
that goes in the ``root package'' (i.e., no package at all). This
simplest case was shown in section~\ref{simple-example}; here is a
slightly more involved example:
\begin{verbatim}
py_modules = ['mod1', 'pkg.mod2']
\end{verbatim}
This describes two modules, one of them in the ``root'' package, the
other in the \module{pkg} package. Again, the default package/directory
layout implies that these two modules can be found in \file{mod1.py} and
\file{pkg/mod2.py}, and that \file{pkg/\_\_init\_\_.py} exists as well.
And again, you can override the package/directory correspondence using
the \option{package\_dir} option.
\subsection{Describing extension modules}
\label{describing-extensions}
Just as writing Python extension modules is a bit more complicated than
writing pure Python modules, describing them to the Distutils is a bit
more complicated. Unlike pure modules, it's not enough just to list
modules or packages and expect the Distutils to go out and find the
right files; you have to specify the extension name, source file(s), and
any compile/link requirements (include directories, libraries to link
with, etc.).
All of this is done through another keyword argument to
\function{setup()}, the \option{ext\_modules} option. \option{ext\_modules}
is just a list of \class{Extension} instances, each of which describes a
single extension module. Suppose your distribution includes a single
extension, called \module{foo} and implemented by \file{foo.c}. If no
additional instructions to the compiler/linker are needed, describing
this extension is quite simple:
\begin{verbatim}
Extension("foo", ["foo.c"])
\end{verbatim}
The \class{Extension} class can be imported from
\module{distutils.core}, along with \function{setup()}. Thus, the setup
script for a module distribution that contains only this one extension
and nothing else might be:
\begin{verbatim}
from distutils.core import setup, Extension
setup(name = "foo", version = "1.0",
ext_modules = [Extension("foo", ["foo.c"])])
\end{verbatim}
The \class{Extension} class (actually, the underlying extension-building
machinery implemented by the \command{build\_ext} command) supports a
great deal of flexibility in describing Python extensions, which is
explained in the following sections.
\subsubsection{Extension names and packages}
The first argument to the \class{Extension} constructor is always the
name of the extension, including any package names. For example,
\begin{verbatim}
Extension("foo", ["src/foo1.c", "src/foo2.c"])
\end{verbatim}
describes an extension that lives in the root package, while
\begin{verbatim}
Extension("pkg.foo", ["src/foo1.c", "src/foo2.c"])
\end{verbatim}
describes the same extension in the \module{pkg} package. The source
files and resulting object code are identical in both cases; the only
difference is where in the filesystem (and therefore where in Python's
namespace hierarchy) the resulting extension lives.
If you have a number of extensions all in the same package (or all under
the same base package), use the \option{ext\_package} keyword argument
to \function{setup()}. For example,
\begin{verbatim}
setup(...
ext_package = "pkg",
ext_modules = [Extension("foo", ["foo.c"]),
Extension("subpkg.bar", ["bar.c"])]
)
\end{verbatim}
will compile \file{foo.c} to the extension \module{pkg.foo}, and
\file{bar.c} to \module{pkg.subpkg.bar}.
\subsubsection{Extension source files}
The second argument to the \class{Extension} constructor is a list of
source files. Since the Distutils currently only support C/C++
extensions, these are normally C/C++ source files. (Be sure to use
appropriate extensions to distinguish C++ source files: \file{.cc} and
\file{.cpp} seem to be recognized by both Unix and Windows compilers.)
However, you can also include SWIG interface (\file{.i}) files in the
list; the \command{build\_ext} command knows how to deal with SWIG
extensions: it will run SWIG on the interface file and compile the
resulting C/C++ file into your extension.
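For instance, a hypothetical extension wrapping a C library through SWIG
might list the interface file alongside ordinary C sources (both file
names here are made up for illustration):
\begin{verbatim}
Extension("foo", ["foo.i", "foo_support.c"])
\end{verbatim}
The \command{build\_ext} command would then run SWIG on \file{foo.i} and
compile the generated wrapper together with \file{foo\_support.c}.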
\XXX{SWIG support is rough around the edges and largely untested;
especially SWIG support of C++ extensions! Explain in more detail
here when the interface firms up.}
On some platforms, you can include non-source files that are processed
by the compiler and included in your extension. Currently, this just
means Windows resource files for Visual C++. \XXX{get more detail on
this feature from Thomas Heller!}
\subsubsection{Preprocessor options}
Three optional arguments to \class{Extension} will help if you need to
specify include directories to search or preprocessor macros to
define/undefine: \code{include\_dirs}, \code{define\_macros}, and
\code{undef\_macros}.
For example, if your extension requires header files in the
\file{include} directory under your distribution root, use the
\code{include\_dirs} option:
\begin{verbatim}
Extension("foo", ["foo.c"], include_dirs=["include"])
\end{verbatim}
You can specify absolute directories there; if you know that your
extension will only be built on Unix systems with X11R6 installed to
\file{/usr}, you can get away with
\begin{verbatim}
Extension("foo", ["foo.c"], include_dirs=["/usr/include/X11"])
\end{verbatim}
You should avoid this sort of non-portable usage if you plan to
distribute your code: it's probably better to write your code to include
(e.g.) \code{<X11/Xlib.h>}.
If you need to include header files from some other Python extension,
you can take advantage of the fact that the Distutils install extension
header files in a consistent way. For example, the Numerical Python
header files are installed (on a standard Unix installation) to
\file{/usr/local/include/python1.5/Numerical}. (The exact location will
differ according to your platform and Python installation.) Since the
Python include directory---\file{/usr/local/include/python1.5} in this
case---is always included in the search path when building Python
extensions, the best approach is to include (e.g.)
\code{<Numerical/arrayobject.h>}. If you insist on putting the
\file{Numerical} include directory right into your header search path,
though, you can find that directory using the Distutils
\module{sysconfig} module:
\begin{verbatim}
import os
from distutils.sysconfig import get_python_inc

# locate the Numerical headers inside Python's include directory
incdir = os.path.join(get_python_inc(plat_specific=1), "Numerical")
setup(...,
      ext_modules=[Extension(..., include_dirs=[incdir])])
\end{verbatim}
Even though this is quite portable---it will work on any Python
installation, regardless of platform---it's probably easier to just
write your C code in the sensible way.
You can define and undefine pre-processor macros with the
\code{define\_macros} and \code{undef\_macros} options.
\code{define\_macros} takes a list of \code{(name, value)} tuples, where
\code{name} is the name of the macro to define (a string) and
\code{value} is its value: either a string or \code{None}. (Defining a
macro \code{FOO} to \code{None} is the equivalent of a bare
\code{\#define FOO} in your C source: with most compilers, this sets
\code{FOO} to the string \code{1}.) \code{undef\_macros} is just
a list of macros to undefine.
For example:
\begin{verbatim}
Extension(...,
          define_macros=[('NDEBUG', '1'),
                         ('HAVE_STRFTIME', None)],
          undef_macros=['HAVE_FOO', 'HAVE_BAR'])
\end{verbatim}
is the equivalent of having this at the top of every C source file:
\begin{verbatim}
#define NDEBUG 1
#define HAVE_STRFTIME
#undef HAVE_FOO
#undef HAVE_BAR
\end{verbatim}
\subsubsection{Library options}
You can also specify the libraries to link against when building your
extension, and the directories to search for those libraries. The
\code{libraries} option is a list of libraries to link against,
\code{library\_dirs} is a list of directories to search for libraries at
link-time, and \code{runtime\_library\_dirs} is a list of directories to
search for shared (dynamically loaded) libraries at run-time.
For example, if you need to link against libraries known to be in the
standard library search path on target systems:
\begin{verbatim}
Extension(...,
libraries=["gdbm", "readline"])
\end{verbatim}
If you need to link with libraries in a non-standard location, you'll
have to include the location in \code{library\_dirs}:
\begin{verbatim}
Extension(...,
library_dirs=["/usr/X11R6/lib"],
libraries=["X11", "Xt"])
\end{verbatim}
(Again, this sort of non-portable construct should be avoided if you
intend to distribute your code.)
\XXX{still undocumented: extra\_objects, extra\_compile\_args,
extra\_link\_args, export\_symbols---none of which are frequently
needed, some of which might be completely unnecessary!}
\section{Writing the Setup Configuration File}
\label{setup-config}
Often, it's not possible to write down everything needed to build a
distribution \emph{a priori}: you may need to get some information from
the user, or from the user's system, in order to proceed. As long as
that information is fairly simple---a list of directories to search for
C header files or libraries, for example---then providing a
configuration file, \file{setup.cfg}, for users to edit is a cheap and
easy way to solicit it. Configuration files also let you provide
default values for any command option, which the installer can then
override either on the command-line or by editing the config file.
(If you have more advanced needs, such as determining which extensions
to build based on what capabilities are present on the target system,
then you need the Distutils ``auto-configuration'' facility. This
started to appear in Distutils 0.9 but, as of this writing, isn't mature
or stable enough yet for real-world use.)
\XXX{should reference description of distutils config files in
``Installing'' manual here}
The setup configuration file is a useful middle-ground between the setup
script---which, ideally, would be opaque to installers\footnote{This
ideal probably won't be achieved until auto-configuration is fully
supported by the Distutils.}---and the command-line to the setup
script, which is outside of your control and entirely up to the
installer. In fact, \file{setup.cfg} (and any other Distutils
configuration files present on the target system) are processed after
the contents of the setup script, but before the command-line. This has
several useful consequences:
\begin{itemize}
\item installers can override some of what you put in \file{setup.py} by
editing \file{setup.cfg}
\item you can provide non-standard defaults for options that are not
easily set in \file{setup.py}
\item installers can override anything in \file{setup.cfg} using the
command-line options to \file{setup.py}
\end{itemize}
The basic syntax of the configuration file is simple:
\begin{verbatim}
[command]
option=value
...
\end{verbatim}
where \var{command} is one of the Distutils commands (e.g.
\command{build\_py}, \command{install}), and \var{option} is one of the
options that command supports. Any number of options can be supplied
for each command, and any number of command sections can be included in
the file. Blank lines are ignored, as are comments (from a \verb+#+
character to end-of-line). Long option values can be split across
multiple lines simply by indenting the continuation lines.
You can find out the list of options supported by a particular command
with the universal \longprogramopt{help} option, e.g.
\begin{verbatim}
> python setup.py --help build_ext
[...]
Options for 'build_ext' command:
--build-lib (-b) directory for compiled extension modules
--build-temp (-t) directory for temporary files (build by-products)
--inplace (-i) ignore build-lib and put compiled extensions into the
source directory alongside your pure Python modules
--include-dirs (-I) list of directories to search for header files
--define (-D) C preprocessor macros to define
--undef (-U) C preprocessor macros to undefine
[...]
\end{verbatim}
Or consult section \ref{reference} of this document (the command
reference).
Note that an option spelled \longprogramopt{foo-bar} on the command-line
is spelled \option{foo\_bar} in configuration files.
For example, say you want your extensions to be built
``in-place''---that is, you have an extension \module{pkg.ext}, and you
want the compiled extension file (\file{ext.so} on Unix, say) to be put
in the same source directory as your pure Python modules
\module{pkg.mod1} and \module{pkg.mod2}. You can always use the
\longprogramopt{inplace} option on the command-line to ensure this:
\begin{verbatim}
python setup.py build_ext --inplace
\end{verbatim}
But this requires that you always specify the \command{build\_ext}
command explicitly, and remember to provide \longprogramopt{inplace}.
An easier way is to ``set and forget'' this option, by encoding it in
\file{setup.cfg}, the configuration file for this distribution:
\begin{verbatim}
[build_ext]
inplace=1
\end{verbatim}
This will affect all builds of this module distribution, whether or not
you explicitly specify \command{build\_ext}. If you include
\file{setup.cfg} in your source distribution, it will also affect
end-user builds---which is probably a bad idea for this option, since
always building extensions in-place would break installation of the
module distribution. In certain peculiar cases, though, modules are
built right in their installation directory, so this is conceivably a
useful ability. (Distributing extensions that expect to be built in
their installation directory is almost always a bad idea, though.)
Another example: certain commands take a lot of options that don't
change from run-to-run; for example, \command{bdist\_rpm} needs to know
everything required to generate a ``spec'' file for creating an RPM
distribution. Some of this information comes from the setup script, and
some is automatically generated by the Distutils (such as the list of
files installed). But some of it has to be supplied as options to
\command{bdist\_rpm}, which would be very tedious to do on the
command-line for every run. Hence, here is a snippet from the
Distutils' own \file{setup.cfg}:
\begin{verbatim}
[bdist_rpm]
release = 1
packager = Greg Ward <[email protected]>
doc_files = CHANGES.txt
README.txt
USAGE.txt
doc/
examples/
\end{verbatim}
Note that the \option{doc\_files} option is simply a
whitespace-separated string split across multiple lines for readability.
\section{Creating a Source Distribution}
\label{source-dist}
As shown in section~\ref{simple-example}, you use the
\command{sdist} command to create a source distribution. In the
simplest case,
\begin{verbatim}
python setup.py sdist
\end{verbatim}
(assuming you haven't specified any \command{sdist} options in the setup
script or config file), \command{sdist} creates the archive of the
default format for the current platform. The default format is a gzip'ed
tar file (\file{.tar.gz}) on Unix, and a ZIP file on Windows. \XXX{no Mac
OS support here}
You can specify as many formats as you like using the
\longprogramopt{formats} option, for example:
\begin{verbatim}
python setup.py sdist --formats=gztar,zip
\end{verbatim}
to create a gzipped tarball and a zip file. The available formats are:
\begin{tableiii}{l|l|c}{code}%
{Format}{Description}{Notes}
\lineiii{zip}{zip file (\file{.zip})}{(1),(3)}
\lineiii{gztar}{gzip'ed tar file (\file{.tar.gz})}{(2),(4)}
\lineiii{bztar}{bzip2'ed tar file (\file{.tar.bz2})}{(4)}
\lineiii{ztar}{compressed tar file (\file{.tar.Z})}{(4)}
\lineiii{tar}{tar file (\file{.tar})}{(4)}
\end{tableiii}
\noindent Notes:
\begin{description}
\item[(1)] default on Windows
\item[(2)] default on Unix
\item[(3)] requires either external \program{zip} utility or
\module{zipfile} module (not part of the standard Python library)
\item[(4)] requires external utilities: \program{tar} and possibly one
of \program{gzip}, \program{bzip2}, or \program{compress}
\end{description}
\subsection{Specifying the files to distribute}
\label{manifest}
If you don't supply an explicit list of files (or instructions on how to
generate one), the \command{sdist} command puts a minimal default set
into the source distribution:
\begin{itemize}
\item all Python source files implied by the \option{py\_modules} and
\option{packages} options
\item all C source files mentioned in the \option{ext\_modules} or
\option{libraries} options (\XXX{getting C library sources currently
broken -- no get\_source\_files() method in build\_clib.py!})
\item anything that looks like a test script: \file{test/test*.py}
(currently, the Distutils don't do anything with test scripts except
include them in source distributions, but in the future there will be
a standard for testing Python module distributions)
\item \file{README.txt} (or \file{README}), \file{setup.py} (or whatever
you called your setup script), and \file{setup.cfg}
\end{itemize}
Sometimes this is enough, but usually you will want to specify
additional files to distribute. The typical way to do this is to write
a \emph{manifest template}, called \file{MANIFEST.in} by default. The
manifest template is just a list of instructions for how to generate
your manifest file, \file{MANIFEST}, which is the exact list of files to
include in your source distribution. The \command{sdist} command
processes this template and generates a manifest based on its
instructions and what it finds in the filesystem.
If you prefer to roll your own manifest file, the format is simple: one
filename per line, regular files (or symlinks to them) only. If you do
supply your own \file{MANIFEST}, you must specify everything: the
default set of files described above does not apply in this case.
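For example, a complete hand-written \file{MANIFEST} for the one-module
distribution of section~\ref{simple-example} could be as short as:
\begin{verbatim}
README.txt
setup.py
foo.py
\end{verbatim}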
The manifest template has one command per line, where each command
specifies a set of files to include or exclude from the source
distribution. For an example, again we turn to the Distutils' own
manifest template:
\begin{verbatim}
include *.txt
recursive-include examples *.txt *.py
prune examples/sample?/build
\end{verbatim}
The meanings should be fairly clear: include all files in the
distribution root matching \code{*.txt}, all files anywhere under the
\file{examples} directory matching \code{*.txt} or \code{*.py}, and
exclude all directories matching \code{examples/sample?/build}. All of
this is done \emph{after} the standard include set, so you can exclude
files from the standard set with explicit instructions in the manifest
template. (Or, you can use the \longprogramopt{no-defaults} option to
disable the standard set entirely.) There are several other commands
available in the manifest template mini-language; see
section~\ref{sdist-cmd}.
The order of commands in the manifest template matters: initially, we
have the list of default files as described above, and each command in
the template adds to or removes from that list of files. Once we have
fully processed the manifest template, we remove files that should not
be included in the source distribution:
\begin{itemize}
\item all files in the Distutils ``build'' tree (default \file{build/})
\item all files in directories named \file{RCS} or \file{CVS}
\end{itemize}
Now we have our complete list of files, which is written to the manifest
for future reference, and then used to build the source distribution
archive(s).
You can disable the default set of included files with the
\longprogramopt{no-defaults} option, and you can disable the standard
exclude set with \longprogramopt{no-prune}.
Following the Distutils' own manifest template, let's trace how the
\command{sdist} command builds the list of files to include in the
Distutils source distribution:
\begin{enumerate}
\item include all Python source files in the \file{distutils} and
\file{distutils/command} subdirectories (because packages
corresponding to those two directories were mentioned in the
\option{packages} option in the setup script---see
section~\ref{setup-script})
\item include \file{README.txt}, \file{setup.py}, and \file{setup.cfg}
(standard files)
\item include \file{test/test*.py} (standard files)
\item include \file{*.txt} in the distribution root (this will find
\file{README.txt} a second time, but such redundancies are weeded out
later)
\item include anything matching \file{*.txt} or \file{*.py} in the
sub-tree under \file{examples},
\item exclude all files in the sub-trees starting at directories
matching \file{examples/sample?/build}---this may exclude files
included by the previous two steps, so it's important that the
\code{prune} command in the manifest template comes after the
\code{recursive-include} command
\item exclude the entire \file{build} tree, and any \file{RCS} or
\file{CVS} directories
\end{enumerate}
Just like in the setup script, file and directory names in the manifest
template should always be slash-separated; the Distutils will take care
of converting them to the standard representation on your platform.
That way, the manifest template is portable across operating systems.
\subsection{Manifest-related options}
\label{manifest-options}
The normal course of operations for the \command{sdist} command is as
follows:
\begin{itemize}
\item if the manifest file, \file{MANIFEST}, doesn't exist, read
\file{MANIFEST.in} and create the manifest
\item if neither \file{MANIFEST} nor \file{MANIFEST.in} exists, create a
manifest with just the default file set\footnote{In versions of the
Distutils up to and including 0.9.2 (Python 2.0b1), this feature was
broken; use the \programopt{-f} (\longprogramopt{force-manifest})
option to work around the bug.}
\item if either \file{MANIFEST.in} or the setup script (\file{setup.py})
is more recent than \file{MANIFEST}, recreate \file{MANIFEST} by
reading \file{MANIFEST.in}
\item use the list of files now in \file{MANIFEST} (either just
generated or read in) to create the source distribution archive(s)
\end{itemize}
There are a couple of options that modify this behaviour. First, use
the \longprogramopt{no-defaults} and \longprogramopt{no-prune} options to
disable the standard ``include'' and ``exclude'' sets.\footnote{Note
that if you have no manifest template, no manifest, and use the
\longprogramopt{no-defaults} option, you will get an empty manifest. Another
bug in Distutils 0.9.2 and earlier causes an uncaught exception in
this case. The workaround is: Don't Do That.}
Second, you might want to force the manifest to be regenerated---for
example, if you have added or removed files or directories that match an
existing pattern in the manifest template, you should regenerate the
manifest:
\begin{verbatim}
python setup.py sdist --force-manifest
\end{verbatim}
Or, you might just want to (re)generate the manifest, but not create a
source distribution:
\begin{verbatim}
python setup.py sdist --manifest-only
\end{verbatim}
\longprogramopt{manifest-only} implies \longprogramopt{force-manifest}.
\programopt{-o} is a shortcut for \longprogramopt{manifest-only}, and
\programopt{-f} for \longprogramopt{force-manifest}.
\section{Creating Built Distributions}
\label{built-dist}
A ``built distribution'' is what you're probably used to thinking of
either as a ``binary package'' or an ``installer'' (depending on your
background). It's not necessarily binary, though, because it might
contain only Python source code and/or byte-code; and we don't call it a
package, because that word is already spoken for in Python. (And
``installer'' is a term specific to the Windows world. \XXX{do Mac
people use it?})
A built distribution is how you make life as easy as possible for
installers of your module distribution: for users of RPM-based Linux
systems, it's a binary RPM; for Windows users, it's an executable
installer; for Debian-based Linux users, it's a Debian package; and so
forth. Obviously, no one person will be able to create built
distributions for every platform under the sun, so the Distutils are
designed to enable module developers to concentrate on their
specialty---writing code and creating source distributions---while an
intermediary species of \emph{packager} springs up to turn source
distributions into built distributions for as many platforms as there
are packagers.
Of course, the module developer could be his own packager; or the
packager could be a volunteer ``out there'' somewhere who has access to
a platform which the original developer does not; or it could be
software periodically grabbing new source distributions and turning them
into built distributions for as many platforms as the software has
access to. Regardless of the nature of the beast, a packager uses the
setup script and the \command{bdist} command family to generate built
distributions.
As a simple example, if I run the following command in the Distutils
source tree:
\begin{verbatim}
python setup.py bdist
\end{verbatim}
then the Distutils builds my module distribution (the Distutils itself
in this case), does a ``fake'' installation (also in the \file{build}
directory), and creates the default type of built distribution for my
platform. The default format for built distributions is a ``dumb'' tar
file on Unix, and a simple executable installer on Windows. (That tar
file is considered ``dumb'' because it has to be unpacked in a specific
location to work.)
Thus, the above command on a Unix system creates
\file{Distutils-0.9.1.\filevar{plat}.tar.gz}; unpacking this tarball
from the right place installs the Distutils just as though you had
downloaded the source distribution and run \code{python setup.py
install}. (The ``right place'' is either the root of the filesystem or
Python's \filevar{prefix} directory, depending on the options given to
the \command{bdist\_dumb} command; the default is to make dumb
distributions relative to \filevar{prefix}.)
Obviously, for pure Python distributions, this isn't a huge win---but
for non-pure distributions, which include extensions that would need to
be compiled, it can mean the difference between someone being able to
use your extensions or not. And creating ``smart'' built distributions,
such as an RPM package or an executable installer for Windows, is a big
win for users even if your distribution doesn't include any extensions.
The \command{bdist} command has a \longprogramopt{formats} option,
similar to the \command{sdist} command, which you can use to select the
types of built distribution to generate: for example,
\begin{verbatim}
python setup.py bdist --format=zip
\end{verbatim}
would, when run on a Unix system, create
\file{Distutils-0.9.1.\filevar{plat}.zip}---again, this archive would be
unpacked from the root directory to install the Distutils.
The available formats for built distributions are:
\begin{tableiii}{l|l|c}{code}%
{Format}{Description}{Notes}
\lineiii{gztar}{gzipped tar file (\file{.tar.gz})}{(1),(3)}
\lineiii{ztar}{compressed tar file (\file{.tar.Z})}{(3)}
\lineiii{tar}{tar file (\file{.tar})}{(3)}
\lineiii{zip}{zip file (\file{.zip})}{(4)}
\lineiii{rpm}{RPM}{(5)}
\lineiii{srpm}{source RPM}{(5) \XXX{to do!}}
\lineiii{wininst}{self-extracting ZIP file for Windows}{(2),(6)}
%\lineiii{wise}{Wise installer for Windows}{(3)}
\end{tableiii}
\noindent Notes:
\begin{description}
\item[(1)] default on Unix
\item[(2)] default on Windows \XXX{to-do!}
\item[(3)] requires external utilities: \program{tar} and possibly one
of \program{gzip}, \program{bzip2}, or \program{compress}
\item[(4)] requires either external \program{zip} utility or
\module{zipfile} module (not part of the standard Python library)
\item[(5)] requires external \program{rpm} utility, version 3.0.4 or
better (use \code{rpm --version} to find out which version you have)
\item[(6)] \XXX{requirements for \command{bdist\_wininst}?}
%\item[(3)] not implemented yet
\end{description}
You don't have to use the \command{bdist} command with the
\longprogramopt{formats} option; you can also use the command that
directly implements the format you're interested in. Some of these
\command{bdist} ``sub-commands'' actually generate several similar
formats; for instance, the \command{bdist\_dumb} command generates all
the ``dumb'' archive formats (\code{tar}, \code{ztar}, \code{gztar}, and
\code{zip}), and \command{bdist\_rpm} generates both binary and source
RPMs. The \command{bdist} sub-commands, and the formats generated by
each, are:
\begin{tableii}{l|l}{command}%
{Command}{Formats}
\lineii{bdist\_dumb}{tar, ztar, gztar, zip}
\lineii{bdist\_rpm}{rpm, srpm}
\lineii{bdist\_wininst}{wininst}
%\lineii{bdist\_wise}{wise}
\end{tableii}
The following sections give details on the individual \command{bdist\_*}
commands.
\subsection{Creating dumb built distributions}
\label{creating-dumb}
\XXX{Need to document absolute vs. prefix-relative packages here, but
first I have to implement it!}
\subsection{Creating RPM packages}
\label{creating-rpms}
The RPM format is used by many popular Linux distributions, including
Red Hat, SuSE, and Mandrake. If one of these (or any of the other
RPM-based Linux distributions) is your usual environment, creating RPM
packages for other users of that same distribution is trivial.
Depending on the complexity of your module distribution and differences
between Linux distributions, you may also be able to create RPMs that
work on different RPM-based distributions.
The usual way to create an RPM of your module distribution is to run the
\command{bdist\_rpm} command:
\begin{verbatim}
python setup.py bdist_rpm
\end{verbatim}
or the \command{bdist} command with the \longprogramopt{formats} option:
\begin{verbatim}
python setup.py bdist --formats=rpm
\end{verbatim}
The former allows you to specify RPM-specific options; the latter allows
you to easily specify multiple formats in one run. If you need to do
both, you can explicitly specify multiple \command{bdist\_*} commands
and their options:
\begin{verbatim}
python setup.py bdist_rpm --packager="John Doe <[email protected]>" \
                       bdist_wininst --target-version="2.0"
\end{verbatim}
Creating RPM packages is driven by a \file{.spec} file, much as using
the Distutils is driven by the setup script. To make your life easier,
the \command{bdist\_rpm} command normally creates a \file{.spec} file
based on the information you supply in the setup script, on the command
line, and in any Distutils configuration files. Various options and
sections in the \file{.spec} file are derived from options in the setup
script as follows:
\begin{tableii}{l|l}{textrm}%
{RPM \file{.spec} file option or section}{Distutils setup script option}
\lineii{Name}{\option{name}}
\lineii{Summary (in preamble)}{\option{description}}
\lineii{Version}{\option{version}}
\lineii{Vendor}{\option{author} and \option{author\_email}, or \\&
\option{maintainer} and \option{maintainer\_email}}
\lineii{Copyright}{\option{licence}}
\lineii{Url}{\option{url}}
\lineii{\%description (section)}{\option{long\_description}}
\end{tableii}
Additionally, there are many options in \file{.spec} files that don't have
corresponding options in the setup script. Most of these are handled
through options to the \command{bdist\_rpm} command as follows:
\begin{tableiii}{l|l|l}{textrm}%
{RPM \file{.spec} file option or section}%
{\command{bdist\_rpm} option}%
{default value}
\lineiii{Release}{\option{release}}{``1''}
\lineiii{Group}{\option{group}}{``Development/Libraries''}
\lineiii{Vendor}{\option{vendor}}{(see above)}
\lineiii{Packager}{\option{packager}}{(none)}
\lineiii{Provides}{\option{provides}}{(none)}
\lineiii{Requires}{\option{requires}}{(none)}
\lineiii{Conflicts}{\option{conflicts}}{(none)}
\lineiii{Obsoletes}{\option{obsoletes}}{(none)}
\lineiii{Distribution}{\option{distribution\_name}}{(none)}
\lineiii{BuildRequires}{\option{build\_requires}}{(none)}
\lineiii{Icon}{\option{icon}}{(none)}
\end{tableiii}
Obviously, supplying even a few of these options on the command-line
would be tedious and error-prone, so it's usually best to put them in
the setup configuration file, \file{setup.cfg}---see
section~\ref{setup-config}. If you distribute or package many Python
module distributions, you might want to put options that apply to all of
them in your personal Distutils configuration file
(\file{\textasciitilde/.pydistutils.cfg}).
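For instance, a personal configuration file carrying packager details
that should apply to every distribution you build might contain (the
values shown are placeholders):
\begin{verbatim}
[bdist_rpm]
packager = Jane Doe <jdoe@example.com>
release = 1
\end{verbatim}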
There are three steps to building a binary RPM package, all of which are
handled automatically by the Distutils:
\begin{enumerate}
\item create a \file{.spec} file, which describes the package (analogous
to the Distutils setup script; in fact, much of the information in the
setup script winds up in the \file{.spec} file)
\item create the source RPM
\item create the ``binary'' RPM (which may or may not contain binary
code, depending on whether your module distribution contains Python
extensions)
\end{enumerate}
Normally, RPM bundles the last two steps together; when you use the
Distutils, all three steps are typically bundled together.
If you wish, you can separate these three steps. You can use the
\longprogramopt{spec-only} option to make \command{bdist\_rpm} just
create the \file{.spec} file and exit; in this case, the \file{.spec}
file will be written to the ``distribution directory''---normally
\file{dist/}, but customizable with the \longprogramopt{dist-dir}
option. (Normally, the \file{.spec} file winds up deep in the ``build
tree,'' in a temporary directory created by \command{bdist\_rpm}.)
\XXX{this isn't implemented yet---is it needed?!}
You can also specify a custom \file{.spec} file with the
\longprogramopt{spec-file} option; used in conjunction with
\longprogramopt{spec-only}, this gives you an opportunity to customize
the \file{.spec} file manually:
\begin{verbatim}
> python setup.py bdist_rpm --spec-only
# ...edit dist/FooBar-1.0.spec
> python setup.py bdist_rpm --spec-file=dist/FooBar-1.0.spec
\end{verbatim}
(Although a better way to do this is probably to override the standard
\command{bdist\_rpm} command with one that writes whatever else you want
to the \file{.spec} file; see section~\ref{extending} for information on
extending the Distutils.)
\subsection{Creating Windows installers}
\label{creating-wininst}
\section{Examples}
\label{examples}
\subsection{Pure Python distribution (by module)}
\label{pure-mod}
\subsection{Pure Python distribution (by package)}
\label{pure-pkg}
\subsection{Single extension module}
\label{single-ext}
\subsection{Multiple extension modules}
\label{multiple-ext}
\subsection{Putting it all together}
\section{Extending the Distutils}
\label{extending}
\subsection{Extending existing commands}
\label{extend-existing}
\subsection{Writing new commands}
\label{new-commands}
\section{Reference}
\label{reference}
\subsection{Building modules: the \protect\command{build} command family}
\label{build-cmds}
\subsubsection{\protect\command{build}}
\label{build-cmd}
\subsubsection{\protect\command{build\_py}}
\label{build-py-cmd}
\subsubsection{\protect\command{build\_ext}}
\label{build-ext-cmd}
\subsubsection{\protect\command{build\_clib}}
\label{build-clib-cmd}
\subsection{Installing modules: the \protect\command{install} command family}
\label{install-cmd}
The install command ensures that the build commands have been run and then
runs the subcommands \command{install\_lib},
\command{install\_data} and
\command{install\_scripts}.
\subsubsection{\protect\command{install\_lib}}
\label{install-lib-cmd}
\subsubsection{\protect\command{install\_data}}
\label{install-data-cmd}
This command installs all data files provided with the distribution.
\subsubsection{\protect\command{install\_scripts}}
\label{install-scripts-cmd}
This command installs all (Python) scripts in the distribution.
\subsection{Cleaning up: the \protect\command{clean} command}
\label{clean-cmd}
\subsection{Creating a source distribution: the \protect\command{sdist} command}
\label{sdist-cmd}
\XXX{fragment moved down from above: needs context!}
The manifest template commands are:
\begin{tableii}{ll}{command}{Command}{Description}
\lineii{include \var{pat1} \var{pat2} ... }
{include all files matching any of the listed patterns}
\lineii{exclude \var{pat1} \var{pat2} ... }
{exclude all files matching any of the listed patterns}
\lineii{recursive-include \var{dir} \var{pat1} \var{pat2} ... }
{include all files under \var{dir} matching any of the listed patterns}
\lineii{recursive-exclude \var{dir} \var{pat1} \var{pat2} ...}
{exclude all files under \var{dir} matching any of the listed patterns}
\lineii{global-include \var{pat1} \var{pat2} ...}
{include all files anywhere in the source tree matching\\&
any of the listed patterns}
\lineii{global-exclude \var{pat1} \var{pat2} ...}
{exclude all files anywhere in the source tree matching\\&
any of the listed patterns}
\lineii{prune \var{dir}}{exclude all files under \var{dir}}
\lineii{graft \var{dir}}{include all files under \var{dir}}
\end{tableii}
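As a small illustration of commands not used in the earlier example, the
following template keeps \file{README.txt}, grafts an entire \file{doc}
tree, and strips editor backup files everywhere (the directory name is
illustrative):
\begin{verbatim}
include README.txt
graft doc
global-exclude *~
\end{verbatim}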
The patterns here are Unix-style ``glob'' patterns: \code{*} matches any
sequence of regular filename characters, \code{?} matches any single
regular filename character, and \code{[\var{range}]} matches any of the
characters in \var{range} (e.g., \code{a-z}, \code{a-zA-Z},
\code{a-f0-9\_.}). The definition of ``regular filename character'' is
platform-specific: on Unix it is anything except slash; on Windows
anything except backslash or colon; on Mac OS anything except colon.
\XXX{Windows and Mac OS support not there yet}
\subsection{Creating a ``built'' distribution: the
\protect\command{bdist} command family}
\label{bdist-cmds}
\subsubsection{\protect\command{bdist}}
\subsubsection{\protect\command{bdist\_dumb}}
\subsubsection{\protect\command{bdist\_rpm}}
\subsubsection{\protect\command{bdist\_wininst}}
\end{document}
\section{Conclusion}
\input{conclusion-nafiseh}
\input{conclusion-weike}
\subsection{Capture-HPC}
Capture-HPC is a high-interaction client honeypot: a system designed to actively
search for malware by rendering the content at provided URLs. In addition to
offering an accurate simulation of a user browsing the web, Capture-HPC provides
much flexibility, being capable of automatically rendering content using a variety of
client programs, including web browsers such as Internet Explorer and Firefox,
and the PDF viewer Adobe Reader. It also allows a degree of freedom in choosing
the sensitivity of the malware detection using the exclusion lists, a feature
that also makes the process of porting Capture-HPC to other Windows OSs
considerably simpler.
Capture-HPC was found to be fairly slow, justifiable by the amount of work that
must happen to render the content in a real web browser. The issue of speed
meant that Capture-HPC was integrated into the framework with some care,
restricting the number of URLs passed to it via classification in an attempt to
limit the execution time for a set of URLs. Despite this, Capture-HPC remains
one of the more powerful malware scanners available in the framework.
\subsection{Framework}
Using a distributed framework as a foundation for the project proved successful,
although issues arising from the complexity of adapting code to run on the
framework detracted somewhat from its effectiveness. The framework as
implemented using Celery, RabbitMQ and Redis was capable of effectively
distributing tasks to workers using routing features provided to route specific
tasks to a specific worker, needed by the Capture-HPC malware scanner which
requires configuration and resources outside of the scope of the framework.
Caching was also effectively used to reduce duplicate requests, with most of the
workers implementing Redis caching of URLs mapped to results.
\subsection{Project Conclusions}
Unfortunately, the project did not progress sufficiently to test the hypothesis
that malware is specifically targeted at trending topics. This would have required
considerable time to collect data, and the reliability of the project in
classifying URLs as malicious or benign has not yet been determined as this is a
property of the combination of malware scanners used and confidence ratings
applied to the scanners. A small suite of scanners has been assembled and
tested, including web lists and Capture-HPC.
The project has successfully combined a distributed task framework with trend
sources, search retrieval, and malware scanning, and the end result is a
re-usable framework appropriate for carrying out further investigation into the
hypothesis. It is hoped that some data can be collected and a preliminary
conclusion to the hypothesis reached in time for the final presentation to the customer.
\vspace{\baselineskip}\chapter*{Appendix A: \\ Social, economic and environmental impact of the thesis}
\addcontentsline{toc}{chapter}{Appendix A: Social, economic and environmental impact of the thesis}
National infrastructure systems form the basis for the economy of a country. Building infrastructure is a long-term commitment that is very difficult to reverse, which means that infrastructure decisions have major implications for sustainability. Moreover, these investments require very significant capital and have long lead-times and lifetimes; investments taking place right now will therefore lock in patterns of development for decades to come. The ITRC project is creating a set of models to assist in the planning and design of national infrastructure.\par
NISMOD (National Infrastructure Systems MODel) is the infrastructure system-of-systems modelling platform of the ITRC. One of its models is MINERVA, which is in charge of modelling the communications subsystem and within which this Master's Thesis is framed.\par
At present, complex decisions about future telecommunications networks are made using traditional deployment models that are isolated from other national infrastructures and constraints. This work aims to contribute to creating this platform, which will allow decisions to be made with a much broader view.\par
From the economic point of view, this work will contribute to the faster development of telecom operators, since they will have a better understanding of the impact of their long-term investments. Additionally, it will reduce the risks of deploying telecommunication services in unprofitable areas, increasing their economic viability and thus bringing technology closer to people who live in small communities.\par
From the ethical perspective, this work will help in the just distribution of the radio spectrum, a limited good belonging to all, since it will help policy-makers to create better coverage obligations that maximize how fully it is harnessed.\par
To sum up, this work also favours the general development of society and, owing to the enabling character of the telecommunications sector, it will help in making advances in virtually all technology sectors.\par
| {
"alphanum_fraction": 0.8258205689,
"avg_line_length": 152.3333333333,
"ext": "tex",
"hexsha": "2bbb194c31c7afa0584b8555615e50834ae27420",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "e4c6cdc256caca23cec29362251ad5f328b57eaf",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "alvarolop/tfm_latex",
"max_forks_repo_path": "tex_files/Appendix1.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "e4c6cdc256caca23cec29362251ad5f328b57eaf",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "alvarolop/tfm_latex",
"max_issues_repo_path": "tex_files/Appendix1.tex",
"max_line_length": 589,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "e4c6cdc256caca23cec29362251ad5f328b57eaf",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "alvarolop/tfm_latex",
"max_stars_repo_path": "tex_files/Appendix1.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 419,
"size": 2285
} |
\chapter{Risk Management}
\section{Overview}
Risks can be seen as events we would prefer not to happen. They may threaten the project, the software being developed or the organization. Risk management involves anticipating the risks that might affect either the project schedule or the quality of the software produced, in order to take action to avoid them.
Risks can be categorized according to their type. A complementary classification is according to what the risks affect:
\begin{enumerate}
\item Project risks affect the schedule and/or resources of the project
\item Product risks affect the quality and/or performance of the software produced
\item Business risks affect the organization developing or procuring the software.
\end{enumerate}
Oftentimes, these risk categories overlap. For example, losing an experienced engineer can present a project, product and business risk at the same time, for the following reasons:
\begin{itemize}
\item A project risk because it takes time to get a new team member up to speed
\item A product risk because a replacement may not be as experienced and make more errors
\item A business risk because the engineer's reputation might have been critical to win contracts.
\end{itemize}
For large projects, the results of the risk analysis should be recorded in a risk register along with a consequence analysis. The consequence analysis sets out the consequences of the risks for the project, product and business. Effective risk management makes it easier to cope with problems and to ensure that these do not lead to unacceptable budget or schedule slippage. For smaller projects, formal risk recording may not be required, but the project manager should still be aware of the risks and their consequences.
The specific risks that may affect a project depend on the project and the organizational environment in which the software is being developed. However, there are common risks that are independent of the type of software being developed, and these can occur in any software development project. Some examples of these are:
\begin{enumerate}
\item Staff turnover: Experienced staff will leave the project before it is finished
\item Management change: There will be a change of company management with different priorities
\item Hardware unavailability: Hardware that is essential for the project will not be delivered on schedule
\item Requirements change: There will be a larger number of changes to the requirements than anticipated
\item Specification delays: Specifications of essential interfaces are not available on schedule
\item Size underestimate: The size of the system has been underestimated
\item Software tool underperformance: Software tools that support the project do not perform as anticipated
\item Technology change: The underlying technology on which the system is built is superseded by new technology
\item Product competition: A competitive product is marketed before the system is completed
\end{enumerate}
Software risk management is important because of the inherent uncertainties in software development. These uncertainties stem from loosely defined requirements, requirements changes due to changes in customer needs, difficulties in estimating the time and resources required for software development, and differences in individual skills. There is a need to anticipate risks, understand their impact on the project, the product and the business and take steps to avoid them. There may be a need to draw up contingency plans so that, if the risks do occur, immediate recovery action can be taken.
Risk management is a process that involves several stages. Here is an outline of it:
\begin{enumerate}
\item Risk identification: Identify possible project, product and business risks
\item Risk analysis: Assess the likelihood and consequences of the risks identified
\item Risk planning: Make plans to address the risks, either by avoiding them or by minimizing their effects on the project
\item Risk monitoring: Regularly assess the risks and the plans for risk mitigation, and revise the plans when more is known about the risks.
\end{enumerate}
For large projects, the outcomes of the risk management process should be documented in a risk management plan. This should include a discussion of the risks faced by the project, an analysis of these risks, and information on how each risk is to be managed if it seems likely to become a problem.
The risk management process is an iterative process that continues throughout a project. Once an initial risk management plan has been drawn up, the situation needs to be monitored to detect emerging risks. As more information about the risks becomes available, they need to be re-analyzed, and a decision made about whether their priority has changed. Depending on that, the plans for risk avoidance and contingency management may need to change.
In an agile development process, risk management is less formal. The same fundamental activities should still be followed and risks discussed, although these may not be formally documented. However, agile development also has a downside. Because of its reliance on people, staff turnover can have significant effects on the project, product and business, and because of the lack of formal documentation and the reliance on informal communication, it is very hard to maintain continuity and momentum if key people leave the project.
\section{Risk Identification}
As seen previously, risk identification is the first stage of the risk management process. It is concerned with identifying the risks that could pose a major threat to the software engineering process, the software being developed or the development organization. Risk identification may be a team process, in which a team gets together to brainstorm possible risks. Alternatively, project managers may identify risks based on their experience of what went wrong on previous projects.
As a starting point for risk identification, a checklist of different types of risk may be used. Six types of risk may be included in a risk checklist:
\begin{enumerate}
\item Estimation risks arise from the management estimates of the resources required to build the system
\item Organizational risks arise from the organizational environment where the software is being developed
\item People risks are associated with the people in the development team
\item Requirements risks come from changes to the customer requirements and the process of managing the requirements change
\item Technology risks come from the software or hardware technologies that are used to develop the system
\item Tools risks come from the software tools and other support software used to develop the system
\end{enumerate}
Here is a list of the possible risks in each of these categories:
\begin{itemize}
\item Estimation
\begin{enumerate}
\item The time required to develop the system is underestimated
\item The rate of defect repair is underestimated
\item The size of the software is underestimated
\end{enumerate}
\item Organizational
\begin{enumerate}
\item The organization is restructured so that different management are responsible for the project
\item Organizational financial problems force reductions in the project budget
\end{enumerate}
\item People
\begin{enumerate}
\item It is impossible to recruit staff with the skills required
\item Key staff are ill and unavailable at critical times
\item Required training for staff is not available
\end{enumerate}
\item Requirements
\begin{enumerate}
\item Changes to requirements that require major design rework are proposed
\item Customers fail to understand the impact of requirements changes
\end{enumerate}
\item Technology
\begin{enumerate}
\item The database used in the system cannot process as many transactions per second as expected
\item Faults in reusable software components have to be repaired before these components are reused
\end{enumerate}
\item Tools
\begin{enumerate}
\item The code generated by software code generation tools is inefficient
\item Software tools cannot work together in an integrated way
\end{enumerate}
\end{itemize}
When the risk identification process has been finished, there should be a long list of risks that could occur and that could affect the product, the process and the business. This list then needs to be pruned to a manageable size; if there are too many risks, it is practically impossible to keep track of all of them.
\section{Risk Analysis}
During the risk analysis process, we consider each identified risk and make a judgement about its probability and seriousness. There is no easy way to do this, and the project manager needs to rely heavily on judgement and experience from previous projects and the problems that arose in them. It is not possible to make a precise, numeric assessment of the probability and seriousness of each risk. Rather, each risk should be assigned to one of a number of bands:
\begin{enumerate}
\item The probability of the risk might be assessed as insignificant, low, moderate, high or very high
\item The effects of the risk might be assessed as catastrophic (threatening the survival of the project), serious (would cause major delays), tolerable (delays are within the allowed contingency) or insignificant
\end{enumerate}
After that, it is possible to tabulate the results of the analysis process in a table ordered according to the seriousness of the risks. To make this assessment, we need detailed information about the project, the process, the development team, and the organization.
Of course, both the probability and the assessment of the effects of a risk may change as more information about the risk becomes available and as risk management plans are implemented. We should therefore update this table during each iteration of the risk management process.
Once the risks have been analyzed and ranked, we should assess which of these risks are the most significant. The judgement must depend on a combination of the probability of the risk arising and the effects of that risk. In general, catastrophic risks should always be considered, as should all serious risks that have more than a moderate probability of occurrence.
\cite{spiralModelSoftDev} recommends identifying and monitoring the ``top 10'' risks. The right number of risks to monitor, however, should depend on the project.
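As a minimal illustration (a sketch, not taken from any referenced text), a risk register using the ordinal bands above can be represented and ranked as follows; the numeric encoding of the bands and the example risks are assumptions made for the purpose of the example.

\begin{verbatim}
from dataclasses import dataclass

# Ordinal bands, encoded so that higher numbers mean "worse".
PROBABILITY = {"insignificant": 1, "low": 2, "moderate": 3,
               "high": 4, "very high": 5}
EFFECT = {"insignificant": 1, "tolerable": 2, "serious": 3,
          "catastrophic": 4}

@dataclass
class Risk:
    description: str
    probability: str  # a key of PROBABILITY
    effect: str       # a key of EFFECT

def seriousness(risk):
    # Rank primarily by effect, so catastrophic risks always come first,
    # then by probability within each effect band.
    return (EFFECT[risk.effect], PROBABILITY[risk.probability])

register = [
    Risk("Key staff are ill at critical times", "moderate", "serious"),
    Risk("The size of the software is underestimated", "high", "tolerable"),
    Risk("Organizational financial problems", "low", "catastrophic"),
]

# Keep, say, the "top 10" for active monitoring.
for risk in sorted(register, key=seriousness, reverse=True)[:10]:
    print(f"{risk.effect:>12} / {risk.probability:<9} {risk.description}")
\end{verbatim}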
\section{Risk Planning}
The risk planning process develops strategies to manage the key risks that threaten the project. For each risk, we have to think of actions that we might take to minimize the disruption to the project if the problem identified in the risk occurs. We should also think about the information that we need to collect while monitoring the project, so that emerging problems can be detected before they become serious.
In risk planning, ``what-if'' questions need to be asked that consider individual risks, combinations of risks, and external factors that affect these risks. For example, questions that might be asked are:
\begin{enumerate}
\item What if several engineers are ill at the same time?
\item What if an economic downturn leads to budget cuts of 20\% for the project?
\item What if the performance of open-source software is inadequate and the only expert on that open-source software leaves?
\item What if the company that supplies and maintains software components goes out of business?
\item What if the customer fails to deliver the revised requirements as predicted?
\end{enumerate}
Based on the answers to these ``what-if'' questions, we may then devise strategies for managing the risks. Usually, risk management strategies fall into three categories:
\begin{enumerate}
\item Avoidance strategies: Following these strategies reduces the probability of the risk arising. An example would be a strategy of replacing potentially defective components with bought-in components known for their reliability
\item Minimization strategies: Following these strategies reduces the impact of the risk. An example would be to reorganize the team so that there is more overlap of work and people understand each other's jobs, minimizing the impact of staff illness
\item Contingency plans: Following these strategies means that the team is prepared for the worst and has a strategy in place to deal with it. An example would be to prepare a briefing document explaining the importance of the project and its contribution to the business goals, in case of organizational financial problems
\end{enumerate}
It is obviously best to use strategies that avoid risks. If that is not possible, we should use a strategy that reduces the chances of the risk having serious effects. Finally, we should have strategies in place to cope with the risk if it arises; these should reduce the overall impact of the risk on the project or product.
\section{Risk Monitoring}
Risk monitoring is the process of checking that our assumptions about the product, process and business risks have not changed. We should regularly assess each of the identified risks to decide whether or not the effects of the risk have changed. To do this, we have to look at other factors, such as the number of requirements change requests, which give us clues about the risk probability and its effects. These factors are obviously dependent on the types of risk. Factors that may be helpful in assessing these risk types are:
\begin{enumerate}
\item Estimation risks: Failure to meet agreed schedule; failure to clear reported defects
\item Organizational risks: Organizational gossip; lack of action by senior management
\item People risks: Poor staff morale; poor relationships among team members; high staff turnover
\item Requirements risks: Many requirements change requests; customer complaints
\item Technology risks: Late delivery of hardware or support software; many reported technology problems
\item Tools risks: Reluctance by team members to use tools; complaints about software tools; requests for faster computers/more memory, and so on
\end{enumerate}
Risks should be regularly monitored at all stages in a project. At every management review, each of the key risks should be considered and discussed separately. We should decide if the risk is more or less likely to arise and if the seriousness and consequences of the risk have changed.
| {
"alphanum_fraction": 0.7970740916,
"avg_line_length": 99.5503355705,
"ext": "tex",
"hexsha": "708e2cc6f9c41f221714896f83fd05586abb224a",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f985962017270f92b8504b071c4c7c3cd5d6338f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "alexcosta97/bnu-softEng-cw2",
"max_forks_repo_path": "report/1-theory/1-riskManagement.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f985962017270f92b8504b071c4c7c3cd5d6338f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "alexcosta97/bnu-softEng-cw2",
"max_issues_repo_path": "report/1-theory/1-riskManagement.tex",
"max_line_length": 595,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f985962017270f92b8504b071c4c7c3cd5d6338f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "alexcosta97/bnu-softEng-cw2",
"max_stars_repo_path": "report/1-theory/1-riskManagement.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2870,
"size": 14833
} |
\section{Introduction}
\subsection{Overview}
\input{product-description.tex}
| {
"alphanum_fraction": 0.8076923077,
"avg_line_length": 15.6,
"ext": "tex",
"hexsha": "1c6351ffbd5d144cc5556568f3f3df4e68967143",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "67de978773134c9bf79ab4647a0459c24b4ff1ac",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "Traap/git-client",
"max_forks_repo_path": "overview.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "67de978773134c9bf79ab4647a0459c24b4ff1ac",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "Traap/git-client",
"max_issues_repo_path": "overview.tex",
"max_line_length": 31,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "67de978773134c9bf79ab4647a0459c24b4ff1ac",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "Traap/git-client",
"max_stars_repo_path": "overview.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 17,
"size": 78
} |
\documentclass[a4paper]{article}
\usepackage[margin=1in]{geometry}
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{newcent}
\usepackage{helvet}
\usepackage{graphicx}
\usepackage[pdftex, pdfborder={0 0 0}]{hyperref}
\frenchspacing
\usepackage{txfonts} % For \varheartsuit and \vardiamondsuit
\usepackage[usenames,dvipsnames]{color} % dvipsnames necessary to make PDFLaTeX work.
\usepackage{enumerate}
\usepackage{listliketab}
\usepackage{latexsym} % \Box
\usepackage{pbox} % \pbox
\usepackage{parskip} % line between paragraphs
% suits
%%% Colors
\newcommand{\BC}{\textcolor{OliveGreen}{$\clubsuit$}}
\newcommand{\BD}{\textcolor{RedOrange}{$\vardiamondsuit$}}
\newcommand{\BH}{\textcolor{Red}{$\varheartsuit${}}}
\newcommand{\BS}{\textcolor{Blue}{$\spadesuit${}}}
%suits for pdf-friendly titles
\newcommand{\pdfc}{\texorpdfstring{\BC{}}{C}}
\newcommand{\pdfd}{\texorpdfstring{\BD{}}{D}}
\newcommand{\pdfh}{\texorpdfstring{\BH{}}{H}}
\newcommand{\pdfs}{\texorpdfstring{\BS{}}{S}}
\newenvironment{bidtable}
{\begin{tabbing}
xxxxxx\=xxxxxx\=xxxxxx\=xxxxxx\=xxxxxx\=xxxxxx\=xxxxxx\=xxxxxx\=xxxxxx\=xxxxxx\=\kill}
{\end{tabbing} }%
\newenvironment{bidding}%
{\begin{tabbing}
xxxxxx\=xxxxxx\=xxxxxx\=xxxxxx \kill
}{\end{tabbing} }%end bidding
% writing hands
\newcommand{\cards}[1]{\textsf{#1}}
\newcommand{\spades}[1]{\BS\cards{#1}}
\newcommand{\hearts}[1]{\BH\cards{#1}}
\newcommand{\diamonds}[1]{\BD\cards{#1}}
\newcommand{\clubs}[1]{\BC\cards{#1}}
\newcommand{\void}{--}
\newcommand{\hand}[4]{\spades{#1}\ \hearts{#2}\ \diamonds{#3} \clubs{#4}}
\newcommand{\vhand}[4]{\spades{#1}\\\hearts{#2}\\\diamonds{#3}\\\clubs{#4}}
% The \Box should always appear the same distance from the left margin
\newcommand\onesuit[4]%
{%
\begin{center}%
\begin{tabular}{>{\hfill}p{3cm}cp{3cm}}
& \cards{#2} \\%
\cards{#1}& $\Box$ & \cards{#3} \\%
& \cards{#4} %
\end{tabular}
\end{center}%
}
% A special command if the south hand is not shown to avoid whitespace
\newcommand\onesuitenw[3]%
{%
\begin{center}%
\begin{tabular}{>{\hfill}p{3cm}cp{3cm}}%
& \cards{#2} \\%
\cards{#1}& $\Box$ & \cards{#3}%
\end{tabular}%
\end{center}%
}
\newcommand\dealdiagram[5]%
{%
\begin{center}%
\begin{tabular}{>{\hfill}p{3cm}cp{3cm}}
\pbox{20cm}{\small #5}& \pbox{20cm}{#2} \\%
\pbox{20cm}{#1}& $\Box$ & \pbox{20cm}{#3} \\%
& \pbox{20cm}{#4} %
\end{tabular}
\end{center}%
}
\newcommand\dealdiagramenw[4]%
{%
\begin{center}%
\begin{tabular}{>{\hfill}p{3cm}cp{3cm}}
\pbox{20cm}{\small #4}& \pbox{20cm}{#2} \\%
\pbox{20cm}{#1}& $\Box$ & \pbox{20cm}{#3} \\%
\end{tabular}
\end{center}%
}
\newcommand\dealdiagramew[2]%
{%
\begin{center}%
\begin{tabular}{>{\hfill}p{3cm}cp{3cm}}
\pbox{20cm}{#1}& $\Box$ & \pbox{20cm}{#2} \\%
\end{tabular}
\end{center}%
}
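% Example usage (hypothetical hands; \dealdiagramew is not otherwise used
% in the body below, so an illustration is given here):
% \dealdiagramew{\vhand{AKQx}{xxx}{Axx}{xxx}}{\vhand{Jxx}{KQx}{KQxx}{AQx}}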
\title{Precision club}
\author{John Smith}
\begin{document}
\maketitle
\tableofcontents
\begin{bidtable}
\O\+\\
1m \> 3+ minor\\
1M \> 5+ major\\
1NT \> 15--17\\
\>May have 5M, 6m or 5-4 minors\\
2\BC \> Strong and forcing\\
2X \> Weak\\
2NT \> 20--21\-
\end{bidtable}
\bigbreak
\begin{bidtable}
1NT-2\BC\+\\
2\BD \> No 4 card major\+\\
2\BH \> 5+\BH , 4\BS , invitational\\
\>\vspace{\baselineskip}\\
2\BS \> 5+\BS , invitational\\
\>\vspace{\baselineskip}\\
3\BH\BS \> Smolen (5+ cards in other major)\\
\>\vspace{\baselineskip}\-\\
2\BH\BS \> 4+ suit\\
2NT \> 4-4 majors, minimum\\
3\BC \> 4-4 majors, maximum\-
\end{bidtable}
\bigbreak
\begin{bidtable}
\O\+\\
2\BC \> Strong, forcing\+\\
2\BD \> Waiting\-\\
2X \> Weak\+\\
2NT \> Ogust\-\-
\end{bidtable}
\bigbreak
\begin{bidtable}
(1NT)--\+\\
Dbl \> Strength, ca 15+\\
2\BC \> At least 5-4 majors\+\\
(D)\+\\
Pass \> 5+\BC , suggestion to play\\
Rdbl \> Asking for better/longer major\\
2\BD \> 5+\BD , suggestion to play\-\\
(P)\+\\
2\BD \> Asking for better/longer major\-\-\\
2\BD \> A weak major or a strong minor\\
2\BH\BS \> Constructive\\
2NT \> 5-5 minors\\
3X \> Preemptive\-
\end{bidtable}
\bigbreak
\begin{bidtable}
1NT--\+\\
2\BC \> Stayman\\
2\BD \> Transfer\+\\
2\BH \> Transfer accept\\
3\BH \> Super accept\-\\
2\BH \> Transfer\+\\
2\BS \> Transfer accept\\
3\BS \> Super accept\-\-
\end{bidtable}
\bigbreak
\section{The 1\pdfc\ opening}
Opening 1\BC\ shows at least 16+ hcp, and is forcing. The
continuation is fairly natural.
\bigbreak
Some hands might be upgraded to 1\BC\ due to distribution, but
wildly distributional hands might also be downgraded to avoid
problems if the opponents preempt.
\bigbreak
\subsection{The 1\pdfd\ negative}
Responding 1\BD\ shows a hand which doesn't have enough values to
establish a game force.
\bigbreak
\dealdiagram
{\vhand{QT}{KQxxxx}{KTx}{Jx}}
{\vhand{Kxx}{T9}{xxx}{Q987x}}
{\vhand{Jx}{AJxxx}{AJx}{KTx}}
{\vhand{A987xx}{\void}{Q9xx}{Axx}}
{Board 35\\North / None\\4\BS X by South\\Lead \BH K}
\begin{itemize}
\item The first item in an unordered list
\item The second item
\item Etc..
\end{itemize}
\bigbreak
\begin{enumerate}
\item The first item in an ordered list
\item The second item
\item Etc..
\end{enumerate}
\bigbreak
\emph{Here's} \textbf{an} \texttt{example} (italic, bold, monospace)
\bigbreak
\end{document}
| {
"alphanum_fraction": 0.6429225417,
"avg_line_length": 24.2711111111,
"ext": "tex",
"hexsha": "a7c1e67ec96e615496944c9b9fd37ffc10e512e7",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-09-13T10:21:44.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-09-13T10:21:44.000Z",
"max_forks_repo_head_hexsha": "bd2bdc75ac0a1bea1c16b73bf0532682fbbc694e",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "trueproof/bml",
"max_forks_repo_path": "test/expected/test.tex",
"max_issues_count": 19,
"max_issues_repo_head_hexsha": "bd2bdc75ac0a1bea1c16b73bf0532682fbbc694e",
"max_issues_repo_issues_event_max_datetime": "2021-09-14T11:43:11.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-03-22T20:11:23.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "trueproof/bml",
"max_issues_repo_path": "test/expected/test.tex",
"max_line_length": 86,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "bd2bdc75ac0a1bea1c16b73bf0532682fbbc694e",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "trueproof/bml",
"max_stars_repo_path": "test/expected/test.tex",
"max_stars_repo_stars_event_max_datetime": "2021-05-04T03:17:12.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-03-23T17:56:15.000Z",
"num_tokens": 2170,
"size": 5461
} |
\section{Background}
\label{sec_background}
Demand for high performance and low power consumption has pushed engineers to integrate many computing resources, such as GPUs, DSPs, memories and various IP blocks, into a single chip referred to as a system on a chip (SoC).
As the number of resources increases, their interconnection poses several challenges.
Previously, most SoC applications used a shared-bus interconnect because of its low cost and simplicity.
However, this approach has its own limitations, such as poor scalability and increased wire delays.
At any given instant, only one master can control the bus, and an arbitrator is needed to manage concurrent bus requests.
This deteriorates the overall system performance as more resources are interconnected. To overcome this, an on-chip packet-switched network called a Network-on-Chip was proposed.
A Network-on-Chip is an interconnect approach that allows the different IPs and subsystems in an SoC to communicate with each other in a scalable manner.
In this approach, each processing element (PE) is connected to a switch, and multiple switches are interconnected to form a network.
A PE could be a processor core, a DSP core or an IP block.
The network infrastructure helps in routing data from one PE to another in the form of data packets.
This architecture enables concurrent communication between multiple PEs by exploiting hardware parallelism.
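As a minimal illustration of such a switch fabric (a sketch for exposition, not code from this work), the neighbours of each switch in a simple 2D mesh, one common NoC topology, can be computed as follows:

\begin{verbatim}
def mesh_neighbours(x, y, width, height):
    """Return the switches adjacent to switch (x, y) in a 2D mesh."""
    candidates = [(x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)]
    return [(i, j) for (i, j) in candidates
            if 0 <= i < width and 0 <= j < height]

# Interior switches have four neighbours; edge and corner switches
# have three or two, respectively.
print(mesh_neighbours(0, 0, 4, 4))  # [(1, 0), (0, 1)]
\end{verbatim}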
%
%Based how switches are interconnected, there are different NoC topologies such as mesh, torus, star, ring, butterfly fat tree (BFT) etc.
%Mesh topology is a 2D arrangement of switches where each switch is connected to its neighbouring switches and its corresponding PE.
%Switches along the edges have 2 or 3 connections as there are only 2 or 3 switches adjacent to it.
%Torus topology is similar to mesh but it is cyclic in nature.
%Here all the switches, including the ones along the edges, will have four connections as shown in Fig.~\ref{fig:torus}.
%Top most switches are connected to the bottommost and rightmost switches to the leftmost.
%Star topology has a central hub to which all the switches are connected.
%Communication between PE's is done through the central hub.
There have been previous efforts to develop open-source NoC architectures specifically targeting FPGAs.
The CONNECT NoC generator is the most popular among them~\cite{papa_connect_fpga2012}.
CONNECT is inspired by the fact that FPGAs have a large routing infrastructure available compared to their memory and logic elements, and it tries to exploit this.
It supports different NoC topologies and uses a single-stage pipeline mechanism to minimize hardware and latency.
However, it has a low operating frequency and is still quite resource intensive, as seen in Section~\ref{sec_results}.
Split-merge is another NoC infrastructure, developed at the University of Pennsylvania~\cite{Huan2012}.
It tries to overcome the limited clock performance of CONNECT at the expense of a few more resources.
Both CONNECT and Split-merge use credit-based traffic control and require the application developer to implement special logic to manage credits.
The presently available open-source architectures are capable of providing relatively high throughput, but they are quite resource intensive.
The Split-merge source code is available in Bluespec and is challenging to customize.
Both NoCs use custom PE interfaces, making it difficult to interface off-the-shelf IP cores.
Researchers have claimed to develop lean NoCs targeting FPGAs, but these are not publicly available~\cite{hoplite_fpl2015}.
The main motivation for this work is to provide a lightweight NoC implementation with a standard communication interface and high clock performance.
We aim to provide an end-to-end solution where NoC developers can easily interface the NoC with a host computer for data communication.
We call this platform OpenNoC. | {
"alphanum_fraction": 0.8167185877,
"avg_line_length": 93.9512195122,
"ext": "tex",
"hexsha": "3b19a24d1e01edf5672a1a1620e850839837798a",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "5066c0f2b53b1fef3b2ed6132bcf010190de149c",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "vipinkmenon/HNoC",
"max_forks_repo_path": "paper/Sections/2_related.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "5066c0f2b53b1fef3b2ed6132bcf010190de149c",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "vipinkmenon/HNoC",
"max_issues_repo_path": "paper/Sections/2_related.tex",
"max_line_length": 220,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "5066c0f2b53b1fef3b2ed6132bcf010190de149c",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "vipinkmenon/HNoC",
"max_stars_repo_path": "paper/Sections/2_related.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 781,
"size": 3852
} |
%% This is file `elsarticle-template-1-num.tex',
%%
%% Copyright 2009 Elsevier Ltd
%%
%% This file is part of the 'Elsarticle Bundle'.
%% ---------------------------------------------
%%
%% It may be distributed under the conditions of the LaTeX Project Public
%% License, either version 1.2 of this license or (at your option) any
%% later version. The latest version of this license is in
%% http://www.latex-project.org/lppl.txt
%% and version 1.2 or later is part of all distributions of LaTeX
%% version 1999/12/01 or later.
%%
%% Template article for Elsevier's document class `elsarticle'
%% with numbered style bibliographic references
%%
%% $Id: elsarticle-template-1-num.tex 149 2009-10-08 05:01:15Z rishi $
%% $URL: http://lenova.river-valley.com/svn/elsbst/trunk/elsarticle-template-1-num.tex $
%%
%% Use the option review to obtain double line spacing
%% \documentclass[preprint,review,12pt]{elsarticle}
%% Use the options 1p,twocolumn; 3p; 3p,twocolumn; 5p; or 5p,twocolumn
%% for a journal layout:
%% \documentclass[final,1p,times]{elsarticle}
%% \documentclass[final,1p,times,twocolumn]{elsarticle}
%% \documentclass[final,3p,times]{elsarticle}
%% \documentclass[final,3p,times,twocolumn]{elsarticle}
%% \documentclass[final,5p,times]{elsarticle}
%% \documentclass[final,5p,times,twocolumn]{elsarticle}
%% The lineno packages adds line numbers. Start line numbering with
%% \begin{linenumbers}, end it with \end{linenumbers}. Or switch it on
%% for the whole article with \linenumbers after \end{frontmatter}.
%% natbib.sty is loaded by default. However, natbib options can be
%% provided with \biboptions{...} command. Following options are
%% valid:
%% round - round parentheses are used (default)
%% square - square brackets are used [option]
%% curly - curly braces are used {option}
%% angle - angle brackets are used <option>
%% semicolon - multiple citations separated by semi-colon
%% colon - same as semicolon, an earlier confusion
%% comma - separated by comma
%% numbers- selects numerical citations
%% super - numerical citations as superscripts
%% sort - sorts multiple citations according to order in ref. list
%% sort&compress - like sort, but also compresses numerical citations
%% compress - compresses without sorting
%%
%% \biboptions{comma,round}
% \biboptions{}
%% use the tnoteref command within \title for footnotes;
%% use the tnotetext command for the associated footnote;
%% use the fnref command within \author or \address for footnotes;
%% use the fntext command for the associated footnote;
%% use the corref command within \author for corresponding author footnotes;
%% use the cortext command for the associated footnote;
%% use the ead command for the email address,
%% and the form \ead[url] for the home page:
%%
%% \title{Title\tnoteref{label1}}
%% \tnotetext[label1]{}
%% \author{Name\corref{cor1}\fnref{label2}}
%% \ead{email address}
%% \ead[url]{home page}
%% \fntext[label2]{}
%% \cortext[cor1]{}
%% \address{Address\fnref{label3}}
%% \fntext[label3]{}
%% use optional labels to link authors explicitly to addresses:
%% \author[label1,label2]{<author name>}
%% \address[label1]{<address>}
%% \address[label2]{<address>}
%% Authors are advised to submit their bibtex database files. They are
%% requested to list a bibtex style file in the manuscript if they do
%% not want to use model1-num-names.bst.
%% The amsthm package provides extended theorem environments
%% The graphicx package provides the includegraphics command.
%% The amssymb package provides various useful mathematical symbols
%% References without bibTeX database:
% \begin{thebibliography}{00}
%% \bibitem must have the following form:
%% \bibitem{key}...
%%
% \bibitem{}
% \end{thebibliography}
%%
%% End of file `elsarticle-template-1-num.tex'.
\documentclass[preprint]{elsarticle}
\usepackage{graphicx}
\usepackage{amssymb}
%% \usepackage{amsthm}
\usepackage{caption}
\usepackage{floatrow}
\usepackage{lineno}
\usepackage[a4paper, total={6in, 8in}]{geometry}
\usepackage{wrapfig}
\usepackage{multicol}
\usepackage{xcolor}
\usepackage{subcaption}
\newfloatcommand{capbtabbox}{table}[][\FBwidth]
\newcommand{\todo}[1]{{\color{red}#1}}
\journal{ANZCOP}
\begin{document}
\begin{frontmatter}
\title{Precision measurement of weak transitions from excited states in Helium by counting ultracold atoms}
\author{J. A. Ross, K. F. Thomas, B. M. Henson, S. S. Hodgman, A. Truscott, K. G. H. Baldwin}
\address{ Laser Physics Centre, Research School of Physics and Engineering, The Australian National University, Canberra, ACT 2601, Australia
}%$^2 $Physics Department, University of Windsor, Windsor, Ontario, Canada}
\date{}
\begin{keyword}
Frequency metrology \sep Atomic spectroscopy \sep Bose-Einstein Condensate \sep Metastable Helium \sep Precision Measurement
\end{keyword}
\end{frontmatter}
The advancing precision of modern atomic spectroscopy is beginning to afford optical tests of fundamental physics in helium through, for instance, nuclear charge radii determinations\cite{Pachucki2017}. Helium therefore provides a testbed as appealing as hydrogen for spectroscopic tests of QED. Among the outstanding discrepancies between predicted and observed helium transition lines are the 7.5$\sigma$ difference in the $n=3$ singlet-triplet splitting\cite{Luo2015} and the $93\sigma$ difference between Martin's measured values for the $2^3P_2 \rightarrow 5^3S_1$ and $2^3P_2 \rightarrow 5^3D$ transitions and recent predictions by Drake \cite{Drake2007}.
We contribute to both of these by measuring five transitions from the $2^3P_2$ state, improving on Martin's measurements with an order of magnitude greater precision, and by making the first observation of the spin-forbidden $2^3P_2\rightarrow5^1D_2$ transition in helium. Our measurements constrain the $5^3D$ and $5^1D$ ionization energies of $^4$He\cite{Morton2006} to 150 parts per billion, and the $5^3S$ to 28 parts per billion.
\begin{wrapfigure}{l}{0.5\textwidth}
\begin{center}
\includegraphics[width=\textwidth]{lvl_diag.png}
\end{center}
\caption{Level diagram showing the measured transitions from the $2^3\mathrm{P}_2$ state to the $n=5$ manifold (blue), after pumping from the metastable $2^3\mathrm{S}_1$ state in an optical molasses.\\
}
\label{fig:my_label}
\end{wrapfigure}
We use a novel spectroscopic technique for direct, low-background laser spectroscopy of excited-state transitions. We apply a probe beam during the optical cooling of ultracold trapped atoms to disturb the cooling process, with the cooling beam also acting as the optical pump. Evaporative cooling transforms this disturbance into a change in the final atom number, which we measure with our single-atom-sensitive detector. We identify the transition frequency as the peak of the change in atom number with respect to the probe beam frequency. This method is advantageous for studying excited-state transitions, as it provides an essentially background-free measurement protocol which is, in principle, sensitive to the absorption of single photons. Further, the experiments are performed in a controlled magnetic field for precise calibration of Zeeman shifts, and in ultra-high vacuum with densities low enough that the pressure shift is minimal.
The predicted transition frequencies\cite{Pachucki2017} agree with the observed values within experimental error, which is ultimately limited by the absolute accuracy of our HighFinesse WS-8 wavemeter. The widths of the observed peaks agree well with the predicted natural linewidths. We discuss potential extensions of this method to the measurement of decay branching ratios, lifetimes and oscillator strengths, to sub-Doppler spectrometry, and to the prospect of isotope-shift measurements in $^3$He--$^4$He mixtures.
%% Aligned horizontally
%% https://tex.stackexchange.com/questions/120296/how-to-manipulate-width-and-position-of-items-inside-floatrow
\begin{figure}[h]
\begin{floatrow}
\ffigbox[][]
{\caption{Observed $2^3P_2 \rightarrow 5^1D_2$ transition peak in uniform magnetic fields of strength 18~G (left) and 11~G (right). This singlet-triplet transition has a predicted oscillator strength five orders of magnitude weaker than that of the cooling (pump) transition; the peaks are well described by Lorentzian fits.}}
{\includegraphics[width=0.5\textwidth]{5^1D_2_plot_combo.png}}
\capbtabbox{%
\begin{tabular}{|c||c|c|c|p{1cm}}
\hline
Upper state & $\nu_{obs}$ (MHz) & $ \nu_{obs} - \nu_{old}\cite{Martin60}$\\
\hline\hline
$5^3\mathrm{S}_1$ & 727303247(4) & 13713(201)\\
$5^3\mathrm{D}_1$ & 744396515(20) & 13575(168)\\
$5^3\mathrm{D}_2$ & 744396235(20) & 13855(168)\\
$5^3\mathrm{D}_3$ & 744396204(20) & 13886(168)\\
$5^1\mathrm{D}_2$ & 744430345(20) & N/A\\
\hline
\end{tabular}
\newline\newline\newline\newline
}{%
\caption{Results of our measurements of transition frequencies between the $2^3\textrm{P}_2$ state to the listed upper states. Uncertainty in observed frequency is dominated by the limiting wavemeter accuracy. The uncertainty in the difference between new values and Martin's results \cite{Martin60} is the combined uncertainty of both measurements.}
}
\end{floatrow}
\end{figure}
%% Stacked vertically
% \begin{figure}[h]
% \includegraphics[width=0.65\textwidth]{5^1D_2_plot_combo.png}
% \caption{Example of observed transition peaks, here a singlet-triplet transition with predicted oscillator strength five orders of magnitude weaker than that of the cooling transition. Left and right peaks are observed absorption peaks with 18G and 11G background fields, respectively. }
% \end{figure}
% \begin{table}[h]
% \begin{center}
% \begin{tabular}{|c||c|c|c|p{1cm}}
% \hline
% Upper state & $\nu_{obs}$ (MHz) & $ \nu_{obs} - \nu_{\textrm{Martin}}\cite{Martin60}$\\
% \hline\hline
% $5^3\mathrm{S}_1$ & 727303247(4) & 13713(201)\\
% $5^3\mathrm{D}_1$ & 744396515(20) & 13575(168)\\
% $5^3\mathrm{D}_2$ & 744396235(20) & 13855(168)\\
% $5^3\mathrm{D}_3$ & 744396204(20) & 13886(168)\\
% $5^1\mathrm{D}_2$ & 744430345(20) & N/A\\
% \hline
% \end{tabular}
% \caption{Results of our measurements of transition frequencies between the $2^3\textrm{P}_2$ state to the listed upper states. Uncertainty in observed frequency is dominated by the limiting wavemeter accuracy, and the uncertainty in the difference between new and old values is the combined uncertainty of both measurements.}% Martin method}
% \end{center}
% \end{table}
%% References with bibTeX database:
% \newline
\bibliographystyle{model1-num-names}
\bibliography{bib.bib}
% \newpage
% \section{100 word abstract}
% We report on the first observation of the spin-forbidden 2 3P2- 5 1D2 transition in Helium, and an improvement over historical measurements of the 2 3P2 -- 5 3S1 and 2 3P2 -- 5 3D transitions [3], with an order of magnitude greater precision. We use the novel technique of measuring atom loss from a BEC after disturbing the cooling process with a probe beam. This allows low-background spectroscopy of excited state transitions with controlled magnetic background in ultra-high vacuum. The widths and positions of the observed peak frequencies[1] agree with the theory within experimental error. These measurements constrain the 5 3D and 5 1D ionization energies of 4He[5] to 150 parts per billion, and the $5^3S$ to 28 parts per billion.
% \section{250 word abstract}
% The advancing precision of modern atomic spectroscopy is beginning to afford optical tests of fundamental physics in helium through, for instance, nuclear charge radii determinations[1]. Among outstanding discrepancies between predicted and observed Helium transition lines and are the 7.5 sigma difference in the N=3 singlet-triplet splitting[2] and the 93 sigma between Drake's predictions[4] and Martin's measurements[3] of the 2 3P2 -- 5 3S1 and 2 3P2 -- 5 3D transitions. The contribution of this work is a resolution of the latter disagreements, with an order of magnitude greater precision, and a contribution to the former in the first measurement of the 1s3p 3P2- 1s5d 1D2 spin-forbidden transition in Helium. We use a novel technique for direct, low-background laser spectroscopy of excited state transitions in Helium. We apply a probe beam during the production of a trapped Bose-Einstein condensate to disturb an optical cooling stage. Evaporative cooling transduces this stimulus into change in a final atom number, which we measure with our single-atom sensitive detector. This method is, in principle, single-photon sensitive. The experiments were performed in a controlled magnetic field in ultra-high vacuum. The predicted resonance widths and frequencies[1] agree with the observed values within experimental error. These measurements constrain the 5 3D and 5 1D ionization energies of 4He to 150 parts per billion, and the 5 3S to 28 parts per billion[5].
% Wienczek: Namely, the $2^3P1-2^3P2$ transition to 25kHz determines the fine structure constant to parts-per-billion and the $2^3P-2^3S$ to 1.4kHz, which can determine the nuclear charge radius to below 0.1\%, better than muonic helium Lamb shift. In the former case, the bottleneck is that theory is not sufficiently developed!
% Measured 2L-3D intervals are about 1MHz larger than theory.
% Patkos Measured $2^3S-2^3P$ and $2^3S-2^1S$ transitions give different isotope shifts, which give disparate predictions for the nuclear charge radii in He3 and He4
% The authors find a four-sigma disagreement between their prediction of the difference-of-squares of the nuclear radii and the measured values. There are two results for the $2^3P-2^3S$ transition which differ slightly from each other but significantly from the $2^1S-2^3S$ transition, (1.069(3) and 1.061(3) fm, and 1.027(11) fm, respectively.)
% We demonstrate a method for spectroscopy from short-lived excited states which uses forced radio evaporation as a transducer between photon scattering events and the population of a Bose-Einstein condensate of Helium in the metastable 23S1 state (He*). Our multichannel plate and delay-line detector stack is sensitive to individual atoms, allowing precise measurements of small differences in atom number.
\end{document}
| {
"alphanum_fraction": 0.7480140913,
"avg_line_length": 60.3208333333,
"ext": "tex",
"hexsha": "0fc5e36b9b98ba918f86c8469841e68c58a9f10a",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "06ed4e7ca487d4379c21ed5d13608ff8056b1dd4",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "GroundhogState/He_3P_transitions",
"max_forks_repo_path": "doc/ANZCOP/main.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "06ed4e7ca487d4379c21ed5d13608ff8056b1dd4",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "GroundhogState/He_3P_transitions",
"max_issues_repo_path": "doc/ANZCOP/main.tex",
"max_line_length": 1479,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "06ed4e7ca487d4379c21ed5d13608ff8056b1dd4",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "GroundhogState/He_3P_transitions",
"max_stars_repo_path": "doc/ANZCOP/main.tex",
"max_stars_repo_stars_event_max_datetime": "2019-05-26T03:24:26.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-05-26T03:24:26.000Z",
"num_tokens": 3893,
"size": 14477
} |
\documentclass{gapd}
\Type{Short Note}
\Title{Uniqueness in Logic Puzzles}
\Author{James Tiberius Kirk}{NCC~1701, Enterprise}
\Author{Montgomery Scott}{NCC~1701, Enterprise}
\Author{Leonard McCoy}{NCC~1701, Enterprise}
\Abstract{Pure deduction puzzles typically have a single unique
solution. However, some puzzle setters argue that challenges with
multiple solutions are also valid, if they can be solved by
eliminating choices that lead to ambiguous states. This paper
considers the arguments for and against this position, and presents
a counterexample that demonstrates the danger of using uniqueness to
decide between multiple solutions.}
\Issue{1}{1}{2015}
%\Pages{35}{37}
\begin{document}
\maketitle
\section{Introduction}
\label{sec:Introduction}
\lettrine{A}{} characteristic of pure deduction puzzles,
such as Japanese logic puzzles, is that each challenge has a single
unique solution. This allows such challenges to be solved by
deduction rather than guesswork~\cite{browne}.
I was therefore surprised to find a Kakuro challenge with multiple
solutions in a publication as respectable as \textit{The
Guardian}~\cite{guardian}. This was the first time that I had ever
encountered such a case in print. The aim in Kakuro is to fill each
cell with a digit in the range {\sf 1--9}, such that each horizontal
and vertical run adds to the hint total shown, and no digit is
repeated within each run~\cite{nikoli}.
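For concreteness, a completed Kakuro run can be checked in a few lines of
code (an illustrative sketch, not part of the original note):

\begin{verbatim}
def valid_run(cells, hint):
    """Check one completed Kakuro run against the rules above."""
    return (all(1 <= d <= 9 for d in cells)    # digits 1-9 only
            and len(set(cells)) == len(cells)  # no repeated digit
            and sum(cells) == hint)            # run adds to the hint

assert valid_run([1, 2, 6], 9)
assert not valid_run([3, 3, 3], 9)  # repeated digits are illegal
\end{verbatim}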
Figure~\ref{fig:Kakuro} shows the relevant section of the Kakuro
challenge in question (all other values have been resolved). Possible
values for the final few unresolved cells are shown in small print,
and a key cell with possible values {\sf 4} or {\sf 5} is circled.
This challenge has three possible solutions, depending on whether this
key cell takes the value {\sf 4} or {\sf 5}, as shown.
\begin{figure*}[!thb]
\centering
\includegraphics[width=\linewidth]{graphics/kakuro-1372-multiple-1.pdf}
\caption{A Kakuro challenge with three solutions. The circled cell
can take the value {\sf 4} or {\sf 5}.}
\label{fig:Kakuro}
\end{figure*}
After alerting the UK setter of this challenge to what appeared to be
a flawed design with no deducible solution, I was also surprised by
his response. He maintained that this challenge was indeed valid, and
could be solved by deduction based on \textit{relative} uniqueness.
\section{The Case For Ambiguity}
\label{sec:Ambiguity}
The setter of the ambiguous Kakuro challenge argued as follows:
\begin{quote}\itshape
Any move M that leads to multiple solutions can be eliminated.
\end{quote}
For instance, the value of the circled cell in Figure~\ref{fig:Kakuro}
cannot be {\sf 4}, as such a move would allow multiple solutions (top
row). This cell must therefore take the value {\sf 5}, producing the
single `correct' solution (bottom row).
This argument of \textit{deduction by relative uniqueness}, for selecting
among multiple solutions, seems fair enough at first glance. It adds
some much-needed depth to Kakuro, by allowing an additional solution
strategy. It also increases the number of possible challenges that
can be devised, by allowing cases with multiple solutions that
traditional setters would not allow.
However, Japanese publisher Nikoli, the inventor and major supplier of
Kakuro, categorically state that uniqueness should not be exploited in
this way to solve Kakuro, or any of their other pure deduction
puzzles.\footnote{Strongly worded personal correspondence.} We now
consider the argument for absolute rather than relative uniqueness.
\section{The Case For Uniqueness}
\label{sec:Uniqueness}
A serious problem with deduction by relative uniqueness is that it
does not work unless the solver also knows that this rule is in force,
but uniqueness is generally assumed for such puzzles rather than
explicitly stated. For example, the Kakuro rules provided by \textit{The
Guardian} make no mention of uniqueness, making those rules
insufficient to solve the ambiguous challenge shown in
Figure~\ref{fig:Kakuro}~\cite{guardian}.
Further, there is an obvious corollary to the argument made above:
\begin{quote}\itshape
Any move leading to ambiguous move M can therefore also be
eliminated.
\end{quote}
Hence, chaining backwards from ambiguous move $M$, every prior move
can also be said to lead to ambiguity and hence be eliminated, until
the challenge has no valid moves. Or can it? There is no clear
answer to this question, which depends on the setter's and solver's
interpretations.
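To state the competing notions precisely, the following puzzle-agnostic
sketch (hypothetical code, not from any cited source) counts the completions
of a position and applies the move-elimination rule quoted above. Note that
nothing in it pins down the order in which candidate moves are examined,
which is exactly the weakness exposed by the counterexample below.

\begin{verbatim}
def count_solutions(state, moves, is_solved, limit=2):
    """Backtracking count of the completions of `state`, where `moves`
    yields legal successor states; stops early once `limit` is reached."""
    if is_solved(state):
        return 1
    total = 0
    for successor in moves(state):
        total += count_solutions(successor, moves, is_solved, limit - total)
        if total >= limit:   # "more than one" is all we need to know
            break
    return total

def relative_uniqueness_filter(state, moves, is_solved):
    """Deduction by relative uniqueness: keep only those moves whose
    resulting position has exactly one completion."""
    return [s for s in moves(state)
            if count_solutions(s, moves, is_solved) == 1]
\end{verbatim}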
\subsection{Counterexample}
\label{sec:Counterexample}
The following counterexample demonstrates the dangers of deduction by
relative uniqueness. Slitherlink is a deduction puzzle in which a
simple closed path must be traced through orthogonal vertices of a
square grid, to visit the number of sides indicated on each numbered
cell~\cite{times}. For example, Figure~\ref{fig:SlitherlinkSolutions}
shows a simple 2$\times$3 Slitherlink challenge with three valid
solutions: $a$, $b$ and $c$.
\begin{figure*}[!thb]
\centering
\includegraphics[width=\linewidth]{graphics/slitherlink-solns-1.pdf}
\caption{A 2$\times$3
Slitherlink challenge (left) with three solutions ($a$, $b$ and
$c$).}
\label{fig:SlitherlinkSolutions}
\end{figure*}
\begin{figure}[htb]
\centering
\includegraphics[width=0.85\columnwidth]{graphics/slitherlink-deduce-a2.pdf}
\caption{Deduction by uniqueness yields $c$.}
\label{fig:SlitherlinkDeductionA}
\end{figure}
Given that four edges can be deduced as shown in
Figure~\ref{fig:SlitherlinkDeductionA} (top), consider the move
indicated by the dotted line. If there is \textit{not} an edge between
these vertices then two possible solutions exist (left), hence this
move must be an edge and $c$ must be the `correct' solution (right).
However, if the same process is applied to the move indicated in
Figure~\ref{fig:SlitherlinkDeductionB} (top, dotted), then $b$ is
deduced to be the `correct' solution (right).
\begin{figure}[htb]
\centering
\includegraphics[width=0.85\columnwidth]{graphics/slitherlink-deduce-b2.pdf}
\caption{Deduction by uniqueness yields $b$.}
\label{fig:SlitherlinkDeductionB}
\end{figure}
Deduction by relative uniqueness therefore gives two conflicting
`correct' solutions, $b$ and $c$, depending on processing order. To
derive the same solution as the setter, the solver would have to
follow the same sequence of decisions in the exact same order, but
there is no way to enforce this in practice. Deduction by relative
uniqueness is not guaranteed to yield the same solution from among
multiple solutions in all cases.
This Slitherlink counterexample could be said to have one valid
solution (depending on the order in which the solver made their
deductions), two equally valid solutions (through deduction by
relative uniqueness) or three equally valid solutions (which it does,
after all---see Figure~\ref{fig:SlitherlinkSolutions}). This is
clearly an unsatisfactory state of affairs. But if absolute
uniqueness is enforced, and such cases of multiple solutions avoided,
then all of these problems simply go away, at no real cost. As expert
puzzle designer Hiroshi Higashida points out:
\begin{quote}
\textit{Puzzle creators, not only solvers, mustn't defy rules, either}
\cite[p216]{Higashida2010}.
\end{quote}
\section{Conclusion}
\label{sec:Conclusion}
The characteristic of pure deduction puzzles to have a single unique
solution is not only elegant, but performs a vital practical function.
It guarantees that challenges can be solved by deduction alone,
without guesswork or ambiguity, and means that the setter and solver
are both playing from the same rule set without the need to make
assumptions about implied or hidden rules. Further, uniqueness makes
challenges self-checking; if the player has deduced a solution, then
it must be the correct one. As tempting as it may be to relax this
constraint of absolute uniqueness and instead exploit relative
uniqueness as a solution strategy, this is best avoided in pure
deduction puzzles.
\section*{Acknowledgements}
Thanks to Jimmy Goto for clarifying Nikoli's position on uniqueness.
\section*{References}
\begin{thebibliography}{4}
\bibitem{browne} Browne, C., `Deductive Search for Logic Puzzles',
\textit{Computational Intelligence and Games (CIG'13)}, IEEE Press,
2013, pp.~359--366.
\bibitem{guardian} Anonymous, `Kakuro', \textit{The Guardian},
29~November~2013.
\bibitem{nikoli} \textit{Kakuro 1}, Tokyo, Nikoli, 1986.
\bibitem{times} \textit{The Times Japanese Logic Puzzles: Hitori, Hashi,
Slitherlink and Mosaic}, London, Harper Collins, 2006.
\bibitem{Higashida2010} Higashida, H., `Machine-Made Puzzles and
Hand-Made Puzzles', \textit{IFIP Advances in Information and
Communication Technology (AICT)}, vol.~333, 2010, pp. 214--222.
\end{thebibliography}
\Note{BlueNoteBackground}{%
{\bf Cameron Browne} is a Vice-Chancellor's Senior Research Fellow
at QUT, Brisbane, Australia, whose research interests include artificial intelligence and automated game design.\\
{\bf Address:} School of EECS, Science and Engineering Faculty, QUT,
Brisbane, 4001, Australia.\\
{\bf Email:} [email protected]
%
}
\end{document}
\documentclass[Thesis.tex]{subfiles}
\begin{document}
\chapter{The Quantum Problem}
\label{chp:the-quantum-problem}
\glsresetall
\section{Problem Statement}
Say you want to investigate the properties of some quantum mechanical system.
The first step is then to firmly establish how we should describe this system
and the laws that govern its behaviour.\footnote{For the entirety of this
thesis, we shall assume that the systems we consider do not show any significant
relativistic behaviour, so that no such considerations are necessary.}
If our system of interest consisted of non-quantum entities (e.g.\ the
trajectory of a baseball as it is thrown through the air towards a batter), we
would likely turn to our classical laws, such as Newton's second law of motion
%
\begin{align}
\sum_i \vec F_i(t) = \dv{\vec p(t)}{t},
\end{align}
%
\noindent where $\vec F_i$ are the forces acting on the ball, and $\vec p$ is its momentum at
any given point in time, $t$. Using the law of motion, we can apply our knowledge of how the
environment affects the object to \emph{deterministically} calculate the resulting
behaviour. The really nice thing is that, if we also know the mass of the object, we
can derive the value of any other measurable physical quantity of interest. As such,
we can say that \emph{solving} a classical system consists of the following steps:
\begin{enumerate}
\item Define the environment, i.e.\ the forces acting on the object(s)
\item Use the second law of motion to obtain momentum $\vec p(t)$ and position $\vx(t)$
\item Compute quantity of interest, $Q(\vec p, \vec x; t)$
\end{enumerate}
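To make these steps concrete, the following minimal sketch carries them out
numerically for the baseball example, using explicit Euler integration (the
force model, the numbers and the time step are all illustrative assumptions):
\begin{lstlisting}[language=Python]
import numpy as np

# Step 1: define the environment. Here only gravity acts on the ball;
# air resistance is neglected and all numbers are illustrative.
m = 0.145                        # mass of a baseball in kg
g = np.array([0.0, -9.81])       # gravitational acceleration in m/s^2

def total_force(p, t):
    return m * g                 # sum of forces; independent of p and t here

# Step 2: integrate Newton's second law, dp/dt = F, with explicit Euler.
dt = 1e-4
p = m * np.array([30.0, 10.0])   # initial momentum for a (30, 10) m/s throw
x = np.array([0.0, 1.8])         # released at shoulder height
t = 0.0
while x[1] > 0.0:                # until the ball hits the ground
    p = p + total_force(p, t) * dt
    x = x + (p / m) * dt
    t += dt

# Step 3: compute a quantity of interest, e.g. the horizontal range.
print(f"Range: {x[0]:.2f} m after {t:.2f} s of flight")
\end{lstlisting}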
Moving to the quantum world, much of the procedure remains the same, but for the quantum
case we have a different law of motion. In our non-relativistic view, this is the
\gls{tdse}:
\begin{align}
\hat H\ket{\Psi} &= i\hbar \pdv{}{t}\ket{\Psi},\label{eq:schrodinger-time-dependent-general}
\end{align}
%
where $i=\sqrt{-1}$ is the imaginary unit and $\hbar=\flatfrac{h}{2\pi}$ is the reduced Planck constant.
The thing we want to solve for in this case is the so-called wave function $\ket{\Psi}$
(explained momentarily), while the description of the system (analogous to the forces in
classical mechanics) goes into $\hat H$. We refer to the latter as the Hamiltonian
operator, and it should be a complete description of the kinetic and potential
energies of the particles involved. As an example, we write the equation for a
single particle at position $\vx$ in an energy potential $V(\vx; t)$ as follows
(where we explicitly use the position basis):
\begin{align}
\qty[-\frac{\hbar^2}{2m} \laplacian + V(\vb x; t)]\Psi(\vb x;t) =
i\hbar\pdv{\Psi(\vb x; t)}{t},\label{eq:schrodinger-time-dependent-position-basis}
\end{align}
%
where the first term constitutes the kinetic energy of the particle (with mass $m$), and the second term is
naturally the potential energy. For the systems that we shall consider in this thesis, the
Hamiltonians will all take this form, only varying the functional form of $V$.
Knowing the wave function $\Psi$ of a system is analogous to knowing position
and momentum in the classical view in that we can compute any observable
quantity from it (more on this in \cref{sec:obs-from-psi-to-Q}). As such, obtaining the full
expression for the correct wave function is of immense use.
The wave function lacks a clear physical intuition for what exactly it \emph{is},
of the kind we have for position and momentum in classical mechanics. Perhaps the most helpful way to view $\Psi$ is
through the fact that its squared absolute value, $\abs{\Psi(\vb x; t)}^2$, is the
probability density for finding a particle at position $\vb x$ at time $t$. Thinking of the (squared
norm of the) wave function as a substitute for the classical position $\vb x$ can
therefore be a helpful aid, as long as we keep the probabilistic nature of it in mind.
Summarizing the steps for \emph{solving} a quantum system, analogous to the
classical approach, we have the following plan:
\begin{enumerate}
\item Define the environment through choosing a form for the Hamiltonian $\hat H$
\item Use the \gls{tdse} to obtain the wave function $\ket\Psi$
\item Use the wave function to compute quantities $Q$ of interest
\end{enumerate}
\section{Stationary States}
The \gls{tdse} (\cref{eq:schrodinger-time-dependent-position-basis}) is a partial differential equation,
since it contains partial derivatives of the wave function with respect to both position and
time. The standard approach to solving this equation is through \emph{separation of
variables}. We assume that we can factorize the full wave function as follows:
\begin{align}
\Psi(\vb x; t) &= \psi(\vb x)\phi(t).\label{eq:separatable-wave-func-def}
\end{align}
%
In addition, we assume that $V(\vb x; t) = V(\vb x)$, i.e.\ that the potential is
time-independent.\footnote{There are systems for which this assumption does not hold. We
will, however, restrict ourselves to considering only Hamiltonians for which this description
is valid.} With these assumptions, we can divide through by $\Psi$ in
\cref{eq:schrodinger-time-dependent-position-basis} and obtain the
following:\footnote{It could be tempting to simply strike $\Psi$ from the lhs.\ of
\cref{eq:schrodinger-time-dependent-position-basis} when dividing by $\Psi$. Nevertheless, we
must remember that the Hamiltonian is an operator (specifically seen through the
$\laplacian$ in this case), and so we must divide only after letting this operate on
$\Psi$.}
\begin{align}
\qty[- \frac{\hbar^2}{2m} \laplacian \psi + V(\vb x)\psi]\psi^{-1} &=
i\hbar\dv{\phi}{t} \phi^{-1}.
\end{align}
%
We now make the following subtle observation: since the lhs., a function of $\vb x$, is
equal to the rhs., a function of $t$, they must both be equal to a constant. If
this were not true, we could vary one of $\vx$ or $t$ and alter only one side of
the equation, leaving it invalid.
As both sides have units of energy, let's denote this constant energy as $E$, and proceed
to solve each equation by itself.
The time-dependent equation becomes:
\begin{align}
i\hbar \dv{\phi}{t} &= E\phi(t),
\end{align}
which is trivial to solve:
\begin{align}
\phi(t) &= Ae^{-iEt/\hbar},
\end{align}
for some constant $A=\phi(0)$ determined by boundary conditions.
The time-independent equation, known as the \gls{tise}, is:
\begin{align}
- \frac{\hbar^2}{2m} \laplacian\psi + V(\vb x)\psi =
E\psi.\label{eq:schrodinger-time-independent-position-basis}
\end{align}
The solutions to this equation are the \emph{stationary states} of the system. If
we are able to find these solutions, then we automatically also have the full
time-dependent solution through \cref{eq:separatable-wave-func-def}.
If we return \cref{eq:schrodinger-time-independent-position-basis} to the more general
form,
\begin{align}
\hat H \ket{\psi} &= E\ket{\psi},
\end{align}
we can recognize the problem as an eigenvalue problem where we seek the eigenvalues ($E$)
and eigenvectors ($\ket{\psi}$) of the operator $\hat H$. In light of this, we prefer to
explicitly label the equation to account for the possibility that it could have
multiple (potentially infinitely many) solutions, and write this as
\begin{align}
\hat H\ket{\psi_n} &= E_n\ket{\psi_n}.
\end{align}
Each of the $\ket{\psi_n}$ represents one possible stationary state, and could for instance be
different levels of energy excitations within an atom. For our purposes, we will only care
about the so called \emph{ground state}, i.e.\ the state $\ket{\psi_n}$ corresponding to the
lowest possible $E_n$. By convention, we assume that the energies are ordered such that $E_i
\leq E_j$ if $i < j$, and denote the ground state as $\ket{\psi_0}$ and the corresponding ground
state energy as $E_0$.
\section{Many-Body Systems}
Up until now, for simplicity, we've only considered the description of single-particle
systems. Changing the number of particles is a change in the system description, and as
such it entails modifying the Hamiltonian operator accordingly. Everything presented thus
far generalizes well to the case of more than one particle, simply by introducing the
appropriate sums. The general form of the many-body Hamiltonian we will consider is now:
\begin{align}
\hat H &= - \sum_{i=1}^N \frac{\hbar^2}{2m_i} \laplacian_i + V(\vb x_1, \vb x_2,\dots,
\vb x_N)\\
&= -\sum_{i=1}^N \frac{\hbar^2}{2m_i} \laplacian_i + V(\vb
X),\label{eq:Hamiltonian-operator-general}
\end{align}
where $\vb X \defeq (\vb x_1\ \vb x_2\ \dots\ \vb x_N)^T\in \mathbb{R}^{N\times D}$ is the matrix
of $D$-dimensional row vectors of coordinates for each particle. For further clarity,
$\vb x_i \defeq \sum_{d=1}^D x_{i,d} \vb e_d$ is a $D$-dimensional vector described by
its coordinates $x_{i,d}$ (with unit vectors $\vb{e}_d$), and the corresponding Laplacian
operator is
\begin{align}
\laplacian_k \defeq \sum_{d = 1}^D \pdv[2]{}{x_{k,d}}.
\end{align}
\section{Requirements of Wave Functions}\label{sec:requirements-of-wave-functions}
We have stated earlier that by solving the Schrödinger equation and obtaining the wave
function, we can compute any desirable quantity of interest. In order for the wave
function to fulfill this rather impressive encoding of everything about the system, it has
to satisfy certain criteria. We now devote some special consideration to make these
requirements explicit.
In order to represent a physically observable system, a wave function $\Psi$ must:
\begin{enumerate}
\item Be a solution to the Schrödinger equation
\item Be normalizable (in order to represent a probability)
\item Be a continuous function of space
\item Have a continuous first-order spatial derivative
\item Obey suitable symmetry requirements
\end{enumerate}
%
While the first requirement is obvious, points 2--4 boil down to $\Psi$ taking a
well-behaved functional form, satisfying the required boundary conditions and
admitting an interpretation as a \gls{pdf}. The last point is perhaps less clear, and
we devote some further attention to it in particular.
\subsection{Symmetry of Wave Functions}
Nature has many examples of systems made up of particles of the same
species. That is, the particles all have the same mass, spin, electromagnetic
charge etc.\ such that there is no way to distinguish one from the other by
measuring their properties. An example could be the electrons of an atom, all of
which have the exact same physical properties.
In classical mechanics, we can still distinguish identical particles by other
means. Imagine for instance a set of perfectly identical planets in orbit. Even
though they have all of the same physical properties, we can still enumerate
them and keep track of which is which. This is due to the fact that their
position in time and space is deterministically defined by their current state,
which allows us to track them.
In quantum mechanics, however, we no longer have this deterministic view. In
this world, even if we know where all the individual electrons are at a specific
point in time, we cannot say with certainty where they will be at a later
time. We blame this on the uncertainty principle, and the result is that systems of
identical particles become systems of \emph{indistinguishable} particles in
quantum mechanics.
Consider now a system of two indistinguishable particles, labeled $\vec x_1$ and
$\vec x_2$, where $\vec x_i$ contains all the quantum numbers required to describe
particle $i$ (e.g.\ position coordinates and the $z$ component of spin). The
system is then described by a wave function
\begin{align}
\Psi(\vec x_1, \vec x_2).
\end{align}
Because the particles are indistinguishable, this labeling of 1 and 2 is
arbitrary, and so we should be able to relabel them:
\begin{align}
\Psi(\vec x_2, \vec x_1).
\end{align}
These two expressions, which represent exchanging the two particles, \emph{must}
describe the same physical system. That is, the probabilities of both states
must be equal:
\begin{align}
\abs{\Psi(\vec x_1, \vec x_2)}^2 &= \abs{\Psi(\vec x_2, \vec x_1)}^2\\
\iff \Psi(\vec x_1,\vec x_2) &= e^{i\alpha}\Psi(\vec x_2, \vec x_1),
\end{align}
i.e.\ they can only differ in their complex phase, which doesn't affect any measurable
quantity. Repeating the exchange once more yields the original wave function,
\begin{align}
\Psi(\vec x_1,\vec x_2) &= e^{2i\alpha}\Psi(\vec x_1, \vec x_2)\\
\iff e^{i\alpha} &= \pm 1.
\end{align}
This result states that any wave function, upon the exchange of
indistinguishable particles, must be either symmetric (same sign) or
anti-symmetric (opposite sign) with respect to the original. This generalizes to
any number of particles. For fermions, the anti-symmetric case implies the
well-known \emph{Pauli exclusion principle}, as an anti-symmetric wave function
vanishes whenever two particles occupy the same state.
The following theorem summarizes the result~\cite{PhysRev-58-716}:
\begin{theorem}[Spin-Statistics
Theorem]\label{theorem:spin-statistic}
The wave function of a system of identical integer spin particles has the same value
when the positions of any two particles are swapped. Particles with wave functions
symmetric under exchange are called bosons.
The wave function of a system of identical half-integer spin particles changes sign
when two particles are swapped. Particles with wave functions antisymmetric under
exchange are called fermions.
\end{theorem}
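As a small numerical illustration of this exchange symmetry (a standalone
sketch with made-up one-dimensional orbitals, not used anywhere else in this
thesis), we can check that a symmetrized two-particle state keeps its value
under particle exchange while an antisymmetrized one flips its sign:
\begin{lstlisting}[language=Python]
import numpy as np

# Two hypothetical single-particle orbitals in one dimension.
def phi_a(x):
    return np.exp(-x**2 / 2)          # ground-state-like orbital

def phi_b(x):
    return x * np.exp(-x**2 / 2)      # excited-state-like orbital

def psi_boson(x1, x2):
    """Symmetrized two-particle state (bosonic)."""
    return phi_a(x1) * phi_b(x2) + phi_a(x2) * phi_b(x1)

def psi_fermion(x1, x2):
    """Antisymmetrized two-particle state (fermionic)."""
    return phi_a(x1) * phi_b(x2) - phi_a(x2) * phi_b(x1)

x1, x2 = 0.3, -1.2
assert np.isclose(psi_boson(x1, x2), psi_boson(x2, x1))       # same value
assert np.isclose(psi_fermion(x1, x2), -psi_fermion(x2, x1))  # sign flip
\end{lstlisting}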
\section{Observables - From Wave Function to Measurement}
\label{sec:obs-from-psi-to-Q}
We have repeatedly claimed that armed with the correct wave function we can compute any
measurable quantity of interest. Finally, we consider how exactly we can go
about doing so.
Assume we want to compute an observable $O$. The first step is to determine the
corresponding \emph{operator} $\hat O$. This is in general done by taking the classical
description of the observable and performing a canonical transformation.\footnote{There are
also quantities that do not have a classical analog (e.g.\ spin) for which we can still
find operator forms.} Most notably, we have the following transformations for
position and momentum:
\begin{align}
\vb x &\rightarrow \hat\vx,\\
\vb p &\rightarrow -i\hbar\grad.
\end{align}
For example, let's say we want to compute the total energy of the
system, as is often the case. For $N$ particles, that would classically be:
\begin{align}
H = \sum_{i=1}^N \frac{p_i^2}{2m_i} + V(\vb X),
\end{align}
where $\vb p_i$ denotes the momentum of particle $i$, all of which are placed in some
spatial potential $V$. It is easily verified that if we perform the above-mentioned
substitutions we will recover \cref{eq:Hamiltonian-operator-general} and recognize
it as the Hamiltonian operator, $\hat H$.
Finally, having both the wave function and the appropriate operator $\hat O$ we can
proceed. Unlike in classical mechanics, observables no longer have definite
values in general. Instead, we associate an expectation value with respect to the
\gls{pdf} described by $\Psi$:\footnote{Note that quantities
can still have definite values in certain states. This is then evident by the
expectation values having zero associated variance.}
\begin{align}
\expval{O}=\expval{\hat O}&= \frac{\expval{\hat O}{\Psi}}{\braket{\Psi}} \\
&= \frac{\int\dd{\vb X} \Psi^*(\vb X)\hat O(\vb X)\Psi(\vb X)}{\int\dd{\vb X}
\abs{\Psi(\vb X)}^2},
\end{align}
where $\int\dd{\vb X}\qty(\cdot)$ indicates an integral over all possible configurations of the
system (e.g.\ all possible position and spin values for each particle). Often the wave
function is required to be normalized in such a way that the denominator is equal to
unity, in which case it can be omitted.
For many-body systems it should be apparent that this integral quickly becomes intractable
to compute analytically. In practice we employ a numerical strategy to evaluate these
integrals, where the technique we use depends on the dimensionality of the integral and
the required level of accuracy. In our case, due to the large number
of degrees of freedom in the systems we shall investigate, we will use
\gls{mci}. This will be discussed in more detail in \cref{chp:monte-carlo}.
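To give a first taste of the approach, the sketch below estimates an
expectation value for a toy single-particle wave function whose squared norm is
a standard Gaussian in each of two coordinates, so that configurations can be
sampled directly (realistic wave functions require the Markov chain machinery
of \cref{chp:monte-carlo}):
\begin{lstlisting}[language=Python]
import numpy as np

rng = np.random.default_rng(2024)

# Toy |Psi|^2: a standard normal in each of two coordinates. Estimate
# <r^2> = <x^2 + y^2> by direct sampling; the exact answer here is 2.
X = rng.standard_normal(size=(10**6, 2))
r2 = np.sum(X**2, axis=1)
print(r2.mean(), r2.std() / np.sqrt(len(r2)))  # estimate and standard error
\end{lstlisting}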
\section{Example Systems}
So far we have not presented any particular systems. In this thesis we focus our
attention on two particular systems for illustrative purposes. We chose these systems
for their simplicity and/or the amount of preexisting results available
in the literature. We do this in order to benchmark our results against
known exact solutions, or when these do not exist, against verified approximate
results available in the literature.
\subsection{Quantum Dots}
\label{sec:quantum-dots-theory}
We consider a system of electrically charged particles (e.g.\ electrons) confined in a pure
isotropic harmonic oscillator potential, with an idealized total Hamiltonian
given by:
\begin{align}
\begin{split}
\hat H &= \sum_{i=1}^N\qty(-\frac{1}{2}\laplacian_i + V_{ext}(\vec r_i)) +
\sum_{i < j} V_{int}(\vec r_i, \vec r_j)\\
&= \sum_{i=1}^N\qty(-\frac{1}{2}\laplacian_i + \frac{1}{2}\omega^2
r_i^2) + \sum_{i < j} \frac{1}{r_{ij}},
\end{split}\label{eq:H-QD-def}
\end{align}
where we use natural units ($\hbar=c=m_e=1$) with energies in
atomic units (a.u.), $N$ denotes the number of particles in the system, and
$\omega$ is the oscillator frequency of the trap. Further, $\vec r_i$
denotes the position vector of particle $i$, with $r_i \defeq \norm{\vec r_i}$ and
$r_{ij}\defeq \norm{\vec r_i - \vec r_j}$ defined for notational brevity.
This system describes particles trapped in a parabolic potential well that pulls
them towards the bottom at all times, while they simultaneously feel the repulsive
Coulomb forces from the other particles. This hinders all particles from settling together
at the bottom. Even for this somewhat idealized system, the interplay between these two
opposing forces gives rise to a surprisingly complex problem, which will prove remarkably
hard to solve analytically even for two particles, and utterly impossible for higher $N$.
With the natural units in place, the only remaining quantity without a proper
unit is length, i.e.\ the unit of the positions $\vb r_i$. A convenient
choice is to consider the mean square vibrational amplitude, $\expval{r^2}$, for
a single particle at $T = \SI{0}{\kelvin}$ placed in the oscillator trap.
Computing the expectation value we get
$\expval{r^2}=\flatfrac{\hbar}{2m\omega}$, and we define the unit of length as the
characteristic length of the trap, $a_{ho}=\qty(2\expval{r^2})^{\flatfrac{1}{2}}=\qty(\flatfrac{\hbar}{m\omega})^{\flatfrac{1}{2}}$~\cite{mhj-compphys-II}.
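To get a feeling for the scales involved, the characteristic length is easy to
evaluate in SI units; the sketch below does so for an electron in a trap with a
hypothetical strength of $\hbar\omega = \SI{1}{\milli\electronvolt}$, chosen
purely for illustration:
\begin{lstlisting}[language=Python]
import numpy as np
from scipy import constants as const

# Characteristic trap length a_ho = sqrt(hbar / (m * omega)) for an
# electron, with a hypothetical trap strength hbar*omega = 1 meV.
omega = 1e-3 * const.electron_volt / const.hbar            # rad/s
a_ho = np.sqrt(const.hbar / (const.electron_mass * omega))
print(f"a_ho = {a_ho * 1e9:.1f} nm")                       # ~8.7 nm
\end{lstlisting}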
In our case, we limit ourselves to $N=2$ interacting electrons in two
dimensions in a trap with a frequency such that $\hbar \omega =
1$.\footnote{Note that, due to the natural units, this implies that $\omega =
1$, which further means that $a_{ho} = 1$. It should be apparent why we use these
definitions, as it simplifies both units and expressions.} We do this because for
this case we have exact, analytical solutions for the ground state energy. With the
interaction term included, the ground state energy is $E_0 = \SI{3}{\au}$~\cite{Taut1993}.
This limitation is purely one of convenience, as having exact benchmarks makes for better
verification of results. Furthermore, limiting the size of the problem makes the required
computation time manageable, which is good when experimenting with different techniques.
\subsubsection{Simple Non-Interacting Case}\label{sec:simple-non-inter-HO}
If we omit the interacting terms in \cref{eq:H-QD-def} we have
the standard harmonic oscillator Hamiltonian:
\begin{align}
\label{eq:ho-no-interaction-hamiltonian}
\hat H_0 &= \sum_{i=1}^N\qty(-\frac{1}{2}\laplacian_i +
\frac{1}{2}\omega^2 r_i^2).
\end{align}
This Hamiltonian lends itself to analytical solutions, and the stationary
single particle states are (in 2D)~\cite{griffiths_schroeter_2018}:
\begin{align}\label{eq:ho-single-particle-orbitals}
\phi_{n_x, n_y}(x, y) &= A H_{n_x}(\sqrt\omega x)H_{n_y}(\sqrt\omega y)
e^{-\frac{\omega}{2}\qty(x^2 + y^2)},
\end{align}
for quantum numbers $n_x, n_y = 0, 1,\dots$, and the Hermite polynomials
$H_n$ (not to be confused with the Hamiltonians, and never to be mentioned again). The
ground state, $n_x=n_y=0$, is simply
\begin{align}
\label{eq:ho-no-interaction-ground-state}
\phi_{00}(x,y) =
\sqrt{\frac{\omega}{\pi}}e^{-\frac{\omega}{2}\qty(x^2+y^2)}.
\end{align}
Using this wavefunction we can calculate the ground state
energy for one particle,
\begin{align}
\epsilon_{00} = \frac{\expval{\hat H_0}{\phi_{00}}}{\braket{\phi_{00}}}
= \omega = \SI{1}{\au}.
\end{align}
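This is quickly confirmed symbolically; the following sketch (leaning on
\texttt{sympy} purely as a convenience, it is not part of any later
implementation) applies $\hat H_0$ to $\phi_{00}$ and recovers the eigenvalue
$\omega$:
\begin{lstlisting}[language=Python]
import sympy as sp

x, y, omega = sp.symbols('x y omega', positive=True)

# Single-particle ground state in two dimensions (natural units).
phi = sp.sqrt(omega / sp.pi) * sp.exp(-omega * (x**2 + y**2) / 2)

# Apply H0 = -(1/2) laplacian + (1/2) omega^2 r^2 to phi.
H_phi = (-sp.Rational(1, 2) * (sp.diff(phi, x, 2) + sp.diff(phi, y, 2))
         + sp.Rational(1, 2) * omega**2 * (x**2 + y**2) * phi)

# The eigenvalue H0 phi / phi should simplify to omega.
print(sp.simplify(H_phi / phi))  # prints: omega
\end{lstlisting}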
The ground state wavefunction for the (unperturbed) two-electron case is simply the
product of the one-electron wave functions,
\begin{align}
\begin{split}
\Phi(\vec r_1, \vec r_2) &= \phi_{00}(\vec r_1)\phi_{00}(\vec r_2)\\
&= \frac{\omega}{\pi} e^{-\frac{\omega}{2}\qty(r_1^2+r_2^2)}.
\end{split}\label{eq:Phi-non-inter}
\end{align}
We can once again evaluate the ground state energy analytically, which yields
\begin{align}
E_0 = \frac{\expval{\hat H_0}{\Phi}}{\braket{\Phi}}
= 2\omega = \SI{2}{\au}.
\end{align}
This result is not surprising, as adding one more particle, without any
interactions, should simply double the energy. Another way to look at it is
that the simple harmonic oscillator solution gives $\flatfrac{\omega}{2}$
per degree of freedom, so adding another two yields an extra $\omega$.
When the two particles are electrons, we may say something about their total
spin. As electrons are fermions, their total wavefunction must be
anti-symmetric upon interchanging the labels $1$ and $2$.
\Cref{eq:Phi-non-inter} is obviously symmetric, and so the
spin-wavefunction must necessarily be anti-symmetric. For the combination of
two spin-1/2 particles, there is only one candidate, namely the spin-$0$
singlet:
\begin{align}
\chi_0 = \frac{1}{\sqrt 2}\qty(\ket{\uparrow\downarrow} -
\ket{\downarrow\uparrow}).
\end{align}
A similar argument can be made for particles with different spins.
\subsubsection{Considerations from the Virial Theorem}
The virial theorem gives a general relation for the time-averaged kinetic
energy $\expval{K}$ and the corresponding potential energy
$\expval{V_{pot}}$ of a stable system of $N$ particles. In general the
theorem states:
\begin{align}
\expval{K} = -\frac{1}{2}\sum_{k=1}^N \expval{\vec F_k \cdot \vec
r_k},\label{eq:virial-theorem}
\end{align}
where $\vec F_k$ denotes the combined forces acting on particle $k$, located
at position $\vec r_k$. For a radial potential of the form $V(r)=ar^n$, such
that the potential between any two particles in the system depends on some
power of the inter-particle distance, the
theorem takes the following form:
\begin{align}
\expval{K} = \frac{n}{2}\expval{V_{TOT}},
\end{align}
where $V_{TOT}$ denotes the sum of the potential energy $V(r)$ over all
pairs of particles.
Although the harmonic oscillator potential does not depend on the
\emph{inter-particle} distance, but rather on the positions of each particle,
it works out to the same relation in our case. Computing the full relation for
our Hamiltonian for two electrons in two dimensions, it even works out so
that we can use the same relation on the harmonic oscillator potential and the Coulomb
potential separately, and add the result. This means that the virial theorem
predicts the following~\cite{Katriel2012}:
\begin{align}
\expval{K} = \expval{V_{ext}} -
\frac{1}{2}\expval{V_{int}}.\label{eq:virial-result}
\end{align}
Note that this implies that we should consider the \emph{total} kinetic
energy, and the \emph{total} external and internal potential energies, as opposed to per
particle.
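For the non-interacting single-particle case of
\cref{sec:simple-non-inter-HO}, where the interaction term is absent and
\cref{eq:virial-result} reduces to $\expval{K} = \expval{V_{ext}}$, the
prediction is easy to confirm symbolically (again using \texttt{sympy} only
for illustration):
\begin{lstlisting}[language=Python]
import sympy as sp

x, y, omega = sp.symbols('x y omega', positive=True)
phi = sp.sqrt(omega / sp.pi) * sp.exp(-omega * (x**2 + y**2) / 2)

# <K> and <V_ext> for the 2D ground state; both come out to omega / 2.
lap = sp.diff(phi, x, 2) + sp.diff(phi, y, 2)
K = sp.integrate(-phi * lap / 2, (x, -sp.oo, sp.oo), (y, -sp.oo, sp.oo))
V = sp.integrate(omega**2 * (x**2 + y**2) / 2 * phi**2,
                 (x, -sp.oo, sp.oo), (y, -sp.oo, sp.oo))
print(sp.simplify(K - V))  # prints: 0
\end{lstlisting}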
\subsection{Liquid $^4$He}
\label{sec:liquid-helium-theory}
Consider now an infinite collection of helium atoms ($^4$He) packed with a given density,
$\rho$. As infinities are hard to work with, we model this by considering a
cubic simulation box with side lengths $L$ and periodic boundary conditions. The
infinite collection is then formed by stacking copies of such simulation boxes
together. \Cref{fig:pbc-illustration} shows an illustration of the idea.
\begin{figure}[h]
\centering
\input{illustrations/PBC-illustration.tex}
\caption[Illustration of periodic boundary conditions]{Illustration of $^4$He organized into a grid of identical simulation
boxes. The actual boxes are three-dimensional.\citesource{writing/illustrations/PBC-illustration.tex}}
\label{fig:pbc-illustration}
\end{figure}
The Hamiltonian for this system is
\begin{align}
\hat H &= -\sum_{i=1}^N \frac{\hbar^2}{2m}\laplacian_i + \sum_{i < j} V(r_{ij}),
\end{align}
i.e.\ the kinetic energy of all atoms, plus an interaction potential dependent on
the distance between all pairs of atoms. The mass $m$ is the mass of one $^4$He
atom. The form of $V$ is not known analytically, but is experimentally probed to
great accuracy. Theorists have since fitted specific functional forms to the
experimental data, and we will do our calculations using one of these
potentials. The most commonly used is the simple Lennard-Jones (LJ)
potential~\cite{Kalos-1981}:
\begin{align}
\label{eq:Lennard-Jones-def}
V(r)
&= 4\epsilon\qty[\qty( \frac{\sigma}{r} )^{12} - \qty( \frac{\sigma}{r} )^6 ]
\end{align}
with $\epsilon/\kappa = \SI{10.22}{\K}$\footnote{$\kappa$ is the Boltzmann constant.} and $\sigma = \SI{2.556}{\angstrom}$. This
models the competing forces of the atoms' mutual repulsion and attraction. The
positive term describes the short-range Pauli repulsion due to overlapping
electron orbitals, and the negative term describes the long-range attraction
due to phenomena such as van der Waals forces.
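In code, the potential is essentially a one-liner. The sketch below expresses
$V(r)/\kappa$ directly in Kelvin with $r$ in angstrom (anticipating the note
about units below) and checks the location and depth of the well minimum:
\begin{lstlisting}[language=Python]
import numpy as np

EPS = 10.22     # epsilon / kappa in Kelvin
SIGMA = 2.556   # sigma in angstrom

def lennard_jones(r):
    """Lennard-Jones potential V(r)/kappa in Kelvin, r in angstrom."""
    sr6 = (SIGMA / r) ** 6
    return 4 * EPS * (sr6**2 - sr6)

# The minimum sits at r = 2^(1/6) * sigma with depth -epsilon.
r_min = 2 ** (1 / 6) * SIGMA
print(r_min, lennard_jones(r_min))  # ~2.87 angstrom, -10.22 K
\end{lstlisting}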
We can also use the slightly more accurate (and complicated) potential named HFDHE2~\cite{Aziz-hfdhe2}:
\begin{align}
\label{eq:HFDHE2-def}
V(r) &= \epsilon
\left\{\!\begin{aligned}
&A \exp(-\alpha \frac{r }{ r_m})\\
&- F(r) \qty[ C_6 \qty(\frac{r_m }{ r})^6 + C_8 \qty(\frac{r_m }{ r})^8 + C_{10} \qty(\frac{r_m }{ r})^{10}]
\end{aligned}\right\}
\end{align}
with
\begin{align}
F(r) &= \begin{cases}
\exp( - [D \frac{r_m}{r} - 1]^2 ) & \qfor \frac{r}{r_m} \leq D\\
1 & \qotherwise
\end{cases}
\end{align}
with the following parameters:
\begin{align}\label{eq:HFDHE2-parameters}
\begin{split}
&A = \num{0.5448504e6}\\
&\alpha = \num{13.353384}\\
&D = \num{1.241314}\\
&r_m= \SI{2.9673}{\angstrom}
\end{split}
\begin{split}
&\epsilon/\kappa = \SI{10.8}{\K}\\
&C_6= \num{1.37732412}\\
&C_8= \num{0.4253785}\\
&C_{10}= \num{0.178100}\\
\end{split}
\end{align}
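A direct transcription of \cref{eq:HFDHE2-def} with the parameters of
\cref{eq:HFDHE2-parameters} might look as follows (a sketch; the variable
names are our own):
\begin{lstlisting}[language=Python]
import numpy as np

A, ALPHA, D = 0.5448504e6, 13.353384, 1.241314
R_M, EPS = 2.9673, 10.8   # angstrom, Kelvin (epsilon / kappa)
C6, C8, C10 = 1.37732412, 0.4253785, 0.178100

def hfdhe2(r):
    """HFDHE2 potential V(r)/kappa in Kelvin, r in angstrom."""
    x = r / R_M
    F = np.where(x <= D, np.exp(-(D / x - 1) ** 2), 1.0)
    dispersion = C6 / x**6 + C8 / x**8 + C10 / x**10
    return EPS * (A * np.exp(-ALPHA * x) - F * dispersion)
\end{lstlisting}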
Both potentials grow rapidly for small $r$, and tend to $0$ for large $r$. The
interesting sections of both potentials are shown
in~\cref{fig:helium-potentials-plot}. The potentials are very similar, with the
main difference being the depth of the well and how sharply the potential dies
off.
\begin{figure}[h]
\centering
\input{illustrations/helium-potentials.tex}
\caption[Lennard-Jones and HFDHE2 potentials]{Lennard-Jones and HFDHE2 potentials used to model the potential
between pairs of $^4$He atoms. Both potentials grow rapidly towards infinity
when $r\to0$ and approach zero when $r\to\infty$.\citesource{writing/illustrations/helium-potentials.tex}}
\label{fig:helium-potentials-plot}
\end{figure}
\subsubsection{A Note About Units}
In the literature it is common to express the energies in Kelvin per particle,
and lengths in angstrom~\cite{Kalos-1981, Aziz-hfdhe2, ruggeri2018}. In order to convert energies to temperatures we
divide by the Boltzmann constant, $\kappa$, because it has the unit of Joules per
Kelvin. The Hamiltonian becomes:
\begin{align}
\hat H &= -\sum_{i=1}^N \frac{\hbar^2}{2m\kappa}\laplacian_i + \sum_{i<j}\frac{1}{\kappa}V(r_{ij}).
\end{align}
If we use SI units for the constants involved we get:
\begin{align}
\frac{\hbar^2}{2m\kappa} = \SI{6.059651974e-20}{\metre^2\kelvin} = \SI{6.059651974}{\angstrom^2\kelvin},
\end{align}
which turns out to be a reasonably sized number when we use angstrom as units for
lengths.\footnote{Note that $\laplacian$ has units of $\text{length}^{-2}$, so
the units work out to Kelvin.} We will use these values in the implementation,
and simply refer to energies in Kelvin when we study this system. However, if
the reader ever wants to convert the units, for comparison with other works
perhaps, simply multiplying by the value of $\kappa$ in the unit system of
choice should yield the corresponding energy.
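The prefactor itself can be reproduced from tabulated physical constants, for
instance with \texttt{scipy} (a quick sanity check rather than part of the
implementation):
\begin{lstlisting}[language=Python]
from scipy import constants as const

m_he4 = 4.002602 * const.atomic_mass      # mass of one He-4 atom in kg
factor = const.hbar**2 / (2 * m_he4 * const.Boltzmann)  # m^2 Kelvin
print(factor * 1e20)                      # ~6.0597 angstrom^2 Kelvin
\end{lstlisting}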
\subsubsection{Minimum Image}
Because we assume a periodic structure we must take this into account when
calculating distances. Consider two particles, A and B, located at opposite
corners of the simulation box. What is the distance between them? The intuitive
answer is $\norm{\vb r_A - \vb r_B} = \sqrt{3}\,L$, i.e.\ the length of the
diagonal of the cubic box. Nevertheless, the answer we should use is zero. The
reason is that there is a periodic copy of the box stacked such that A and the
periodic B copy are located at the same corner. This way of calculating
distances is called minimum image, and says that we should use the shortest
possible distance. In general, if two particles have a separation of $\Delta x =
\flatfrac{L}{2} + \delta_x$ ($0 \leq \delta_x < \flatfrac{L}{2}$, each spatial coordinate handled
individually), the minimum image distance is $\Delta x_{\mathit{min}} = \Delta x
- L = \delta_x - \flatfrac{L}{2}$, whose magnitude is $\flatfrac{L}{2} - \delta_x$.
In our implementation, whenever we need a distance between two particles, we use
the following prescription (example in Python):
\begin{lstlisting}[language=Python]
import numpy as np
# Example coordinates.
L = 5.0
p1, p2 = np.array([0, 0, 0]), np.array([2, 3, 4])
diff = p2 - p1 # [2, 3, 4]
diff_minimum = diff - L * np.round(diff / L) # [2, -2, -1]
\end{lstlisting}
The last variable, \texttt{diff\_minimum}, represents the minimum image distance
vector, and this is the one used for any further calculations.
\subsubsection{Correcting for Periodicity in Potentials}
The potential $V(r_{ij})$ depends on the inter-particle distances. However,
there is an infinite number of particle pairs if we consider the system as a
whole. We use periodic boundary conditions, and shall only consider pairs where
both particles are in the simulation box (but still respecting the minimum image
convention). This limitation excludes any interactions that act on length scales
larger than $\flatfrac{L}{2}$, and this can have a significant impact on the
total system.
In an attempt to limit this effect, we modify the potentials slightly. First we
explicitly truncate the potential to be zero for large distances, and shift it
slightly so that the function remains continuous. That is, considering $V(r)$
from~\cref{eq:Lennard-Jones-def}, we change it as follows:
\begin{align}
\label{eq:Lennard-Jones-truncated-def}
V_\mathit{trunc}(r) &=
\begin{cases}
V(r) - V(\flatfrac{L}{2}) & \qfor r \leq \flatfrac{L}{2}\\
0 & \qotherwise
\end{cases}.
\end{align}
Note that in order for this truncation to be sensible, we must use a
sufficiently large box so that $V(\flatfrac{L}{2})$ is sufficiently close to
zero. What exactly \emph{sufficiently} means is left rather vague, but we
mention it as a potential source of error.
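In code, the truncation is a thin wrapper around any pair potential. A minimal
sketch (with a hypothetical box size, and a local copy of the Lennard-Jones
function) could read:
\begin{lstlisting}[language=Python]
import numpy as np

def lj(r, eps=10.22, sigma=2.556):
    sr6 = (sigma / r) ** 6
    return 4 * eps * (sr6**2 - sr6)

def truncated(V, L):
    """Truncate and shift pair potential V for a box of side L, so that
    it vanishes at and beyond L/2 yet stays continuous at the cut-off."""
    shift = V(L / 2)
    return lambda r: np.where(r <= L / 2, V(r) - shift, 0.0)

V_trunc = truncated(lj, L=11.2)            # box size is an assumption
print(V_trunc(np.array([2.9, 5.6, 8.0])))  # last two entries are zero
\end{lstlisting}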
The truncation obviously leads to slightly less precise results, but this can
partially be corrected for with so-called \emph{tail corrections}. The approach
is to model the potential contribution of all particles further away than
$\flatfrac{L}{2}$ in a mean-field manner. The result is simply adding a constant
term, and acts only to shift the total potential in a given direction. As the
purpose of this thesis is not to obtain the most realistic results possible, but
rather a \emph{relative} comparison of methods, we will not spend more time on
specific ways of implementing such corrections.
\end{document}
%***********************************************%
% %
% EECS 470 - Lab 01 Assignment %
%<------------------> %
% Last Modified by: %
% William Cunningham on 2014-9-03 %
% %
%***********************************************%
%***********************************************%
% Preamble %
%***********************************************%
\documentclass[dvipsnames]{article}
\usepackage{xcolor}
\usepackage[os=win]{menukeys}
\usepackage{
tikz,
colortbl,
graphicx,
amsmath,
amssymb,
mathrsfs,
hyperref,
float,
siunitx,
fancyhdr,
url,
minted,
cleveref
}
\usepackage
[left=1in,top=1in,right=1in,bottom=1in]
{geometry}
\pagestyle{fancy}
%--- Header ---%
\newcommand{\courseNumber}{EECS 470}
\newcommand{\courseTitle}{Computer Architecture}
\newcommand{\university}{University of Michigan, Ann Arbor}
\newcommand{\labdate}{Monday, January 17$^\text{th}$, 2021}
\lhead{
\small{
\university
}
}
\rhead{
\small{
\emph{Date: \labdate} \hspace*{-1em}
}
}
\newcommand{\shortbar}{
\vspace*{-12pt}
\begin{center}
\rule{5ex}{0.1pt}
\end{center}
}
\newcommand{\lab}[1]{
\begin{center}
\LARGE{
\vspace*{-32pt}
EECS 470 Lab #1 Assignment
\shortbar
\vspace*{-20pt}
}
\end{center}
}
%***********************************************%
% %
% TikZ Definitions %
% %
%***********************************************%
\usetikzlibrary{shapes,arrows,automata,shadows}
\pgfdeclarelayer{background}
\pgfdeclarelayer{foreground}
\pgfsetlayers{background,main,foreground}
% Block Diagram Styles
\tikzstyle{block} = [draw, fill=RoyalBlue!50, rectangle,
minimum height=2cm, minimum width=2cm, rounded corners]
\tikzstyle{sum} = [draw, fill=blue!20, circle, node distance=2cm]
\tikzstyle{input} = [coordinate]
\tikzstyle{output} = [coordinate]
\tikzstyle{branch} = [coordinate]
\tikzstyle{pinstyle} = [pin edge={to-, thin, black}]
% Signal Flow Graph Styles
\tikzstyle{state} = [draw, fill=RoyalBlue!50, circle,
minimum height=3em]
%***********************************************%
% %
% Document %
% %
%***********************************************%
\begin{document}
\lab{1}
\section*{Note:}
\begin{itemize}
\item Please review the \href{https://caenfaq.engin.umich.edu/linux-login/how-do-i-connect-to-a-caen-linux-computer-remotely}{\underline{CAEN VNC help page}} to get setup for the rest of this lab.
\item Please review the \href{https://docs.google.com/document/d/1U9FOOYAPqvhSQda-v66SCmUgdvuaBs1KIK8Ht4_WSCA/edit?usp=sharing}{\underline{GTKwave Waveform Viewer tutorial}} as a fallback option instead of DVE. The tutorial below explains how to use DVE. DVE is a more powerful tool but is often very slow when used remotely.
\item The lab should be completed individually.
\item The lab must be checked off by a GSI before end of lab on Friday,
January 29$^\text{th}$, 2021.
\begin{itemize}
\item It is \emph{highly} recommended you complete the lab before the following week's lab release
\end{itemize}
\end{itemize}
\section{Linux Introduction and Setup}
The work in this class will be done using tools installed in the CAEN Linux
environment. You will need to log in to Linux if you haven't already done so.
Once you are logged in, you will need to open a terminal; right click on the
desktop and select ``open terminal'' from the list that pops up.
There are a number of useful things that you can do in the terminal besides
running the tools associated with this class. Here is a short introduction to a
few of the most useful items.
\subsection{File System Utilities}
The file system that your home directory is stored in is an Andrew File System
(AFS) cell. To interact with this, you run the command \texttt{fs}. There are
many useful subcommands, including setting file permissions (\texttt{fs
setacls}) and checking your space allocation (\texttt{fs quota}). The latter is
quite important because the tools used in this class can use up your entire
space allocation if you're not careful. This tutorial is unlikely to do so, but
the final project very well might.
\subsection{Text Editors}
CAEN supplies a number of text editors. It is beyond the scope of this tutorial
and this class to proselytize for one over the other, but we do recommend that
you get familiar with at least one terminal-based text editor, which generally
means \texttt{vim}, \texttt{emacs} or \texttt{nano}. Graphical text editors
available include SublimeText2 and Gedit.
\subsection{Bourne Again Shell (\texttt{bash})}
We will discuss the shell in much greater depth in lab 5, but you might want to
do some reading on your own.
\subsection{Setup}
This tutorial has a set of starter files, which you need to download from the course web site under ``Schedule''.
% The
% easiest way is to use the \texttt{wget} tool, which goes something like:
% \noindent
% \texttt{\$ wget www.eecs.umich.edu/courses/eecs470/labs/lab1.tar.gz}
\noindent
Once you have the tarball, you need to extract it:
\noindent
\texttt{\$ tar -xvf lab1.tar.gz}
\noindent
Now that you have the files, it's time to get started on the tutorial.
\section{Synopsys Tools}
\subsection{The Build System: GNU Make}
Due to the complexity of the Synopsys tools, we are using the GNU Make build
system to compile and run simulations. You will need to get familiar with this,
particularly for the final project. We will be giving you a much more thorough
introduction next week in lab.
For most of your assignments before the final project, you will simply be able
to reuse the Makefile we provide with this lab without much, if any,
modification. You will be required to reuse the standard naming conventions in
this \texttt{Makefile}.
\begin{figure}[H]
\inputminted[frame=lines,obeytabs,tabsize=4,firstline=15,lastline=15]{makefile}{Makefile}
\inputminted[frame=lines,obeytabs,tabsize=4,firstline=25,lastline=27]{makefile}{Makefile}
\caption{Makefile variables}
\label{fig:makevar}
\end{figure}
We will now walk through the provided Makefile to help you understand the
basics. \Cref{fig:makevar} shows the way that Make handles variables. The
declaration of the \texttt{VCS} variable is pretty standard. Here, we use that
variable to hold the command for compiling the simulator for your design. You
will not need to modify this variable, but you will need to modify the variables
for the design files.
\begin{itemize}
\item \texttt{TESTBENCH} - this variable holds the name of your testbench
file
\item \texttt{SIMFILES} - this variable holds the names of all the
synthesizable Verilog files you are working with
\item \texttt{SYNFILES} - this variable holds the names of all the
synthesized Verilog files, which are generated by the synthesis tool and
typically end in \texttt{*.vg}.
\end{itemize}
After the variables are declared, the rest of the file is dedicated to Make
rules. These rules are the components that allow Make to do dependency
resolution. You may want to add a few, particularly related to synthesis, but we
will save the details on that for Lab 2.
\subsection{The Simulator: \texttt{simv}}
Verilog, as a hardware description language, cannot be ``compiled'' in the same
way that C or Haskell might be compiled. The way we test or run a design once it
has been implemented in such a hardware description language is to simulate it.
The easiest way to do this simulation is to build a software simulator
automatically, which is what the Synopsys VCS tool does. It takes in a Verilog
design description you've created and builds a simulator in C++ that changes
values the way you've described and prints appropriately. This is generally used
as a first pass to make sure that the design works the way that is intended.
It's time for you to try this out. In the directory where you've extracted all the
provided files, run the command: \texttt{make}.
This will have resulted in an error message. What went wrong? It turns out that
we have a syntax error on line 7 of \texttt{tut\_mod.v}: \texttt{loggic} should be
\texttt{logic}. Fix the error (open the file in your choice of editor, see above),
save the file and then run \texttt{make} again. This time you should see the
output of the \texttt{\$monitor(\dots)} call in \texttt{tut\_test.v}. The Make rule
that runs the simulation also redirects the output into a file called \texttt{program.out}
(this is explained in more detail in Lab 5). This is particularly useful when
you have a lot of output.
\subsection{The Debugger: \texttt{dve}}
This simple error was easy to find and fix, but many (most) errors are not. For
the more complicated errors, Synopsys provides a debugging tool, called DVE, for
hardware description designs. This debugger looks much more akin to the debugger
you used in 270 (or your equivalent undergraduate digital design course) than
the ones you would have used in 280/281 (or your equivalent undergraduate
programming courses).
The command to run the debugger is in another Make rule, and you can run it with
the command: \texttt{make dve}. Run that command now. This should open a
window, which we will now refer to as the DVE window. We will now walk through
the default panes of the DVE window, but you can add more panes yourself
through the \menu[,]{Window,Panes} menu.
\subsubsection{Module Hierarchy Pane}
\begin{figure}[H]
\begin{center}
\includegraphics[scale=0.85]{mod-hier-pane}
\caption{The DVE Window with the Module Hierarchy Pane highlighted}
\label{fig:mod-hier-pane}
\end{center}
\end{figure}
In \cref{fig:mod-hier-pane}, the pane on the far left of the DVE window is the
Module Hierarchy Pane. It contains the names of the signals you might be
interested in, grouped hierarchically by module. You will need this list to
select signals you want to look at in other views and panes.
In our tutorial, we have a testbench which instantiates another module. Expand
the testbench (click the plus icon to the left) in the pane to see the submodule. You'll notice that the submodule is called \texttt{tbp}, which is not the name of the \texttt{two\_bit\_pred}
module, but rather the name of the instantiation in the testbench. Make sure to use
meaningful names for instantiations when you implement your own designs.
\subsubsection{Data Pane}
\begin{figure}[H]
\includegraphics[width=\textwidth]{data-pane}
\caption{The DVE Window with the Data Pane highlighted}
\label{fig:data-pane}
\end{figure}
In \cref{fig:data-pane}, the data pane, located in the middle of the DVE window
is highlighted. This pane lists the signals in the currently selected module in
the module hierarchy pane. This is where you select the signals whose waveforms
you would like to view. You can select multiple signals by holding down \keys{\shift} (shift) and clicking to select a group
or holding down \keys{\ctrl} and clicking to select multiple individual
signals. Select all the signals available in the testbench and right click on
the highlighted group, then go to \menu[,]{Add to waves,New wave view} to open the
waveform viewer. If, at some point later on, you need to add more signals to the
waveform viewer, you can do so by selecting them here and selecting
\menu[,]{Add to waves,Add to [wave name]}.
\subsubsection{Waveform Viewer}
\begin{figure}[H]
\includegraphics[width=\textwidth]{waveform-viewer}
\caption{The Waveform Viewer Window}
\label{fig:waveform}
\end{figure}
You will see a new window pop open, called the Waveform Viewer, which is shown
in \cref{fig:waveform}. In this new window, you need to go to
\menu[,]{Simulator,Start/continue} or press \keys{F5} to start the simulation and
populate the waveform viewer. At this point, if you added additional signals to
this view, they would have no waveform until you reran the simulator. This can
be done by \menu[,]{Simulator,Stop} then \menu[,]{Simulator,Start} or by
pressing \keys{F5} twice.
The waveform viewer is intended to facilitate debugging by showing you how
several signals change in relation to one another over time. The pane on the
left hand side of this window lists the names of the signals currently being
displayed. The right hand pane shows the signals themselves. At the bottom of
this window there is a scroll bar which moves through time, when zoomed in to
see specific signal transitions. Move to the right to see how the signals in the
module change. Now, try zooming in and out, as well as getting all of time on
the screen at once by going to \menu[,]{View,Zoom} or clicking one of the
magnifying glass buttons at the top.
Signals in the waveform viewer can be displayed several different ways, largely
related to the 4 state logic used in Verilog. The ``good'' signals \texttt{0}
and \texttt{1} are shown as green signal lines with transitions between the two
values as appropriate. The ``bad'' signals \texttt{X} and \texttt{Z} show up as
a red block where the signal should be and a yellow line half way between
\texttt{0} and \texttt{1}, respectively. Both of these can be seen in the
provided module. Try to find them now. The signal \texttt{prediction} is an
example of the unknown value, \texttt{X}, after time 26, and the \texttt{taekn}
signal is an example of the unconnected value, \texttt{Z}. These all apply to
1-bit signals. When we have a larger bus, values are displayed with their
hexadecimal value with marks where the value transitions. If any part of a bus
is \texttt{X}, it will show the whole thing as \texttt{X}, and similarly for
\texttt{Z}, it will show the hexadecimal value with the unconnected portions
marked with a \texttt{z}. If you would rather have buses display their value in
some other base, you can right click on the signal value in the left hand pane
and choose the \menu[,]{Radix} option. You can also expand the bus into all its
individual signals by clicking on the plus next to the signal name.
You can click on the waveform itself to drop a marker, which helps line up
values across many signals at the same time. At the top of the waveform pane,
once you've placed a marker, there is a number corresponding to the time you've
set the marker at. Right clicking on this number gives you the option to advance
forward by an amount or to the next clock tick.
Signals can be dragged around to group them more logically/helpfully. You can
also remove signals by highlighting them and then pressing the \keys{delete}
key.
\subsubsection{Source Pane}
\begin{figure}[H]
\begin{center}
\includegraphics[scale=0.85]{source-pane}
\caption{The DVE Window with the Source Pane highlighted}
\label{fig:source-pane}
\end{center}
\end{figure}
The source pane is on the right hand side of the DVE window, and is highlighted
in \cref{fig:source-pane}. It shows the Verilog source for the module you're
currently examining. This allows you to examine what exactly is generating a
signal. It can also let you double check that the file you've just edited to fix
a bug is the one you're now compiling/simulating. To change the file being
displayed, drag a module from the hierarchy pane over to see it. Similarly,
drag a signal name over to the source pane to highlight its definition. To
view the current values of signals from the simulator in the source pane, right
click on the source pane and select \menu{Annotate Values}. You should now see
values next to each signal.
\subsubsection{Schematic Pane}
\begin{figure}[H]
\includegraphics[width=\textwidth]{schematic-viewer}
\caption{The Schematic Viewer}
\label{fig:schematic-viewer}
\end{figure}
\Cref{fig:schematic-viewer} shows the schematic viewer. You can open up a
schematic of any particular module by right clicking on it in the module
hierarchy pane and selecting \menu{Show Schematic} from the menu. This can be
useful in tracking where signals are coming from after synthesis.
\subsubsection{Debugging Example}
Now that you've learned about many of the features of DVE, it's time to use it
to debug the module you've been given.
The output of the \texttt{two\_bit\_pred} module is the \texttt{prediction}
signal. This signal goes to \texttt{X} at time 25 and stays there for the rest
of the simulation. Why would that happen? Let's figure it out.
For a signal to be unknown, one of its drivers must also be unknown, which if we
follow this logic down all the way means that one of the inputs somewhere is
unknown. So, we need to start by looking at how \texttt{prediction} gets set. The
source for \texttt{tbp} should already be open, so let's use that to trace back
to where the problem is. Let's also add the \texttt{tbp} module signals to the
waveform viewer so we can see the signals in that module. Looking at the source,
we can see that \texttt{prediction} is simply set equal to a bit of signal
\texttt{state}. Looking at the waveform viewer, we see that, as expected,
\texttt{state} is unknown at the same times. Now we want to check the signals
that define \texttt{state}. We can see that \texttt{state} either takes on a
known value of \texttt{01}, or it takes on the value of \texttt{next\_state}.
So again, now we need to trace back to which values set \texttt{next\_state}.
Looking at this, we can see that \texttt{next\_state} is determined by
\texttt{state} and \texttt{taken}. We already know state is broken, so we need
to look at \texttt{taken}. On the waveform viewer, we see that \texttt{taken}
always has a value of \texttt{Z}, unconnected. Because \texttt{taken} is an
input into this module, we know the problem is in how the testbench is
connecting to this module. Now let's drag the \texttt{testbench} module into the
source viewer so we can look for the problem. Looking at where \texttt{tbp} is
instantiated, we can see that there was a typo. \texttt{.taken(taekn)} should
have been \texttt{.taken(taken)} on line 7. The compiler assumed \texttt{taekn}
was just a wire not being driven and although it gave a warning, it did not give
a compiler error on this. These little bugs can often be very annoying to deal
with.
Now that we've found the bug, we want to save the waveform viewer configuration
so we can just easily reload them when we run the waveform viewer again. It may
not seem to be a big deal right now, but eventually when you're working with
hundreds of signals, only a few of which are related to a problem you're trying
to debug, it can be very annoying to try to find and place them on the waveform
viewer every time. To do this, on the waveform viewer go to \menu[,]{File,Save
Session}. Type in a filename, like \texttt{view01.tcl} or, in the future, any
name you want. It will be useful to keep the names descriptive as there may be
different sets of signals to look at for different parts of a module you're
testing. Now exit DVE.
Use your favorite text editor to fix that typo in \texttt{tut\_test.v}. When
you're finished, type \texttt{make clean} and then \texttt{make dve} to recompile and
reload the waveforms so we can double check that it's working properly now. Now
on the DVE window, go to \menu[,]{File,Load Session}. Select your
\texttt{view01.tcl} file and press OK. The waveform viewer, complete with the
original signals, should appear. Run the simulation again. Here we can see, that
everything is working properly now.
Now, let's find another bug. Open up the provided \texttt{Makefile} and change
the value of the \texttt{SIMFILES} variable to \texttt{tut\_mod2.v}. This module
has an infinite loop, which requires one more option in DVE. First, run
\texttt{make}, but when you've noticed that it hangs without printing anything,
kill the job with \keys{\ctrl + $\backslash$}. Now, run \texttt{make clean} to
remove the files the last command created and then run \texttt{make dve} to
start debugging the infinite loop.
The DVE window should appear. Now let’s find that infinite loop. Add the signals
to the waveform viewer. To find out where it is, run the simulator with the
\keys{F5} key. The waveform viewer should hang and a red dot in the toolbox
above should get enabled. Now click on \menu[,]{Simulator,Terminate}. You can
now see that the simulation was hung at time 25. The problem must be around here
somewhere. We are now at time 30. Now let’s find out where we’re looping. Click
on \menu[,]{Simulator,Step/Next,Next} a few times and look at the \texttt{tbp} source
code. We seem to be going between \texttt{tut\_mod2.v:11} and
\texttt{tut\_mod2.v:12}. That means that lines 11 and 12 of \texttt{tut\_mod2.v}
are causing the problem. We now know that the problem is in our module (not our
testbench). In the \texttt{tbp} source, right click and select \menu{Annotate Values} to
monitor the values of each wire. We want to see how we got to this state. If we
look at the source window, we can see that lines 11 and 12 are assignments to
\texttt{loop1} and \texttt{loop2}. If we click next a few times and watch the
values, we can see that both \texttt{loop1} and \texttt{loop2} keep changing
even though no time is actually passing. This is a sure sign of circular logic.
Since \texttt{loop1} is dependent on \texttt{loop2}, and \texttt{loop2} is
dependent on \texttt{loop1}, the circular path is fairly obvious in this case.
We now have enough information to fix the bug, but in this example, there’s
nothing to really fix, since the \texttt{loop1} and \texttt{loop2} variables are
not used for anything, so we can just leave the design alone and finish up.
This concludes our introduction to DVE, but there are a number of other useful
features in the waveform viewer. Feel free to explore and try other things on
your own.
\subsection{The Synthesis Tool: \texttt{dc\_shell}}
In this class we require that your hardware designs really represent hardware,
and we judge your final project on the overall speed of your design. To
accomplish this you will need to synthesize your design. The synthesis tool
attempts to create an actual circuit-level implementation of your Verilog design
description. This circuit-level design is actually a Verilog file itself, but it
is structural Verilog and uses a library of standard cells that another tool can
lay out on a real chip. One benefit of the output being a Verilog file is that
we can simulate the circuit-level design in the same way we simulate your
behavioral design. Thus, we can test your synthesized design, and if it does not
behave properly your design is not considered to be synthesizable and is
therefore incorrect (though we do give partial credit). The clock speed of the
circuit that it generates will be the clock speed we use for your design. We
won't go into great detail about synthesis in this tutorial, but we will show you
the basic commands. We provide greater depth in Lab 2.
We interact with the synthesis tool, Synopsys Design Compiler, through a script
of options and commands written in the Tool Command Language, which is
abbreviated TCL (\texttt{*.tcl}) and spoken like the word ``tickle.'' We have
provided you with a synthesis script called \texttt{tut\_synth.tcl}, which is
run by a Make rule, shown in \cref{fig:make-syn}. Open the script in the editor
of your choice and read through it; the commands are actually pretty intuitively
named, though some of them are a bit confusing. We will explain more of this to
you later, but the part you need to be familiar with now is marked with a
comment saying ``\texttt{The following five lines must be updated for every new
design}.'' The first line of this section tells the tool what Verilog design
files to read. The second line tells the tool the name of your toplevel module.
The third line names the clock signal, and the fourth line sets the clock
period. A higher clock period will yield faster synthesis times, while a lower
one will take longer because it requires more effort on the part of the
synthesis tool. A clock period set too small may not even be possible. We will
explore this design constraint in Project 2.
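As a rough guide, the marked section of the script typically boils down to a
handful of variable assignments like the ones below. This is a hypothetical
sketch, not a copy of \texttt{tut\_synth.tcl}; the real script may use different
variable names, so edit the real script rather than pasting this in:
\begin{minted}[frame=lines,obeytabs,tabsize=4]{tcl}
# Hypothetical sketch of the per-design settings in a synthesis script.
set design_files "tut_mod.v"    ;# Verilog design files to read
set design_name  "two_bit_pred" ;# name of the toplevel module
set clock_name   "clock"        ;# name of the clock signal
set clock_period 10.0           ;# clock period; looser periods synthesize faster
\end{minted}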
\begin{figure}[H]
\begin{minted}[frame=lines,obeytabs,tabsize=4]{makefile}
two_bit_pred.vg: tut_mod.v tut_synth.tcl
dc_shell-t -f tut_synth.tcl | tee synth.out
\end{minted}
\caption{The synthesis Make rule.}
\label{fig:make-syn}
\end{figure}
You will need to add the Make rule from \cref{fig:make-syn} to your
\texttt{Makefile}. \textbf{Do not simply copy-paste it, retype it}. Make is very
persnickety about whitespace: the recipe (the second line, starting with
\texttt{dc\_shell-t}) must begin with a single tab character, not spaces. Once
you have the rule added and the correct names in
\texttt{tut\_synth.tcl}, run \texttt{make syn} to synthesize and simulate the
synthesized design.
This will produce four files. Please open them and read them.
\begin{enumerate}
\item \texttt{two\_bit\_pred.chk} -- This file contains errors if something
went wrong; otherwise it will be empty (or will occasionally contain a single number, which you can ignore).
\item \texttt{two\_bit\_pred.ddc} -- This file contains the proprietary
Synopsys representation of the synthesized design. It can be included
like a Verilog design file in synthesis, either to be optimized again or
as a black box.
\item \texttt{two\_bit\_pred.rep} -- This file contains the timing report
for your design. We will cover it in greater detail in Lab 2.
\item \texttt{two\_bit\_pred.vg} -- This file contains the structural
	Verilog that the synthesis tool generated from your design and design
	constraints.
\end{enumerate}
\section{Assignment}
You will now write and debug a design from scratch, though you will probably
want to use the designs we've shown you as templates.
\subsection{Design}
\begin{figure}[H]
\centering
\begin{tikzpicture}[auto, node distance=3cm,>=latex',every text node
part/.style={align=center}]
\def\blockdist{2em}
\def\edgedist{2em}
\begin{pgfonlayer}{foreground}
\node[state] (center) {grant to \\ neither};
\node[state,left of=center] (left) {grant to \\ A};
\node[state,right of=center] (right) {grant to \\ B};
\draw[->] (center.145) -- node[pos=0.5,anchor=south] {\small A request}
(left.35);
\draw[->] (left.325) -- node[pos=0.5,anchor=north] {\small no A request}
(center.215);
\draw[->] (center.35) -- node[pos=0.5,anchor=south] {\small B request, \\ no
A request} (right.145);
\draw[->] (right.215) -- node[pos=0.5,anchor=north] {\small no B
request} (center.325);
\draw[->] (center) to[out=255,in=285,looseness=5]
node[pos=0.5,anchor=north] {\small no request} (center);
\draw[->] (left) to[out=255,in=285,looseness=5]
node[pos=0.5,anchor=north] {\small A request} (left);
\draw[->] (right) to[out=255,in=285,looseness=5]
node[pos=0.5,anchor=north] {\small B request} (right);
\end{pgfonlayer}
\begin{pgfonlayer}{background}
\path (center.north -| left.west)+(-1,1.0) node (a) {};
\path (right.east |- right.south)+(1,-1.75) node (b) {};
\path[fill=RoyalBlue!25, rounded corners, draw=black!75, dashed] (a) rectangle (b);
\end{pgfonlayer}
\end{tikzpicture}
\caption{This is a Moore state machine for an arbiter, which conceptually
would be connected to two requesters, A and B, at some higher level, and
should provide signals to each indicating whether control has been
granted to it.}
\label{fig:state}
\end{figure}
Begin by examining the state machine in \cref{fig:state}. What are the inputs
and the outputs? What state needs to be stored, and given this, what registers
do you need? What does the combinational logic look like?
\subsection{Implementation}
The Verilog you write to implement the state machine above should be put into a
file called \texttt{arbiter.v}. You will also need a testbench,
\texttt{arbiter\_test.v}, to test the module. Ideally you should try to write a
testbench that produces the correct output for the module to test against, but
for this lab that is not vital.
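As a starting point, one plausible interface is sketched below; the port names
are our suggestion, not a requirement, and the state machine logic is
deliberately left for you to write:
\begin{minted}[frame=lines,obeytabs,tabsize=4]{verilog}
// Hypothetical interface for arbiter.v -- one request input and one
// grant output per requester, plus clock and reset for the state
// register of the Moore machine in the figure above.
module arbiter(input clock, input reset,
               input a_req, input b_req,
               output a_grant, output b_grant);
    // State register and next-state/output logic go here,
    // implementing the three states of the diagram.
endmodule
\end{minted}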
\subsection{Testing}
Compile, run and debug your design using the tools and techniques described
above. Note that you will need to modify the \texttt{Makefile}.
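For instance, pointing the build at the new files usually amounts to editing the
file variables near the top of the \texttt{Makefile}. The sketch below assumes a
testbench variable named \texttt{TESTBENCH}; only \texttt{SIMFILES} is confirmed
above, so match whatever names your \texttt{Makefile} actually uses:
\begin{minted}[frame=lines,obeytabs,tabsize=4]{makefile}
# Point the simulation at the new design and its testbench.
# TESTBENCH is a hypothetical variable name; check your Makefile.
SIMFILES  = arbiter.v
TESTBENCH = arbiter_test.v
\end{minted}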
\subsection{Synthesize}
Modify the \texttt{synth.tcl} provided with this lab to synthesize the module
you've just written. Synthesize the module. Verify that it synthesized
correctly.
\section{Submission}
You must prove that your testbench thoroughly tests your behavioral and structural Verilog, using either DVE or GTKWave. Place yourself on the \href{https://oh.eecs.umich.edu/courses/eecs470}{\underline{help queue}} during office hours once you're confident you've completed the lab satisfactorily. The assignment is due by the end of next week's lab.
% Once you're done with the assignment, you will need to submit it. To do this,
% you will run the following command:
% \noindent
% \texttt{\$ /afs/umich.edu/user/w/c/wcunning/Public/470submit -l1 lab1}
\end{document}
"alphanum_fraction": 0.7440363499,
"avg_line_length": 48.4991652755,
"ext": "tex",
"hexsha": "4e91ffff6613ecac66d22c387aedc91f464b6380",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-03-19T14:11:52.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-03-19T14:11:52.000Z",
"max_forks_repo_head_hexsha": "33b44f4b88cdd43930d4ccbdbbb82af914f43c22",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "jieltan/OpenCompArchCourse",
"max_forks_repo_path": "labs/lab1/assignment/tex/eecs470lab1assignment.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "33b44f4b88cdd43930d4ccbdbbb82af914f43c22",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "jieltan/OpenCompArchCourse",
"max_issues_repo_path": "labs/lab1/assignment/tex/eecs470lab1assignment.tex",
"max_line_length": 345,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "33b44f4b88cdd43930d4ccbdbbb82af914f43c22",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "jieltan/OpenCompArchCourse",
"max_stars_repo_path": "labs/lab1/assignment/tex/eecs470lab1assignment.tex",
"max_stars_repo_stars_event_max_datetime": "2022-01-15T14:07:10.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-03-19T17:25:16.000Z",
"num_tokens": 7639,
"size": 29051
} |
\documentclass{article}
\usepackage{verbatim,amssymb,amsmath,array}
\usepackage{vo}
\pagestyle{empty}
\numberwithin{equation}{section}
\begin{document}
\hide{$
\newcommand{\Q}{\mathbb Q}
\newcommand{\R}{\mathbb R}
\newcommand{\C}{\mathbb C}
\newcommand{\Z}{\mathbb Z}
\newcommand{\N}{\mathbb N}
\newcommand{\Ga}{\alpha}
\newcommand{\Gb}{\beta}
\newcommand{\GD}{\Delta}
\newcommand{\Gd}{\delta}
\newcommand{\Go}{\omega}
\newcommand{\GO}{\Omega}
\newcommand{\GL}{\Lambda}
\newcommand{\Gr}{\rho}
\newcommand{\Gs}{\sigma}
\newcommand{\GS}{\Sigma}
\newcommand{\Gt}{\tau}
\newcommand{\Ge}{\varepsilon}
\newcommand{\Gz}{\zeta}
\newcommand{\p}{\partial}
\newcommand{\rnk}{\operatorname{rk}}
\newcommand{\inc}{\operatorname{in}}
\newcommand{\Ker}{\operatorname{Ker}}
\newcommand{\Hom}{\operatorname{Hom}}
\newcommand{\Int}{\operatorname{Int}}
\renewcommand{\Im}{\operatorname{Im}}
\newcommand{\lk}{\operatorname{lk}}
\newcommand{\sign}{\operatorname{sign}}
\newcommand{\fcs}{\mathcal R}
\newcommand{\ecs}{\mathcal E}
\newcommand{\vcs}{\mathcal V}
\renewcommand{\O}{\mathcal O}
\newcommand{\sminus}{\smallsetminus}
$}
\spnewtheorem{Th}{Theorem}[section]{\bf}{\it}
\renewcommand{\theTh}{\thesection.\Alph{Th}}
\spnewtheorem{cor}[Th]{Corollary}{\bf}{\it}
\spnewtheorem{proc}[Th]{}{\bf}{\it}
\spnewtheorem{lem}[Th]{Lemma}{\bf}{\it}
\spnewtheorem{ATh}{Theorem}[subsection]{\bf}{\it}
\renewcommand{\theATh}{\Alph{subsection}.\Alph{ATh}}
\spnewtheorem{Acor}[ATh]{Corollary}{\bf}{\it}
\spnewtheorem{Aproc}[ATh]{}{\bf}{\it}
\spnewtheorem{Alem}[ATh]{Lemma}{\bf}{\it}
\title{Twisted acyclicity of circle\\
and link signatures}
\author{\Author{Oleg Viro}\authorInfo{
Mathematics Department, Stony Brook University, Stony Brook
NY 11794-3651, USA\\[email protected]}}
\date{}
\maketitle
\abstract
{Homology of the circle with non-trivial local coefficients
is trivial. From this well-known fact we deduce geometric
corollaries about links of codimension two. In particular,
the Murasugi-Tristram signatures are extended to invariants of
links formed of arbitrary oriented closed codimension two
submanifolds of an odd-dimensional sphere. The novelty is
that the submanifolds are not assumed to be disjoint, but
are transversal to each other, and the signatures are parametrized
by points of the whole torus. Murasugi-Tristram inequalities
and their generalizations are also extended to this setup.}
\section{Introduction}\label{s1}
The goal of this paper is to simplify and generalize a part
of classical link theory based on various signatures of links
(defined by Trotter \cite{Trot}, Murasugi \cite{Mura1}, \cite{Mura2},
Tristram \cite{Trist}, Levine \cite{Levine1}, \cite{Levine2},
Smolinsky \cite{Smolinsky}, Florens \cite{Florens1} and
Cimasoni and Florens \cite{CimaFlor}).
This part is known for its relations to topology of 4-dimensional
manifolds, see \cite{Trist}, \cite{Viro1}, \cite{Viro2}, \cite{Gilmer},
\cite{KaufTayl},
and applications in
topology of real algebraic curves \cite{Orevkov1}, \cite{Orevkov2} and
\cite{Florens1}.
Similarity of the signatures to the new invariants
\cite{Rasm}, \cite{OzsSz1},
which were defined in the new frameworks of link homology theories
and had spectacular applications \cite{Rasm}, \cite{Livingst},
\cite{Shum} to problems on classical link cobordisms, gives a new
reason to revisit the old theory.
There are two ways to introduce the signatures:
the original 3-dimensional one, via Seifert surfaces and Seifert forms, and
the 4-dimensional one, via the intersection forms of cyclic coverings of
the 4-ball branched over surfaces. I believe this paper clearly
demonstrates advantages of the latter, 4-dimensional approach, which
provides more conceptual definitions that work easily in situations
hardly accessible to the Seifert form approach.
In the generalization considered here the classical links are
replaced by collections of oriented codimension two submanifolds
that are transversal to each other.
Technically the work is based on a systematic use of twisted homology
and the intersection
forms in twisted homology. Only the simplest kind of
twisted homology is used, the one with coefficients in $\C$, see
the Appendix.
\subsection{Twisted acyclicity of a circle}\label{s1.2}
A key property of twisted homology, which makes the whole story
possible, is the following well-known fact, which I call
{\sfit twisted acyclicity of a circle\/}:
{\it Twisted homology of a circle with coefficients in $\C$ and
non-trivial monodromy vanishes.}
This implies that the twisted homology of this kind completely
ignores parts of the space formed by circles along which
the monodromy of the coefficient system is non-trivial
(for precise and detailed formulation see Section \ref{sT.2}).
\subsection{How the acyclicity works}\label{s1.3}
In particular, twisted acyclicity of a circle implies
that the complement of a tubular neighborhood of a
link looks like a closed manifold, because the boundary, being
fibered to circles, is invisible for the twisted homology.
Moreover, the same holds true for a collection of pairwise
transversal generically immersed closed manifolds of codimension 2 in
arbitrary closed manifold, provided the monodromy around
each manifold is non-trivial.
The twisted homology does not feel the intersection of the
submanifolds as a singularity.
The complement of a cobordism between such immersed
links looks (again, from the point of view of twisted
homology) like a compact cobordism between closed manifolds.
This, together with classical results about signatures of manifolds
and relations between twisted homology and
homology with constant coefficients, allows us to deal with
a link of codimension two as if it were a single closed manifold.
\subsection{Organization of the paper}\label{s1.4}
I cannot assume that twisted homology is well known to the reader,
so I review the material related to it. Of course, the material on
non-twisted homology is not reviewed. The review is
limited to a very special twisted homology, the one with
complex coefficients.
More general twisted homology is not needed here.
The review is postponed to the appendices.
The reader somewhat familiar with twisted homology may visit
them when needed. The experts are invited
to look through the appendices, too.
We begin in Section \ref{s2} with a detailed exposition restricted
to the classical links. Section \ref{s3} is devoted to higher
dimensional generalization, including motivation for our choice of
the objects. Section \ref{s4} is devoted to {\sl span inequalities\/},
that is,
restrictions on homology of submanifolds of the ball, which span
a given link contained in the boundary of the ball. Section \ref{s5}
is devoted to {\sl slice inequalities\/}, which are restrictions on
homology of a link with given transversal intersection with a sphere
of codimension one.
\section{In the classical dimension}\label{s2}
\subsection{Classical knots and links}\label{s2.1}
Recall that a {\sfit classical knot\/} is a smooth simple closed
curve in the 3-sphere $S^3$. This is how one usually defines
classical knots. However, it is not the curve per se that
is really considered in classical knot theory, but
rather its placement in $S^3$. Classical knots incarnate the
idea of knottedness: both the curve and $S^3$ are topologically
standard, but the position of the curve in $S^3$ may be
arbitrarily complicated topologically.
Therefore a classical knot
is rather a pair $(S^3,K)$, where $K$ is a smooth
submanifold of $S^3$ diffeomorphic to $S^1$.
A {\sfit classical link\/} is a pair $(S^3,L)$,
where $L$ is a smooth closed one-dimensional submanifold
of $S^3$. If $L$ is connected, then this is a knot.
\subsection{Twisted homology of a classical link
exterior}\label{s2.2}
An {\sfit exterior\/} of a classical link $(S^3,L)$ is the complement
of an open tubular neighborhood of $L$. This is a compact 3-manifold
with boundary. The boundary is the boundary of the tubular neighborhood
of $L$. Hence, this is the total space of a locally trivial fibration
over $L$ with fiber $S^1$. An exterior $X(L)$ is a deformation retract
of the complement $S^3\sminus L$. It is a nice replacement for
$S^3\sminus L$, because $\Int X(L)$ is homeomorphic to
$S^3\sminus L$, but $X(L)$ is a compact manifold and has a nice boundary.
If $L$ consists of $m$ connected components, $L=K_1\cup\dots\cup K_m$,
then by the Alexander duality $H_0(X(L))=\Z$, $H_1(X(L))=\Z^m$,
$H_2(X(L))=\Z^{m-1}$
and $H_i(X(L))=0$ for $i\ne 0,1,2$. The group $H_1(X(L))$ is dual to
$H_1(L)$ with respect to the Alexander linking pairing
$H_1(L)\times H_1(X(L))\to\Z$. Hence a basis of $H_1(L)$ defines a dual
basis in $H_1(X(L))$. An orientation of $L$ determines a basis
$[K_1]$, \dots, $[K_m]$ of $H_1(L)$, and the dual basis of $H_1(X(L))$,
which is realized by meridians $M_1$, \dots, $M_m$ positively linked
to $K_1$, \dots, $K_m$, respectively. (The meridians are fibers of a
tubular fibration $\p X(L)\to L$ over points chosen on the corresponding
components.)
Therefore, if $L$ is oriented, then
a local coefficient system on $X(L)$ with fiber $\C$ is defined
by an $m$-tuple of complex numbers $(\Gz_1,\dots,\Gz_m)$, the images
under the monodromy homomorphism
$H_1(X(L))\to\C^\times$ of the generators $[M_1]$, \dots, $[M_m]$ of
$H_1(X(L))$.
Thus for an oriented classical link $L$ consisting of $m$ connected
components, local coefficient systems
on $X(L)$ with fiber $\C$ are parametrized by $(\C^\times)^m$.
\subsection{Link signatures}\label{s2.3}
Let $L=K_1\cup\dots\cup K_m\subset S^3$ be a classical
link, $\Gz_i\in\C$, $|\Gz_i|=1$,
$\Gz=(\Gz_1,\dots,\Gz_m)\in (S^1)^m$, and let $\mu:H_1(S^3\sminus
L)\to\C^\times$ map the class of a meridian of $K_i$ positively
linked with $K_i$ to $\Gz_i$.
Let $F_1,\dots,F_m\subset D^4$ be smooth oriented surfaces transversal
to each other with $\p F_i=F_i\cap\p D^4=K_i$. Extend the tubular
neighborhood of $L$ involved in the construction of $X(L)$ to a
collection of tubular neighborhoods $N_1$, \dots, $N_m$ of
$F_1$, \dots, $F_m$, respectively.
Without loss of generality we may choose the $N_i$ in such a way that they
intersect each other in the simplest way.
Namely, each connected
component $B$ of $N_i\cap N_j$ contains only one point of
$F_i\cap F_j$ and no points of the other $F_k$,
and consists of entire fibers of $N_i$ and $N_j$,
so that the fibers define a structure of bi-disk $D^2\times D^2$ on $B$.
To achieve this, one makes the fibers of the tubular fibration
$N_i\to F_i$ at each intersection point of $F_i$ and $F_j$ coincide
with a disk in $F_j$ and then diminishes all the $N_i$ appropriately.
Now let us extend $X(L)$ to $X(F)=D^4\sminus\cup_{i=1}^m\Int N_i$.
This is a compact 4-manifold. Its boundary contains $X(L)$, the rest of
it is a union of pieces of boundaries of $N_i$ with $i=1,\dots, m$.
These pieces are fibered over the corresponding pieces of $F_i$ with
fiber $S^1$.
By the Alexander duality, the orientation of $F_i$ gives rise to a
homomorphism $H_1(X(F))\to\Z$ that maps a homology class to its
linking number with $F_i$.
These homomorphisms altogether determine a homomorphism
$H_1(X(F))\to \Z^m$. For any $\Gz=(\Gz_1,\dots,\Gz_m)$, the composition
of this homomorphism with the homomorphism
$$\Z^m\to(\C^\times)^m:
(n_1,\dots,n_m)\mapsto(\Gz_1^{n_1},\dots,\Gz_m^{n_m})$$
is a homomorphism $H_1(X(F))\to(\C^\times)^m$ extending $\mu$.
If each $F_i$ has no closed connected components, then this extension
is unique.
Let us denote it by $\overline\mu$.
According to \ref{sT.4.6},
in $H_2(X(F);\C_{\overline\mu})$ there is a
Hermitian intersection form. Denote its signature by $\Gs_{\Gz}(L)$.
\begin{Th} $\Gs_{\Gz}(L)$
does not depend on $F_1,\dots,F_m$.
\end{Th}
\begin{proof} Any $F'_i$ with $\p F'_i=F'_i\cap\p
D^4=K_i$ is cobordant to $F_i$.
The cobordisms $W_i\subset D^4\times I$ can be made
pairwise transversal.
They define a cobordism $D^4\times I\sminus\cup_i\Int N(W_i)$
between $X(F)$ and
$X(F')$.
By Theorem \ref{VanSign},
$$\Gs_\Gz\bigl(\p(D^4\times I\sminus\cup_i\Int N(W_i))\bigr)=0.$$
The manifold $\p(D^4\times I\sminus\cup_i\Int N(W_i))$ is the union of
$X(F)$, $-X(F')$ and a {\it
homologically negligible\/} part
$\p (N(\cup_i\Int W_i))$,
the boundary of a regular neighborhood
of the cobordism $\cup_iW_i$ between $\cup_iF_i$ and $\cup_iF'_i$.
By Theorem \ref{AddOfSign},
$$\Gs_\Gz\bigl(\p(D^4\times I\sminus\cup_i\Int
N(W_i))\bigr)=\Gs_\Gz(D^4\sminus\cup_iF_i)-\Gs_\Gz(D^4\sminus\cup_iF'_i).$$
Hence,
$\Gs_\Gz(D^4\sminus\cup_iF_i)=\Gs_\Gz(D^4\sminus\cup_iF'_i)$.
\end{proof}
\subsection{Colored links}\label{s2.4} In the definition of signature
$\Gs_{\Gz}(L)$ above one needs to numerate the components $K_i$ of $L$
to associate to each of them the corresponding component $\Gz_i$ of $\Gz$,
but there is no need to require connectedness of each $K_i$.
This leads to a notion of colored link.
An {\sl $m$-colored link} $L$ is an oriented
link in $S^3$ together with a map (called {\sl coloring\/}) assigning
to each connected component of $L$ a color in $\{1,\dots, m\}$.
The sublink $L_i$ consists of the components of $L$ with color $i$,
for $i=1,\dots, m$.
For an $m$-colored link $L=L_1\cup\dots\cup L_m$ and
$\Gz=(\Gz_1,\dots,\Gz_m)\in (S^1)^m$, the signature $\Gs_\Gz(L)$ is
defined as above, but each component $K_j$ colored with color $i$ is
associated to $\Gz_i$.
\subsection{Relations to other link signatures}\label{s2.5}
If $\Gz_i=-1$ for all $i=1,\dots,m$, then the signature $\Gs_{\Gz}(L)$
coincides
with the Murasugi signature $\xi(L)$ introduced in \cite{Mura2}.
If all $\Gz_i$ are roots of unity whose degree is a power of a prime
number, and all linking numbers $\lk(L_i,L_j)$ vanish, then $\Gs_{\Gz}(L)$
coincides with the signature defined by
Florens \cite{Florens1}.
In the most general case, $\Gs_{\Gz}(L)$ coincides with the signature
defined for arbitrary $\Gz$ by Cimasoni and Florens \cite{CimaFlor}
using a 3-dimensional approach, with a version of the Seifert surface,
the $C$-complex.
\section{In higher dimensions}\label{s3}
\subsection{Apology for the generalization of higher dimensional
links}\label{s3.1}
There is a spectrum of objects
considered as generalizations of classical knots and links.
The closest generalizations of classical knots
are pairs $(S^n,K)$, where $K$ is a smooth submanifold
diffeomorphic to $S^{n-2}$. Then the requirements on $K$
are weakened. Say, one may require $K$ to be only homeomorphic
to $S^{n-2}$, not diffeomorphic. Or just a homology sphere
of dimension $n-2$. The codimension is important in order to
keep any resemblance to classical knots.
In the same spirit, for the closest higher-dimensional
counter-part of classical links one takes a pair consisting
of $S^n$ and a collection of its disjoint smooth
submanifolds diffeomorphic to $S^{n-2}$. One may
weaken the restrictions on the submanifolds, up to arbitrary
closed submanifolds.
{\bfit I suggest allowing transversal intersections of the
submanifolds.\/}
Of course, the main excuse for this is that
some results can be extended to this setup.
Here are a couple of other reasons.
First, in the classical dimension, it is easy for submanifolds to be
disjoint. Generic curves in the 3-sphere are disjoint. If
they intersect, it is a miracle or, rather, has a special
cause.
Generic submanifolds of codimension two in a manifold
of dimension $>3$ intersect. If they do {\sfit not\/} intersect, this
is a miracle, or consequence of a special cause.
Second, classical links emerge naturally as links of
singular points
of complex algebraic curves in $\C^2$. Recall that for an
algebraic curve $C\subset\C^2$ and a point $p\in C$, the pair
$(\p B,\p B\cap C)$, where $B$ is a sufficiently small ball
centered at $p$, is well-defined up to
diffeomorphism, and it is called the {\sfit link of
$C$ at $p$}.
An obvious generalization of this definition to an
algebraic hypersurface $C\subset\C^n$ gives rise to
a pair $(S^{2n-1},K)$ with connected $K$. It cannot be
a union of {\it disjoint\/} submanifolds of $S^{2n-1}$.
It would not be difficult to extend the results of this paper
to a more general setup. For example, one can replace the ambient
sphere with a homology sphere, or even more general manifold.
However, one should stop somewhere. The author prefers this early
stopping point, because the level of generality accepted here suffices for
demonstrating the new opportunities opened by a systematic use of
twisted homology. On the other hand, further generalizations
can make the formulations more cumbersome.
\subsection{Colored links}\label{s3.2}
By an {\sl $m$-colored link of dimension\/} $n$ we shall mean a collection
of $m$ oriented smooth closed $n$-dimensional submanifolds $L_1$, \dots,
$L_m$ of the sphere $S^{n+2}$ such that any sub-collection has
transversal intersection. The latter means that for any
$x\in L_{i_1}\cap\dots\cap L_{i_k}$ the tangent spaces $T_xL_{i_1}$, \dots,
$T_xL_{i_k}$ are transverse, that is, $\dim(T_xL_{i_1}\cap\dots\cap
T_xL_{i_k})=n+2-2k$.
\subsection{Generic configurations of submanifolds}\label{s3.3}
More generally, an $m$-colo\-red configuration of transversal submanifolds
in a smooth manifold $M$ is a family of $m$ smooth
submanifolds $L_1$, \dots, $L_m$ of $M$ such that any sub-collection has
transversal intersection. If $M$ has a boundary, the submanifolds are
assumed to be transversal to the boundary, as well as the intersection of
any sub-collection. Furthermore, assume that $\p M\cap L_i=\p L_i$ for any
$i=1,\dots,m$.
As above, in Section \ref{s2.3}, for any $m$-colored configuration $L$
of transversal submanifolds $L_1$, \dots, $L_m$ in $M$ one can find a
collection of their tubular neighborhoods $N_1$, \dots, $N_m$ which agree
with each other in the sense that for any sub-collection $L_{i_1}$, \dots,
$L_{i_\nu}$ the intersection of the corresponding neighborhoods
$N_{i_1}\cap\dots\cap N_{i_\nu}$ is a neighborhood of the intersection
$L_{i_1}\cap\dots\cap L_{i_\nu}$ fibered over this intersection with
the corresponding poly-disk fiber.
Denote the complement $M\sminus\cup_{i=1}^m\Int N_i$ by $X(L)$ and call it
an {\sfit exterior\/} of $L$. This is a smooth manifold with a system of
corners on the boundary. The differential type of the exterior does
not depend on the choice of neighborhoods. Moreover, one can eliminate the
choice of neighborhoods and their deletion from the definition. Instead,
one can perform a sort of real blow-up of $M$ along $L_1$, \dots, $L_m$.
However, for the purposes of this paper it is easier to stay with the
choices.
\subsection{Link signatures}\label{s3.4}
Let $L=L_1\cup\dots\cup L_m$ be an $m$-colored link of dimension $2n-1$ in
$S^{2n+1}$.
As is well known (see, e.g., \cite{Levine1}), for each oriented closed
codimension 2 submanifold $K$ of $S^{2n+1}$
there exists an oriented smooth compact submanifold
$F$ of $D^{2n+2}$ such that $\p F=K$. Choose for each $L_i$ such a
submanifold of $D^{2n+2}$, denote it by $F_i$, and make all the $F_i$
transversal to each other by small perturbations.
As a union of $m$-colored transversal submanifolds of $D^{2n+2}$,
$F= F_1\cup \dots\cup F_m$ has an exterior $X(F)$.
By the Alexander duality, $H^1(X(F);\C^\times)$ is naturally isomorphic to
$H_{2n}(F,L;\C^\times)$. Let $\Gz=(\Gz_1,\dots,\Gz_m)\in(S^1)^m$.
Take $\sum_{i=1}^m\Gz_i[F_i]\in H_{2n}(F,L;\C^\times)$ and denote
by $\mu$ the Alexander dual cohomology class considered as a homomorphism
$H_1(X(F))\to\C^\times$. Denote by $\C_\mu$ the local coefficient system
on $X(F)$ corresponding to $\mu$.
According to \ref{sT.4.6}, in $H_{n+1}(X(F);\C_\mu)$ there is an
intersection form, which is Hermitian, if $n$ is odd, and skew-Hermitian,
if $n$ is even. Denote its signature by $\Gs_\Gz(L)$.
\begin{Th} $\Gs_{\Gz}(L)$
does not depend on $F_1,\dots,F_m$.
\end{Th}
\begin{proof} Any $F'_i$ with $\p F'_i=F'_i\cap\p
D^{2n+2}=L_i$ is cobordant to $F_i$.
The cobordisms $W_i\subset D^{2n+2}\times I$ can be made
pairwise transversal to form an $m$-colored configuration $W$ of
transversal submanifolds of $D^{2n+2}\times I$.
They define a cobordism $X(W)$
between $X(F)$ and
$X(F')$.
By Theorem \ref{VanSign},
$$\Gs_\Gz(\p X(W))=0.$$
The manifold $\p X(W)=\p\bigl((D^{2n+2}\times I)\sminus\cup_i\Int N(W_i)\bigr)$
is the union of
$X(F)$, $-X(F')$ and a {\it
homologically negligible\/} part
$\p (N(\cup_i\Int W_i))$,
the boundary of a regular neighborhood
of the cobordism $\cup_iW_i$ between $F$ and $F'$.
By Theorem \ref{AddOfSign},
$$\Gs_\Gz(\p X(W))=\Gs_\Gz(X(F))-\Gs_\Gz(X(F')).$$
Hence,
$\Gs_\Gz(X(F))=\Gs_\Gz(X(F'))$.
\end{proof}
\section{Span inequalities}\label{s4}
Let $L=L_1\cup\dots\cup L_m$ be an $m$-colored link of dimension $2n-1$
in $S^{2n+1}$. Let $F=F_1\cup\dots\cup F_m$ be an $m$-colored
configuration of transversal oriented compact $2n$-dimensional
submanifolds of $D^{2n+2}$ with $\p F_i=F_i\cap\p D^{2n+2}=L_i$.
In this section we consider restrictions on homological
characteristics of $F$ in terms of invariants of $L$.
\subsection{History}\label{s4.1}
The first restrictions of this sort were found by Murasugi \cite{Mura1}
and Tristram \cite{Trist} for classical (1-colored) links. To $m$-colored
classical links and pairwise disjoint surfaces $F_i$ the Murasugi-Tristram
inequalities were generalized by Florens \cite{Florens1}. A further
generalization to $m$-colored classical links and intersecting $F_i$
was found by Cimasoni and Florens \cite{CimaFlor}. Higher dimensional
generalizations for $1$-colored links were found by the author
\cite{Viro2}, \cite{Viro3}.
\subsection{No-nullity span inequalities}\label{s4.2}
The most general results in this direction are quite cumbersome.
Therefore, let me start with weak but simple ones.
Recall that $\Gs_\Gz(L)$ can be obtained from $F$: for an appropriate
local coefficient system $\C_\mu$ on $X(F)$, this is the signature
of a Hermitian intersection form defined in $H_{n+1}(X(F);\C_\mu)$.
The absolute value of the signature of a Hermitian form cannot exceed the
dimension of the underlying space. In particular,
\begin{equation}\label{IneQSleD}
|\Gs_\Gz(L)|\le\dim_\C H_{n+1}(X(F);\C_\mu).
\end{equation}
This can be considered as a restriction on a homological
characteristic of $F$ in terms of invariants of $L$. However,
$\dim_\C H_{n+1}(X(F);\C_\mu)$ is not a convenient characteristic of $F$.
It can be estimated in terms of more convenient ones.
Let $\Gz=(\Gz_1,\dots,\Gz_m)\in (S^1)^m$.
Let $p_1,\dots,p_k\in\Z[t_1,t_1^{-1},\dots,t_m,t_m^{-1}]$ be
generators of the ideal of relations
satisfied by complex numbers $\Gz_i$. Let $d$ be the
greatest common divisor of the integers
$p_1(1,\dots,1)$, \dots, $p_k(1,\dots,1)$, if at least one of these
integers does not vanish, and zero otherwise. Cf. \ref{sT.3.6}. Let
$$
P=\begin{cases} \Z/p\Z, &\text{ if }d>1\text{ and }p\text{ is a prime
divisor of }d\\
\Q, &\text{ if }d=0
\end{cases}
$$
By \ref{EstTwHom},
$$\dim_\C H_{n+1}(X(F);\C_\mu)\le
\dim_{P} H_{n+1}(X(F);P).
$$
The advantage of passing to homology with non-twisted coefficients is
that we can use the Alexander duality: \begin{multline*}
H_{n+1}(X(F);P)=H_{n+1}(D^{2n+2}\sminus F;P)\\=
H^{n+1}(D^{2n+2},\p D^{2n+2}\cup F;P)\\=
H^{n}(\p D^{2n+2}\cup F;P)=
H^n(F,L;P).
\end{multline*}
Hence,
$$|\Gs_\Gz(L)|\le \dim_{P}H_n(F,L;P).
$$
\subsection{General span inequalities}\label{s4.3}
The inequality \eqref{IneQSleD} can be improved. Indeed, the manifold
$X(F)$ has a non-empty boundary. Therefore, its intersection form may be
degenerate and the right hand side of \eqref{IneQSleD} may be replaced by
a smaller quantity, the rank of the form. The rank is known to be
the rank of the homomorphism
$H_{n+1}(X(F);\C_\mu)\to H_{n+1}(X(F),\p X(F);\C_\mu)$.
Let us estimate this rank.
\begin{lem}\label{Lemma1Th7}
For any exact sequence $\dots\overset{\rho_{k+1}}\to C_k\overset{\rho_k}\to
C_{k-1}\overset{\rho_{k-1}}\to\dots$ of vector spaces
and any integers $n$ and $r$
\begin{equation}\label{eqL1Th7} \rnk(\rho_{n+1})+\rnk(\rho_{n-2r})
=\sum_{s=0}^{2r}(-1)^s\dim C_{n-s}
\end{equation}
\end{lem}
\begin{proof}The Euler characteristic of the exact sequence
$$
0\to\Im\rho_{n+1}\hookrightarrow C_n\overset{\rho_{n}}\to
C_{n-1}\to\dots\overset{\rho_{n-2r+1}}\to C_{n-2r}\to\Im\rho_{n-2r}\to0
$$
is the difference between the left and right hand sides of \eqref{eqL1Th7}.
On the other hand, it vanishes, as the Euler characteristic of an exact
sequence.
\end{proof}
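For example, with $r=0$ the equality \eqref{eqL1Th7} reads
$\rnk(\rho_{n+1})+\rnk(\rho_{n})=\dim C_{n}$, which is just exactness at
$C_n$: since $\Im\rho_{n+1}=\Ker\rho_n$, we have
$\rnk\rho_{n+1}=\dim C_n-\rnk\rho_n$.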
\begin{lem}\label{Lemma2Th7}
Let $X$ be a topological space, $A$ its subspace, $\xi$ a local coefficient
system on $X$ with fiber $\C$. Then for any natural $n$ and $r\le\frac{n}2$
\begin{multline}
\rnk(H_{n+1}(X;\xi)\to H_{n+1}(X,A;\xi))+
\rnk(H_{n-2r}(X;\xi)\to H_{n-2r}(X,A;\xi))\\
=\sum_{s=0}^{2r}(-1)^sb_{n+1-s}(X,A)
-\sum_{s=0}^{2r}(-1)^sb_{n-s}(A)
+\sum_{s=0}^{2r}(-1)^sb_{n-s}(X)
\end{multline}
where $b_k(*)=\dim_\C H_k(*;\xi)$
\end{lem}
\begin{proof}
Apply Lemma \ref{Lemma1Th7} to the homology sequence of pair $(X,A)$ with
coefficients in $\xi$.
\end{proof}
\begin{Th}\label{Th7} For any integer $r$ with $0\le r\le\frac{n}2$,
\begin{multline}\label{IneQSpan}
|\Gs_{\Gz}(L)|+\sum_{s=0}^{2r}(-1)^s\dim_\C
H_{n-s}(S^{2n+1}\sminus L;\C_\Gz)
\\ \le \sum_{s=0}^{2r}(-1)^s\dim H_{n+1+s}(F,L;P)
+\sum_{s=0}^{2r}(-1)^s\dim H_{n+s}(F;P)
\end{multline}
\begin{multline}\label{IneQSpan2}
|\Gs_{\Gz}(L)|+\sum_{s=0}^{2r}(-1)^s\dim_\C
H_{n+1+s}(S^{2n+1}\sminus L;\C_\Gz)
\\ \le \sum_{s=0}^{2r}(-1)^s\dim H_{n-s}(F,L;P)
+\sum_{s=0}^{2r}(-1)^s\dim H_{n-s-1}(F;P)
\end{multline}
where $\Gz$ and $P$ are as in Section \ref{s4.2}.
\end{Th}
\begin{proof}As mentioned above,
\begin{equation}\label{eq1PfTh7}
|\Gs_\Gz(L)|\le \rnk(H_{n+1}(X(F);\C_\mu)\to H_{n+1}(X(F),\p X(F);\C_\mu)).
\end{equation}
By Lemma \ref{Lemma2Th7},
\begin{multline}\label{eq2PfTh7}
\rnk(H_{n+1}(X(F);\C_\mu)\to H_{n+1}(X(F),\p X(F);\C_\mu))\\
\le \sum_{s=0}^{2r}(-1)^s\dim_\C H_{n+1-s}(X(F),X(L);\C_\Gz)
-\sum_{s=0}^{2r}(-1)^s\dim_\C H_{n-s}(X(L);\C_\Gz)\\
+\sum_{s=0}^{2r}(-1)^s\dim_\C H_{n-s}(X(F);\C_\Gz).
\end{multline}
Summing up these inequalities and moving one of the sums from the right
hand side to the left, we obtain:
\begin{multline}\label{eq3PfTh7}
|\Gs_\Gz(L)|+\sum_{s=0}^{2r}(-1)^s\dim_\C H_{n-s}(X(L);\C_\Gz)\\
\le \sum_{s=0}^{2r}(-1)^s\dim_\C H_{n+1-s}(X(F),X(L);\C_\Gz)
+\sum_{s=0}^{2r}(-1)^s\dim_\C H_{n-s}(X(F);\C_\Gz).
\end{multline}
The left hand side of \eqref{eq3PfTh7} coincides with the left hand side
of \eqref{IneQSpan}, since $X(L)$ is a deformation retract of
$S^{2n+1}\sminus L$. The right hand side can be estimated using
Theorem \ref{EstTwHom}:
\begin{multline}\label{eq4PfTh7}
\sum_{s=0}^{2r}(-1)^s\dim_\C H_{n+1-s}(X(F),X(L);\C_\Gz)
+\sum_{s=0}^{2r}(-1)^s\dim_\C H_{n-s}(X(F);\C_\Gz)\\
\le\sum_{s=0}^{2r}(-1)^s\dim_P H_{n+1-s}(X(F),X(L);P)
+\sum_{s=0}^{2r}(-1)^s\dim_P H_{n-s}(X(F);P).
\end{multline}
Further,
$$H_{n+1-s}(X(F),X(L);P)=H_{n+1-s}(D^{2n+2}\sminus F,S^{2n+1}\sminus L;P).
$$
By the Alexander duality,
$$ H_{n+1-s}(D^{2n+2}\sminus F,S^{2n+1}\sminus L;P)
=
H^{n+1+s}(D^{2n+2},F;P).
$$
By exactness of the pair sequence, $H^{n+1+s}(D^{2n+2},F;P)=H^{n+s}(F;P)$.
Similarly,
\begin{multline*}
H_{n-s}(X(F);P)=H_{n-s}(D^{2n+2}\sminus F;P)\\
=H^{n+2+s}(D^{2n+2},F\cup S^{2n+1};P)\\
=H^{n+1+s}(S^{2n+1}\cup F;P)=H^{n+1+s}(F,L;P)
\end{multline*}
The last equality in this sequence holds true if $n+1+s<2n+1$, that is,
$s<n$.
Since $P$ is a field,
\begin{align}
\dim_P H^{n+s}(F;P)&=\dim_P H_{n+s}(F;P),\label{eq5PfTh7}\\
\dim_P H^{n+1+s}(F,L;P)&=\dim_P H_{n+1+s}(F,L;P).\label{eq6PfTh7}
\end{align}
Combining formulas \eqref{eq5PfTh7} and \eqref{eq6PfTh7} with the calculations
above and inequalities \eqref{eq4PfTh7} and \eqref{eq3PfTh7}, we obtain the
first desired inequality \eqref{IneQSpan}.
The inequalities \eqref{IneQSpan2} are proved similarly. Namely,
by Lemma \ref{Lemma2Th7}
\begin{multline}\label{eq7PfTh7}
\rnk(H_{n+1}(X(F);\C_\mu)\to H_{n+1}(X(F),\p X(F);\C_\mu))\\
\le \sum_{s=0}^{2r}(-1)^s\dim_\C H_{n+2+s}(X(F),X(L);\C_\Gz)
-\sum_{s=0}^{2r}(-1)^s\dim_\C H_{n+1+s}(X(L);\C_\Gz)\\
+\sum_{s=0}^{2r}(-1)^s\dim_\C H_{n+1+s}(X(F);\C_\Gz).
\end{multline}
Summing up inequalities \eqref{eq1PfTh7} and \eqref{eq7PfTh7} and
moving one of the sums from the right
hand side to the left, we obtain:
\begin{multline}\label{eq8PfTh7}
|\Gs_\Gz(L)|+\sum_{s=0}^{2r}(-1)^s\dim_\C H_{n+1+s}(X(L);\C_\Gz)\\
\le \sum_{s=0}^{2r}(-1)^s\dim_\C H_{n+2+s}(X(F),X(L);\C_\Gz)
+\sum_{s=0}^{2r}(-1)^s\dim_\C H_{n+1+s}(X(F);\C_\Gz).
\end{multline}
After this, the same estimates and transformations as in the proof of
\eqref{IneQSpan} give rise to \eqref{IneQSpan2}.
\end{proof}
\subsection{Nullities}\label{s4.4}
The sum on the left hand side of the inequalities \eqref{IneQSpan} is an
invariant of the link $L$. Its special case for classical links with $r=0$
is known as the $\Gz$-nullity and appeared in the Murasugi-Tristram
inequalities and their generalizations.
Denote $\sum_{s=0}^{2r}(-1)^s\dim_\C
H_{n-s}(S^{2n+1}\sminus L;\C_\mu)$ by $n^r_{\Gz}(L)$ and call it
the {\sfit $r$th $\Gz$-nullity of $L$\/}.
By the Poincar\'{e} duality (see \ref{sT.4.3}),
$H_{n-s}(S^{2n+1}\sminus L;\C_\mu)$ is isomorphic to
$H^{n+1+s}(S^{2n+1}\sminus L;\C_\mu)$. The latter vector space
is dual to $H_{n+1+s}(S^{2n+1}\sminus L;\C_{\mu^{-1}})$ and anti-isomorphic
to $H_{n+1+s}(S^{2n+1}\sminus L;\C_{\mu})$, see \ref{sT.4.5}.
Therefore,
\begin{equation}\label{null}
n^r_{\Gz}(L)=\sum_{s=0}^{2r}(-1)^s\dim_\C H_{n+1+s}(S^{2n+1}\sminus
L;\C_\mu)
\end{equation}
and $n^r_\Gz(L)=n^r_{\overline{\Gz}}(L)$.
This sum is a part of the left hand side of \eqref{IneQSpan2}.
Now we can rewrite Theorem \ref{Th7} as follows:
\begin{Th}\label{Th8} For any integer $r$ with $0\le 2r\le n$
\begin{multline}\label{null2}
|\Gs_{\Gz}(L)|+n^r_\Gz(L)
\\ \le \sum_{s=0}^{2r}(-1)^s\dim H_{n+s+1}(F,L;P)
+\sum_{s=0}^{2r}(-1)^s\dim H_{n+s}(F;P)
\end{multline}
\begin{multline}\label{null3}
|\Gs_{\Gz}(L)|+n^r_\Gz(L)
\\ \le \sum_{s=0}^{2r}(-1)^s\dim H_{n-s}(F,L;P)
+\sum_{s=0}^{2r}(-1)^s\dim H_{n-s-1}(F;P)
\end{multline}
\end{Th}
If the $F_i$ are pairwise disjoint, then the right hand sides of \eqref{null2}
and \eqref{null3} are equal due to the Poincar\'{e}-Lefschetz duality
for $F$; but we do not assume that $F=\cup F_i$ is a manifold, and
therefore the inequalities \eqref{null2} and \eqref{null3} are not
equivalent and we have to keep both of them.
\section{Slice inequalities}\label{s5}
Again, as in the preceding section, let $L_1,\dots, L_m\subset S^{2n+1}$
be smooth oriented submanifolds, transversal to each other,
constituting an $m$-colored link $L=L_1\cup\dots\cup L_m$ of
dimension $2n-1$.
Let $\GL_i\subset S^{2n+2}$ be oriented closed smooth submanifolds
transversal to each other and to $S^{2n+1}$,
with $\GL_i\cap S^{2n+1}=L_i$. In this section we consider restrictions
on homological characteristics of $\GL=\cup_{i=1}^m\GL_i$ in terms of
invariants of link $L$. Of course, some results of this kind can be deduced
from the results of the preceding section, but an independent consideration
gives better results.
\subsection{No-nullity slice inequalities}\label{s5.1}
The most general results in this direction are quite cumbersome.
Therefore, let me start with weak but simple ones.
We will use the same algebraic objects as in the preceding
section. In particular, $\Gz=(\Gz_1,\dots,\Gz_m)\in (S^1)^m$,
$p_1,\dots,p_k\in\Z[t_1,t_1^{-1},\dots,t_m,t_m^{-1}]$ are
generators of the ideal of relations
satisfied by the complex numbers $\Gz_i$. The integer $d$ is the
greatest common divisor of the integers
$p_1(1,\dots,1)$, \dots, $p_k(1,\dots,1)$, if at least one of them
does not vanish, and $d=0$ otherwise. Cf. \ref{s4.2} and \ref{sT.3.6}.
Finally,
$$
P=\begin{cases} \Z/p\Z, &\text{ if }d>1\text{ and }p\text{ is a prime
divisor of }d\\
\Q, &\text{ if }d=0
\end{cases}
$$
Let $\mu:H_1(S^{2n+1}\sminus L)\to\C^\times$ be the homomorphism which
maps the meridian of $L_i$ to $\Gz_i$.
The local coefficient system $\C_\mu$ on $S^{2n+1}\sminus L$ defined by
$\mu$ extends to $S^{2n+2}\sminus\GL$.
We will denote the extension by the same symbol $\C_\mu$.
The sphere $S^{2n+1}$ bounds in $S^{2n+2}$ two balls, hemi-spheres
$S^{2n+2}_+$ and $S^{2n+2}_-$ such that $\p S^{2n+2}_+=S^{2n+1}$
and $\p S^{2n+2}_-=-S^{2n+1}$ with the orientations inherited
from the standard orientation of $S^{2n+2}$. In
$H_{n+1}(S^{2n+2}\sminus\GL;\C_\mu)$ there is a (Hermitian or
skew-Hermitian) intersection form. Its signature is zero by Theorem
\ref{VanSign}, because $\GL$ bounds a configuration of pairwise
transversal submanifolds $\GD=\GD_1\cup\dots\cup\GD_m$ in $D^{2n+3}$
and $\C_\mu$
extends over $D^{2n+3}\sminus\GD$.
\begin{Th}\label{slice-small-Th} Under the assumptions above,
\begin{equation}\label{IneQSlice-easy}
2|\Gs_\Gz(L)|\le\dim_P H_n(\GL;P).
\end{equation}
\end{Th}
\begin{proof}
The intersection form on $H_{n+1}(S^{2n+2}\sminus\GL;\C_\mu)$ restricted
to the images of $H_{n+1}(S^{2n+2}_+\sminus\GL;\C_\mu)$ and
$H_{n+1}(S^{2n+2}_-\sminus\GL;\C_\mu)$ has signatures $\Gs_\Gz(L)$ and
$-\Gs_\Gz(L)$, respectively. Therefore the dimension of each of the images
is at least $|\Gs_\Gz(L)|$.
The images are obviously orthogonal to each
other with respect to the intersection form, because their elements
can be realized by cycles lying in disjoint open hemi-spheres.
Hence
$$
2|\Gs_\Gz(L)|\le\dim_\C H_{n+1}(S^{2n+2}\sminus\GL;\C_\mu).
$$
On the other hand, by Theorem \ref{EstTwHom},
$$\dim_\C H_{n+1}(S^{2n+2}\sminus\GL;\C_\mu)\le
\dim_P H_{n+1}(S^{2n+2}\sminus\GL;P)=\dim_P H_n(\GL;P).$$
Combining these two inequalities, we obtain the desired one.
\end{proof}
\subsection{General slice inequalities}\label{s5.2}
\begin{Th}\label{ThSlice}
Under the assumptions above,
\begin{multline}\label{IneQslice}
2|\Gs_\Gz(L)| +2n^r_\Gz(L)\\
\le
\sum_{s=0}^{2r}(-1)^s\dim_P H_{n-s}(\GL\sminus L;P)
+\sum_{s=-2r+1}^{2r-1}(-1)^s\dim_P H_{n-s}(\GL;P)
\end{multline}
\end{Th}
\begin{lem}\label{Lemma1Th}
Let $j$ be the inclusion $S^{2n+1}\sminus L\to S^{2n+2}\sminus\GL$.
Then
\begin{multline}\label{eq0PfTh}
2|\Gs_\Gz(L)| +
2\rnk(j_*:H_{n+1}(S^{2n+1}\sminus L;\C_\mu)\to
H_{n+1}(S^{2n+2}\sminus\GL;\C_\mu))\\
\le \dim_\C H_{n+1}(S^{2n+2}\sminus\GL;\C_\mu)
\end{multline}
\end{lem}
\begin{proof}
Denote by $i^\pm$ the inclusion
$S^{2n+2}_\pm\sminus\GL\to S^{2n+2}\sminus\GL$.
Observe that the space $H_{n+1}(S^{2n+2}\sminus\GL;\C_\mu)$
has a natural filtration:
\begin{multline}\label{eq1PfTh}
j_*H_{n+1}(S^{2n+1}\sminus L;\C_\mu)\\
\subset
i^+_*H_{n+1}(S^{2n+2}_+\sminus\GL;\C_\mu)+
i^-_*H_{n+1}(S^{2n+2}_-\sminus\GL;\C_\mu)\\
\subset
H_{n+1}(S^{2n+2}\sminus\GL;\C_\mu)
\end{multline}
The inclusion homomorphisms
$$j_*:H_{n+1}(S^{2n+1}\sminus L;\C_\mu)\to
H_{n+1}(S^{2n+2}\sminus\GL;\C_\mu)$$
and the boundary homomorphism
$$
\p:H_{n+1}(S^{2n+2}\sminus\GL;\C_\mu)\to H_{n}(S^{2n+1}\sminus L;\C_\mu)
$$
of the Mayer-Vietoris sequence of the triad $(S^{2n+2}\sminus\GL;
S^{2n+2}_+\sminus\GL,S^{2n+2}_-\sminus\GL)$ are dual to each other
with respect to the intersection forms:
$$
j_*(a)\circ b=a\circ\p(b)\ \text{ for any }a\in
H_{n+1}(S^{2n+1}\sminus L;\C_\mu)\text{ and }b\in
H_{n+1}(S^{2n+2}\sminus\GL;\C_\mu).
$$
Since the intersection forms are non-singular, it follows that
$\rnk j_*=\rnk \p$.
By exactness of the Mayer-Vietoris sequence, the rank of $\p$ is
the dimension of the top quotient of the filtration \eqref{eq1PfTh}, while
the rank of $j_*$ is the dimension of the smallest term
$j_*H_{n+1}(S^{2n+1}\sminus L;\C_\mu)$ of this filtration.
The middle term of the filtration contains the subspaces
$i^+_*H_{n+1}(S^{2n+2}_+\sminus\GL;\C_\mu)$ and
$i^-_*H_{n+1}(S^{2n+2}_-\sminus\GL;\C_\mu)$. Their intersection
is the smallest term, which is orthogonal to both of the subspaces.
Therefore the dimension of the quotient of the middle term of the
filtration by the smallest term is at least $2|\Gs_\Gz(L)|$.
The dimension of the whole space $H_{n+1}(S^{2n+2}\sminus\GL;\C_\mu)$
is the sum of the dimensions of the successive quotients of the filtration.
We showed above that the top and the lowest of them have the same
dimension, equal to $\rnk j_*$, and that the dimension of the middle one
is at least $2|\Gs_\Gz(L)|$. Summing up, we obtain \eqref{eq0PfTh}.
\end{proof}
\begin{lem}\label{Lemma1Th8}
For any exact sequence $\dots\overset{\rho_{k+1}}\to C_k\overset{\rho_k}\to
C_{k-1}\overset{\rho_{k-1}}\to\dots$ of vector spaces
and any integers $n$ and $t$
\begin{equation}\label{eqL1Th8} \rnk(\rho_{n})-\rnk(\rho_{n+2t})
=\sum_{s=0}^{2t-1}(-1)^s\dim C_{n+s}
\end{equation}
\end{lem}
\begin{proof}The Euler characteristic of the exact sequence
$$
0\to\Im\rho_{n+2t}\hookrightarrow C_{n+2t-1}\overset{\rho_{n+2t-1}}\to
C_{n+2t-2}\to\dots\overset{\rho_{n+1}}\to C_{n}\to\Im\rho_{n}\to0
$$
is
$
\rnk(\rho_{n})-\sum_{s=0}^{2t-1}(-1)^s\dim C_{n+s}-\rnk(\rho_{n+2t})$,
that is, the difference between the left and right hand sides of
\eqref{eqL1Th8}.
On the other hand, it vanishes, as the Euler characteristic of an exact
sequence.
\end{proof}
\begin{lem}\label{Lemma2Th8}
Let $X$ be a topological space, $A$ its subspace, $\xi$ a local coefficient
system on $X$ with fiber $\C$. Then for any natural $n$ and integer
$r$
\begin{multline}
\rnk(H_{n+1}(A;\xi)\to H_{n+1}(X;\xi))-
\rnk(H_{n+2+2r}(X;\xi)\to H_{n+2+2r}(X,A;\xi))\\
=\sum_{s=0}^{2r}(-1)^sb_{n+1+s}(A)
-\sum_{s=0}^{2r}(-1)^sb_{n+2+s}(X,A)
+\sum_{s=0}^{2r-1}(-1)^sb_{n+2+s}(X)
\end{multline}
where $b_k(*)=\dim_\C H_k(*;\xi)$.
\end{lem}
\begin{proof}
Apply Lemma \ref{Lemma1Th8} to the homology sequence of pair $(X,A)$ with
coefficients in $\xi$.
\end{proof}
\begin{lem}\label{Lemma2Th}
For any integer $r$ with $0\le r\le\frac{n}2$,
\begin{multline}\label{IneQSlice}
2|\Gs_\Gz(L)|+2n^r_\Gz(L)\\
\le 2\sum_{s=0}^{2r}(-1)^s\dim_\C
H_{n+2+s}(S^{2n+2}\sminus\GL,S^{2n+1}\sminus L;\C_\mu)\\
+\sum_{s=-2r+1}^{2r-1}(-1)^s\dim_\C H_{n+1+s}(S^{2n+2}\sminus\GL;\C_\mu)
\end{multline}
\end{lem}
\begin{proof}By Lemma \ref{Lemma2Th8}
applied to the pair
$(S^{2n+2}\sminus\GL,S^{2n+1}\sminus L)$, we obtain
\begin{multline}\rnk(j_*:H_{n+1}(S^{2n+1}\sminus L;\C_\mu)
\to H_{n+1}(S^{2n+2}\sminus\GL;\C_\mu))\\
\ge
\sum_{s=0}^{2r}(-1)^s\dim_\C H_{n+1+s}(S^{2n+1}\sminus L;\C_\mu)\\
-\sum_{s=0}^{2r}(-1)^s\dim_\C
H_{n+2+s}(S^{2n+2}\sminus\GL,S^{2n+1}\sminus L;\C_\mu)\\
+\sum_{s=0}^{2r-1}(-1)^s\dim_\C H_{n+2+s}(S^{2n+2}\sminus\GL;\C_\mu)
\end{multline}
From this inequality and inequality \eqref{eq0PfTh} we obtain
\begin{multline}\label{eq2PfTh}
2|\Gs_\Gz(L)|+2n^r_\Gz(L)\\
\le 2\sum_{s=0}^{2r}(-1)^s\dim_\C
H_{n+1+s}(S^{2n+2}\sminus\GL,S^{2n+1}\sminus L;\C_\mu)\\
-2\sum_{s=0}^{2r-1}(-1)^s\dim_\C H_{n+s+2}(S^{2n+2}\sminus\GL;\C_\mu)\\
+\dim_\C H_{n+1}(S^{2n+2}\sminus\GL;\C_\mu)
\end{multline}
From this and the Alexander duality (which states that
$H_{n+1+s}(S^{2n+2}\sminus\GL;\C_\mu)$ is isomorphic to
$H_{n+1-s}(S^{2n+2}\sminus\GL;\C_{\mu})$)
the desired inequality follows.
\end{proof}
\begin{lem}\label{Lemma3Th}
\begin{multline}
\sum_{s=0}^{2r}(-1)^s
\dim_\C H_{n+1+s}(S^{2n+2}\sminus\GL,S^{2n+1}\sminus L;\C_\mu)\\
\le
\sum_{s=0}^{2r}(-1)^s\dim_P H_{n-s}(\GL\sminus L;P)
\end{multline}
\end{lem}
\begin{proof}
By Theorem \ref{EstTwHom}
\begin{multline}
\sum_{s=0}^{2r}(-1)^s
\dim_\C H_{n+1+s}(S^{2n+2}\sminus\GL,S^{2n+1}\sminus L;\C_\mu)\\
\le
\sum_{s=0}^{2r}(-1)^s
\dim_P H_{n+1+s}(S^{2n+2}\sminus\GL,S^{2n+1}\sminus L;P).
\end{multline}
By Poincar\'{e} duality (cf. \ref{sT.4.3}),
$H_{n+1+s}(S^{2n+2}\sminus\GL,S^{2n+1}\sminus L;P)$
is isomorphic to $H^{n+1-s}(S^{2n+2}\sminus S^{2n+1},\GL\sminus L;P)$.
The latter is isomorphic to $H^{n-s}(\GL\sminus L;P)$. By the universal
coefficients formula, $H^{n-s}(\GL\sminus L;P)$ is isomorphic to
$H_{n-s}(\GL\sminus L;P)$.
\end{proof}
\begin{lem}\label{Lemma4Th}
\begin{multline}
\sum_{s=-2r+1}^{2r-1}(-1)^s\dim_\C H_{n+1+s}(S^{2n+2}\sminus\GL;\C_\mu)\\
\le \sum_{s=-2r+1}^{2r-1}(-1)^s\dim_P H_{n-s}(\GL;P)
\end{multline}
\end{lem}
\begin{proof}
By Theorem \ref{EstTwHom}
\begin{multline}
\sum_{s=-2r+1}^{2r-1}(-1)^s\dim_\C H_{n+1+s}(S^{2n+2}\sminus\GL;\C_\mu)\\
\le \sum_{s=-2r+1}^{2r-1}(-1)^s\dim_P H_{n+1+s}(S^{2n+2}\sminus\GL;P).
\end{multline}
By Poincar\'{e} duality,
$H_{n+1+s}(S^{2n+2}\sminus\GL;P)$ is isomorphic to
$H^{n+1-s}(S^{2n+2},\GL;P)$.
From the exact sequence of the pair $(S^{2n+2},\GL)$ it follows that
$H^{n+1-s}(S^{2n+2},\GL;P)$ is isomorphic
to $H^{n-s}(\GL;P)$. By the universal coefficient formula,
$H^{n-s}(\GL;P)$ is isomorphic
to $H_{n-s}(\GL;P)$.
\end{proof}
\medskip
\noindent
{\bf Proof of Theorem \ref{ThSlice}.}
Sum up the inequalities of the last three Lemmas.\qed
%\appendix
\renewcommand\thesection{\appendixname}
\renewcommand\thesubsection{\Alph{subsection}}
\renewcommand\thesubsubsection{\Alph{subsection}.\arabic{subsubsection}}
\renewcommand\theequation{\Alph{subsection}.\arabic{equation}}
\section{Twisted homology}
\vspace{10pt}
\subsection{Twisted coefficients and chains}\label{sT.1}
\subsubsection{Local coefficient system}\label{sT.1.1}
Let $X$ be a topological space, and $\xi$ be a $\C$-bundle over $X$
with a fixed flat connection.
Here by a {\sfit connection\/} we mean operations of
{\sfit parallel transport\/}: for any path $s$ in $X$ connecting
points $x$ and
$y$ the parallel transport $T_s$ is an isomorphism from the fiber $\C_x$
over $x$ to the fiber $\C_y$ over $y$, such that the parallel transport
along a product of paths equals the composition of the parallel transports
along the factors; in a formula, $T_{uv}=T_v\circ T_u$. A connection is flat if
the parallel transport isomorphism does not change when the path is
replaced by a homotopic path.
A flat connection in a bundle $\xi$ over a simply connected $X$ gives a
trivialization of $\xi$.
Another name for $\xi$ is a {\sfit local
coefficient system\/} with fiber $\C$.
\subsubsection{Monodromy representation}\label{sT.1.2}
Recall that for a path-connected
locally contractible $X$ (and in more general situations, which are
not of interest here) such a system is determined by the {\sfit monodromy
representation\/} $\pi_1(X,x_0)\to\C^\times$, where
$\C^\times=\C\sminus0$ is the
multiplicative group of $\C$. The monodromy representation assigns to
$\Gs\in\pi_1(X,x_0)$ the complex number $\Gz$ such that the parallel
transport
isomorphism along a loop which represents $\Gs$ is multiplication by $\Gz$.
Since $\C^\times$ is commutative, a
homomorphism $\pi_1(X,x_0)\to\C^\times$ factors through the
abelianization
$\pi_1(X,x_0)\to H_1(X)$. Thus a local coefficient system with fiber
$\C$ is defined also by a homology version $\mu:H_1(X)\to\C^\times$
of the monodromy representation, which can be considered also as a
cohomology class belonging to $H^1(X;\C^\times)$.
The local coefficient system defined by a monodromy representation
$\mu:H_1(X)\to\C^\times$ is denoted by $\C^{\mu}$. Sometimes instead of
$\mu$ we will write data which defines $\mu$, for example the images
under $\mu$ of generators of $H_1(X)$ selected in a special way.
\subsubsection{Twisted singular chains}\label{sT.1.3}
The homology groups $H_n(X;\xi)$ of $X$ with coefficients in $\xi$ are a
classical invariant studied in algebraic topology. They are an immediate
generalization of $H_n(X;\C)$. Since this generalization is quite often
ignored in textbooks on homology theory, I recall the singular version of
the definition.
Recall that a singular $p$-dimensional chain
of $X$ with coefficients in $\C$ is a formal finite linear combination
of singular simplices $f_i:T^p\to X$ with complex coefficients.
A singular chain of $X$ with coefficients in $\xi$ is also a formal
finite linear combination of singular simplices, but each singular
simplex $f_i:T^p\to X$ appears
in it with a coefficient taken from the fiber $\C_{f_i(c)}$ of $\xi$
over $f_i(c)$,
where $c$ is the barycenter of $T^p$. Of course, all
the fibers of $\xi$ are isomorphic to $\C$. So, a chain with
coefficients in $\xi$ can be identified with a chain with coefficients
in $\C$, provided the isomorphisms $\C_{f_i(c)}\to\C$ are selected.
But they are not.
All singular $p$-chains of $X$ with coefficients in $\xi$ form a
complex vector space $C_p(X;\xi)$.
The boundary of such a chain is defined by the usual formula, but one
needs to bring the coefficient
from the fiber over $f_i(c)$ to the fibers over $f_i(c_i)$, where $c_i$
is the barycenter of the $i$th face of $T^p$.
For this, one may use translation along the composition with $f_i$ of
any path connecting $c$ to $c_i$ in $T^p$: since $T^p$ is simply
connected and the connection of $\xi$ is flat, the result does not
depend on the path.
These chains and boundary operators form a complex. Its homology is
called {\sfit homology with coefficients in\/} $\xi$ and denoted by
$H_p(X;\xi)$.
Homology with coefficients in the local coefficient system corresponding
to the trivial monodromy representation $1:H_1(X)\to\C^\times$ coincides
with homology with coefficients in $\C$.
\subsubsection{Twisted cellular chains}\label{sT.1.4}
It is possible to calculate the homology with coefficients in a
local coefficient system using cellular decomposition. Namely, a
$p$-dimensional cellular chain of a cw-complex $X$ with coefficients in
a local coefficient system $\xi$ is a formal finite linear combination
of $p$-dimensional cells in which a coefficient at a cell belongs to
the fiber over a point of the cell. It does not matter which point is
this, because fibers over different points in a cell are identified via
parallel transport along paths in the cell: any two points in a cell
can be connected in the cell by a path unique up to homotopy.
In order to describe the boundary operator, let me define the {\sfit
incidence number\/} $(z\Gs:\tau)_y\in\C_y$, where $\Gs$ is a $p$-cell,
$\tau$ is a $(p-1)$-cell, $z\in\C_x$, $x\in\Gs$, $y\in\tau$.
The boundary operator is then defined by the incidence numbers:
$$\p(z\Gs)=\sum_\tau(z\Gs:\tau)_y\tau.$$
Let $f:D^p\to X$ be a characteristic map for $\Gs$. Assume that the point
$y$ in the $(p-1)$-cell $\tau$ is a regular value for $f$. This means that
$y$ has a neighborhood $U$ in $\tau$ such that $f^{-1}(U)\subset
S^{p-1}\subset D^p$ is the union of finitely many balls mapped by $f$
homeomorphically onto $U$. Connect
$f^{-1}(x)\in D^p$ with all the points of $f^{-1}(y)$ by straight paths.
The compositions of these paths with $f$ are paths $s_1,\dots,s_N$
connecting $x$ with $y$. Then put
$$
(z\Gs:\tau)_y=\sum_{i=1}^N\Ge_iT_{s_i}(z)
$$
where $T_{s_i}$ is a parallel transport operator and
$\Ge_i=+1$ or $-1$ according to whether $f$ preserves or reverses
the orientation on the $i$th ball out of $N$ balls constituting
$f^{-1}(U)$.
\subsection{Twisted acyclicity}\label{sT.2}
\subsubsection{Acyclicity of circle}\label{sT.2.1}
According to one
of the most fundamental properties of homology, the dimension of
$H_0(X;\C)$
is equal to the number of path-connected components of $X$. In
particular, $H_0(X;\C)$ does not vanish, unless $X$ is empty.
This is not the case for twisted homology. A crucial example is the
circle $S^1$. Let $\mu:H_1(S^1)\to\C^\times$ map the generator
$1\in\Z=H_1(S^1)$ to $\Gz\in\C^\times$.
\begin{ATh}[Twisted acyclicity of circle.]\label{1.A}
$H_*(S^1;\C^\mu)=0$, iff \ $\Gz\ne 1$.
\end{ATh}
\begin{proof}The simplest cw-decomposition of $S^1$
consists of two cells, one-dimensional $\Gs_1$ and zero-dimensional
$\Gs_0$. One can easily see that $\p\Gs_1=(\Gz-1)\Gs_0$. Hence
$\p:C_1(S^1;\C^\mu)\to
C_0(S^1;\C^\mu)$ is an isomorphism, iff $\Gz\ne1$.
\end{proof}
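For comparison, if $\Gz=1$, then the boundary operator above vanishes and
we recover the untwisted answer $H_0(S^1;\C)=H_1(S^1;\C)=\C$.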
\subsubsection{Vanishing of twisted homology}\label{sT.2.2}
\begin{Acor}\label{1.B}
Let $X$ be a path connected space and
$\mu:H_1(S^1\times X)\to\C^\times$ be a homomorphism.
Denote by $\Gz$ the image under $\mu$ of the homology class
realized by a fiber $S^1\times \text{point}$.
Then $H_*(S^1\times X;\C^\mu)=0$, if \
$\Gz\ne1$.
\end{Acor}
\begin{proof} Since $H_1(S^1\times X)=H_1(S^1)\times H_1(X)$, the
homomorphism $\mu$ can be presented as a product of homomorphisms
$\mu_1:H_1(S^1)\to\C^\times$ and $\mu_2:H_1(X)\to\C^\times$, which
can be obtained
as compositions of $\mu$ with the inclusion homomorphisms. Thus
$\C^\mu=\C^{\mu_1}\otimes\C^{\mu_2}$, and we can apply the K\"unneth
formula
$$H_n(S^1\times X;\C^\mu)=\bigoplus_{p=0}^n H_p(S^1;\C^{\mu_1})\otimes
H_{n-p}(X;\C^{\mu_2})$$
and refer to Theorem \ref{1.A}.
\end{proof}
\begin{Acor}\label{1.C}
Let $B$ be a path connected space, $p:X\to B$ a locally trivial
fibration with fiber $S^1$. Let $\mu:H_1(X)\to\C^\times$ be
a homomorphism. Denote by $\Gz$ the image under $\mu$ of the homology class
realized by a fiber of $p$. Then $H_*(X;\C^\mu)=0$, if \
$\Gz\ne1$.
\end{Acor}
\begin{proof} It follows from Theorem \ref{1.A} via the spectral sequence of
the fibration $p$.
\end{proof}
\subsection{Estimates of twisted homology}\label{sT.3}
\vspace{6pt}
\subsubsection{Equalities underlying the Morse inequalities}\label{sT.3.1}
\begin{Alem}\label{EqUnderlMI}
For a complex $C:\dots\to C_i\overset{\p_i}\to
C_{i-1}\to$ of finite dimensional vector spaces
over a field $F$
\begin{multline}\label{dimH}
\sum_{s=r}^{2n+r}(-1)^{s-r}\dim_FH_s(C)=\\
\sum_{s=r}^{2n+r}(-1)^{s-r}\dim_FC_s-\rnk\p_{r}-\rnk\p_{2n+r+1}.
\end{multline}
\end{Alem}
\begin{proof} First, we prove the equality \eqref{dimH} for $n=0$.
Since $H_s(C)=\Ker\p_s/\Im\p_{s+1}$, we have
$\dim_FH_s(C)=\dim\Ker\p_s-\dim_F\Im\p_{s+1}$. Further,
$\dim_F\Im\p_{s+1}=\rnk\p_{s+1}$, and
$\dim_F\Ker\p_s=\dim_FC_s-\rnk\p_s$. It follows
\begin{equation}\label{dimHs}
\dim_FH_s(C)=\dim_FC_s-\rnk\p_s-\rnk\p_{s+1}.
\end{equation}
This is the special case of \eqref{dimH} with $n=0$, $r=s$.
The general case follows from it by alternating summation of
\eqref{dimHs} over $s=r,\dots,2n+r$: the ranks $\rnk\p_s$ with
$r<s\le 2n+r$ cancel in pairs, leaving $\rnk\p_r$ and $\rnk\p_{2n+r+1}$.
\end{proof}
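For instance, for $n=1$ the alternating sum of \eqref{dimHs} telescopes as
follows (we spell this out since it is exactly the pattern behind the general
case):
\begin{multline*}
\dim_FH_r(C)-\dim_FH_{r+1}(C)+\dim_FH_{r+2}(C)=\\
\sum_{s=r}^{r+2}(-1)^{s-r}\dim_FC_s
-(\rnk\p_r+\rnk\p_{r+1})+(\rnk\p_{r+1}+\rnk\p_{r+2})-(\rnk\p_{r+2}+\rnk\p_{r+3})=\\
\sum_{s=r}^{r+2}(-1)^{s-r}\dim_FC_s-\rnk\p_r-\rnk\p_{r+3}.
\end{multline*}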
\subsubsection{Algebraic Morse type inequalities}\label{sT.3.2}
\begin{Alem}\label{AlgLem}
Let $P$ and $Q$ be fields, $R$ be a subring of $Q$ and let $h:R\to P$ be
a ring homomorphism. Let $C: \dots\to C_p\to C_{p-1}\to\dots\to C_1\to C_0$
be a complex of free finitely generated $R$-modules. Then for any
$n$ and $r$
$$\sum_{s=r}^{2n+r}(-1)^{s-r}\dim_QH_s(C\otimes_RQ)\le
\sum_{s=r}^{2n+r}(-1)^{s-r}\dim_PH_s(C\otimes_hP)
$$
\end{Alem}
Thus, by Lemma \ref{EqUnderlMI}, the greater the ranks of the differentials,
the smaller the sum
$$\sum_{s=r}^{2n+r}(-1)^{s-r}\dim_FH_s(C).$$
\begin{proof}
Choose free bases in the modules $C_i$. Let $M_i$ be the matrix representing
$\p_i:C_i\to C_{i-1}$ in these bases. The same matrix represents the
differential $\p^Q_i$ of $C\otimes_RQ$. The matrix obtained from $M_i$ by
replacing the entries with their images under $h$ represents the
differential $\p^P_i$ of $C\otimes_hP$.
The minors of the latter matrix are the images under $h$ of the minors of the
former one. Consequently, $\rnk\p^Q_i\ge\rnk\p^P_i$.
By Lemma \ref{EqUnderlMI}
\begin{multline}\label{eqQ}
\sum_{s=r}^{2n+r}(-1)^{s-r}\dim_QH_s(C\otimes_RQ)= \\
\sum_{s=r}^{2n+r}(-1)^{s-r}\dim_QC_s\otimes_RQ-\rnk\p^Q_{r}-\rnk\p^Q_{2n+r+1}
\end{multline}
and
\begin{multline}\label{eqP}
\sum_{s=r}^{2n+r}(-1)^{s-r}\dim_PH_s(C\otimes_hP)= \\
\sum_{s=r}^{2n+r}(-1)^{s-r}\dim_PC_s\otimes_hP-\rnk\p^P_{r}-\rnk\p^P_{2n+r+1}
\end{multline}
Compare the right-hand sides of these equalities.
The dimensions $\dim_PC_s\otimes_hP$ and $\dim_QC_s\otimes_RQ$ are both equal to
the rank of the free $R$-module $C_s$.
Since, as was shown above, $\rnk\p^Q_i\ge\rnk\p^P_i$, the right-hand
side of \eqref{eqQ} does not exceed the right-hand side of \eqref{eqP}.
\end{proof}
Probably the simplest application of Lemma \ref{AlgLem} is the
well-known upper estimate of the Betti numbers with rational
coefficients by the Betti numbers with coefficients in a finite field,
which also follows from the universal coefficient formula.
\subsubsection{Application to twisted homology}\label{sT.3.3}
\begin{ATh}\label{EstTwHom}
Let $X$ be a finite cw-complex, and $\mu:H_1(X)\to\C^\times$ be a
homomorphism. If $\Im\mu\subset\C^\times$ generates a subring $R$ of $\C$ and
there is a ring homomorphism $h:R\to P$, where $P$ is a field, such that
$h(\mu(x))=1$ for all $x\in H_1(X)$, then we can apply Lemma \ref{AlgLem} and
get an upper estimate for the dimensions of the twisted homology groups in
terms of the dimensions of the non-twisted ones:
\begin{equation}\label{twHomEst}
\sum_{s=r}^{2n+r}(-1)^{s-r}\dim_\C H_s(X;\C^\mu)\le
\sum_{s=r}^{2n+r}(-1)^{s-r}\dim_PH_s(X;P)
\end{equation}
\end{ATh}
Here are several situations in which the assumptions of this theorem
are fulfilled.
\subsubsection{Estimates by untwisted $\Z/p\Z$ Betti numbers}\label{sT.3.4}
Let $H_1(X)$ be generated by $g$ and let
$\zeta=\mu(g)$ be an algebraic number. Let $q$ be
the minimal integer polynomial with relatively prime coefficients
which annihilates $\zeta$. Assume that $q(1)$ is divisible
by a prime number $p$.
Then for $R$ we can take
$\Z[\zeta,\zeta^{-1}]\subset\C$, for $P$ the field $\Z/p\Z$, and for $h$
the ring homomorphism
$\Z[\zeta,\zeta^{-1}]\to\Z/p\Z$ mapping $\zeta\mapsto1$.
Here is a more general situation: Let $H_1(X)$ be generated by
$g_1,\dots,g_k$, and let
$\zeta_i=\mu(g_i)$ be an algebraic number for each $i$. Let $q_i$ be
the minimal integer polynomial with relatively prime coefficients
which annihilates $\zeta_i$. Assume that the greatest common
divisor of $q_1(1),\dots,q_k(1)$ is divisible by a prime number $p$.
Then for $R$ we can take
$\Z[\zeta_1,\zeta_1^{-1},\dots,\zeta_k,\zeta_k^{-1}]\subset\C$, for $P$ the
field $\Z/p\Z$, and for $h$ the ring homomorphism
$\Z[\zeta_1,\zeta_1^{-1},\dots,\zeta_k,\zeta_k^{-1}]\to\Z/p\Z$ mapping
$\zeta_i\mapsto1$ for all $i$.
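A standard example, stated here only as an illustration: if $\zeta$ is a
primitive $p$-th root of unity for a prime $p$, then
$$
q(t)=1+t+\dots+t^{p-1},\qquad q(\zeta)=0,\qquad q(1)=p,
$$
so the hypothesis above is satisfied and \eqref{twHomEst} estimates the
twisted homology by the $\Z/p\Z$ Betti numbers.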
\subsubsection{Estimates by rational Betti numbers}\label{sT.3.5} Let $H_1(X)$
be generated by $g$ and let $\zeta=\mu(g)$ be transcendental.
Then for $R$ we can take the ring $\Z[\zeta,\zeta^{-1}]$,
for $Q$ the field $\Q(\zeta)$,
for $P$ the field $\Q$, and for $h$ the ring homomorphism
$\Z[\zeta,\zeta^{-1}]\to\Q$ which maps $\zeta$ to 1.
\subsubsection{The most general estimates}\label{sT.3.6}
Let $H_1(X)$ be generated by $g_1,\dots,g_k$ and let $\Gz_i=\mu(g_i)$.
The Laurent polynomials with integer coefficients that vanish at
$(\Gz_1,\dots,\Gz_k)$ form an ideal in the ring
$\Z[t_1,t_1^{-1},\dots,t_k,t_k^{-1}]$. Let $p_1,\dots,p_k$ be generators of
this ideal.
Let $d$ be the greatest common divisor of the integers
$p_1(1,\dots,1)$, \dots, $p_k(1,\dots,1)$, if at least one of them is
not 0. Otherwise, let $d=0$.
In other words, consider the specialization homomorphism
$$S:\Z[t_1,t_1^{-1},\dots,t_k,t_k^{-1}]\to \C: t_i\mapsto\Gz_i.$$
Let $K$ be the kernel of $S$, and let $d$ be the non-negative generator of
the ideal which is the image of $K$ under the homomorphism
$$\Z[t_1,t_1^{-1},\dots,t_k,t_k^{-1}]\to\Z : t_i\mapsto 1.$$
Then for $R$ we can take the ring
$\Z[\Gz_1,\Gz_1^{-1},\dots,\Gz_k,\Gz_k^{-1}]$. For $Q$ we can take
the quotient field of $R$, but since both $R$ and its quotient field
are contained in $\C$, let us take $Q=\C$.
If $d>1$, then we can take for $P$ the field $\Z/p\Z$ with any prime $p$
which divides $d$.
If $d=0$, then let $P=\Q$. The case $d=1$ is the most unfortunate:
then our technique does not give any non-trivial estimate.
For $d>1$ or $d=0$ we have the inequality \eqref{twHomEst}.
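Here is a small worked example, chosen for illustration only. Let $k=1$:
$$
\Gz^2-3\Gz+1=0:\quad p_1(t)=t^2-3t+1,\ \ p_1(1)=-1,\ \ d=1\ \text{(no estimate)};
$$
$$
\Gz=-1:\quad p_1(t)=t+1,\ \ p_1(1)=2,\ \ d=2,\ \text{so we may take } P=\Z/2\Z.
$$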
\subsection{Twisted duality}\label{sT.4}
\subsubsection{Cochains and cohomology}\label{sT.4.1}
Cochain groups $C^p(X;\xi)$ (which are vector spaces over
$\C$) and cohomology $H^p(X;\xi)$ are defined similarly: a
$p$-cochain with coefficients in $\xi$ is a function assigning to each
singular simplex $f:T^p\to X$ an element of $\C_{f(c)}$, the
fiber of $\xi$ over $f(c)$.
This can be interpreted as the chain complex of the local coefficient
system $\Hom(\C,\xi)$ whose fiber over $x\in X$ is $\Hom_\C(\C,\C_x)$.
More generally, for any local coefficient systems $\xi$ and $\eta$ on $X$
with fiber $\C$ there is a local coefficient system $\Hom(\xi,\eta)$
constructed fiber-wise with the parallel transport defined naturally in terms of
the parallel transports of $\xi$ and $\eta$. If the monodromy
representations of $\xi$ and $\eta$ are $\mu$ and $\nu$, respectively,
then the monodromy representation of $\Hom(\xi,\eta)$ is
$\mu^{-1}\nu:H_1(X)\to\C^\times:x\mapsto\mu(x)^{-1}\nu(x)$.
Similarly, for any local coefficient systems $\xi$ and $\eta$ on $X$
with fiber $\C$ there is a local coefficient system $\xi\otimes \eta$.
If $\mu,\nu: H_1(X)\to\C^\times$ are homomorphisms, then
$\C^\mu\otimes\C^\nu$ is the local coefficient system $\C^{\mu\nu}$
corresponding to the homomorphism-product
$\mu\nu:H_1(X)\to\C^\times:x\mapsto \mu(x)\nu(x)$.
If $\nu=\mu^{-1}$ (that is, $\mu(x)\nu(x)=1$ for any $x\in H_1(X)$),
then $\C^\mu\otimes\C^\nu$ is the non-twisted coefficient system
with fiber $\C$.
In contradistinction to the non-twisted case, there is no way to calculate
$H_n(X;\xi\otimes\eta)$ in terms of $H_*(X;\xi)$ and $H_*(X;\eta)$.
Indeed, both $H_*(S^1;\C^\mu)$ and $H_*(S^1;\C^{\mu^{-1}})$ vanish,
unless $\mu:H_1(S^1)\to\C^\times$ is trivial, but
$H_0(S^1;\C^\mu\otimes\C^{\mu^{-1}})=H_0(S^1;\C)=\C$.
\subsubsection{Multiplications}\label{sT.4.2}
Usual definitions of various cohomological and homological
multiplications are easily generalized to twisted homology. For this one
needs a bilinear pairing of the coefficient systems. (Recall that in the
case of non-twisted coefficient system a pairing of coefficient groups
also is needed.) For local coefficient systems $\xi$, $\eta$ and $\zeta$
with fiber $\C$ on $X$, a pairing $\xi\oplus\eta\to\zeta$ is a fiber-wise map
which is bilinear over each point of $X$. Given such a pairing, there
are pairings
$$\smallsmile:H^p(X;\xi)\times H^q(X;\eta)\to H^{p+q}(X;\zeta),$$
$$\smallfrown:H_{p+q}(X;\xi)\times H^q(X;\eta)\to H_{p}(X;\zeta),$$
etc.
A pairing $\xi\oplus\eta\to\zeta$ of local coefficient systems can be
factored through the universal pairing $\xi\oplus\eta\to\xi\otimes\eta$.
Since $\C^\mu\otimes\C^{\mu^{-1}}$ is a non-twisted coefficient system
with fiber $\C$, this gives rise to a non-singular pairing
$$
C_p(X;\C^{\mu^{-1}})\otimes C^p(X;\C^\mu)\to \C
$$
which induces a non-singular pairing
$$
\smallfrown: H_p(X;\C^{\mu^{-1}})\otimes H^p(X;\C^\mu)\to \C
$$
Thus, the vector spaces $H_p(X;\C^{\mu^{-1}})$ and
$H^p(X;\C^\mu)$ are dual.
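In particular, since these spaces are finite dimensional for a finite
cw-complex $X$, the non-singularity of the pairing yields the equality of
dimensions
$$
\dim_\C H_p(X;\C^{\mu^{-1}})=\dim_\C H^p(X;\C^\mu).
$$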
\subsubsection{Poincar\'e duality}\label{sT.4.3}
Let $X$ be an oriented connected compact manifold of dimension $n$.
Then $H_n(X,\p X)$ is isomorphic to $\Z$ and the orientation is a
choice of the isomorphism, or, equivalently, the choice of a generator
of $H_n(X,\p X)$. We denote the generator by $[X]$.
Let $\mu:H_1(X)\to\C^\times$ be a homomorphism. There are the
Poincar\'{e}-Lefschetz duality isomorphisms
$$[X]\smallfrown :H^p(X;\C^\mu)\to H_{n-p}(X,\p X;\C^{\mu}),
$$
$$ [X]\smallfrown :H^p(X,\p X;\C^\mu)\to H_{n-p}(X;\C^{\mu})
$$
Similarly to the case of non-twisted coefficients, there are
non-singular pairings:
the cup-product pairing
$$
\smallsmile:H^p(X;\C^\mu)\times H^{n-p}(X,\p X;\C^{\mu^{-1}})\to
H^n(X;\C)=\C
$$
and the intersection pairing
\begin{equation}\label{bilinIp}
\circ:H_p(X;\C^\mu)\times H_{n-p}(X,\p X;\C^{\mu^{-1}})\to \C
\end{equation}
However, the local coefficient systems of the homology or cohomology
groups involved in a pairing are different,
unless $\Im\mu\subset\{\pm1\}$.
\subsubsection{Conjugate local coefficient systems}\label{sT.4.4}
Recall that for vector spaces $V$ and
$W$ over $\C$ a map $f:V\to W$ is called semi-linear if $f(a+b)=f(a)+f(b)$
for any $a,b\in V$ and $f(za)=\overline zf(a)$ for $z\in\C$ and $a\in
V$. This notion extends obviously to fiber-wise maps of complex vector
bundles. If $\xi$ and $\eta$ are local coefficient systems of the type that
we consider, then a fiber-wise semi-linear bijection $\xi\to\eta$ commuting
with all the parallel transport maps is called a {\sfit semi-linear
equivalence\/} between $\xi$ and $\eta$.
For any local coefficient system $\xi$ with fiber $\C$ on $X$ there
exists a unique local coefficient system on $X$ which is
semi-linearly equivalent to $\xi$. It is denoted by $\overline\xi$ and
called {\sfit conjugate\/} to $\xi$.
If $\xi=\C^\mu$, then $\overline\xi$ is $\C^{\overline\mu}$, where
$\overline\mu(x)=\overline{\mu(x)}$ for any $x\in H_1(X)$.
\subsubsection{Unitary local coefficient systems}\label{sT.4.5}
A homomorphism $\mu:H_1(X)\to\C^\times$ is called {\sfit unitary\/} if
$\Im\mu\subset S^1=U(1)=\{z\in\C\mid |z|=1\}$. In $S^1$ the inversion
$z\mapsto z^{-1}$ coincides with the complex conjugation: if $|z|=1$, then
$z^{-1}=\overline z$. Therefore if $\mu:H_1(X)\to\C^\times$ is unitary,
then $\overline{\C^\mu}=\C^{\mu^{-1}}$ and there exists a
{\sfit semi-linear\/ } equivalence $\C^\mu\to\C^{\mu^{-1}}$.
This semi-linear equivalence induces semi-linear equivalence
$$H_{k}(X;\C^\mu)\to H_k(X;\C^{\mu^{-1}})$$
and similar semi-linear equivalences in cohomology and relative
homology and cohomology.
Combining a semi-linear isomorphism
$$H_{n-p}(X,\p X;\C^\mu)\to H_{n-p}(X,\p X;\C^{\mu^{-1}})$$
of this kind with the intersection
pairing \eqref{bilinIp} we get a {\sfit sesqui-linear \/} pairing
\begin{equation}\label{ssqlinIp}
\circ:H_p(X;\C^\mu)\times H_{n-p}(X,\p X;\C^{\mu})\to \C
\end{equation}
(Sesqui-linear means that it is linear in the first variable, and
semi-linear in the second one.) This pairing is non-singular, because
the bilinear pairing \eqref{bilinIp} is non-singular, and \eqref{ssqlinIp}
differs from it by a semi-linear equivalence on the second variable.
\subsubsection{Intersection forms}\label{sT.4.6}
Let $X$ be an oriented connected compact smooth manifold of even
dimension $n=2k$ and $\mu:H_1(X)\to\C^\times$ be a unitary homomorphism.
Combining the relativisation homomorphism
$$
H_{n-p}(X;\C^{\mu})\to H_{n-p}(X,\p X;\C^\mu)
$$
with the pairing \eqref{ssqlinIp} for $p=k$, we define the sesqui-linear form
\begin{equation}\label{ssqlinIf}
\circ:H_k(X;\C^\mu)\times H_k(X;\C^\mu)\to\C
\end{equation}
It is called the {\sfit intersection form\/} of $X$.
If $k$ is even, this form is {\sfit Hermitian\/}, that is $\Ga\circ\Gb=\overline{\Gb\circ\Ga}$.
If $k$ is odd, it is {\sfit skew-Hermitian\/}, that is
$\Ga\circ\Gb=-\overline{\Gb\circ\Ga}$.
The difference between Hermitian and skew-Hermitian forms is not as deep
as the difference between symmetric and skew-symmetric bilinear forms.
Multiplication by $i=\sqrt{-1}$ turns a skew-Hermitian form into a
Hermitian one, and the original form can be recovered by multiplying
the Hermitian form by $-i$.
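Indeed (a direct check, added for completeness): if $\circ$ is
skew-Hermitian and we set $\langle\Ga,\Gb\rangle=i(\Ga\circ\Gb)$, then
$$
\overline{\langle\Gb,\Ga\rangle}
=\overline{i(\Gb\circ\Ga)}
=-i\,\overline{\Gb\circ\Ga}
=-i\bigl(-(\Ga\circ\Gb)\bigr)
=\langle\Ga,\Gb\rangle,
$$
so $\langle\ ,\ \rangle$ is Hermitian, and $\Ga\circ\Gb=-i\langle\Ga,\Gb\rangle$.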
The intersection form \eqref{ssqlinIf} may be singular. Its radical,
that is, the orthogonal complement of the whole $H_k(X;\C^\mu)$, is the
kernel of the relativisation homomorphism
$H_k(X;\C^{\mu})\to H_k(X,\p X;\C^\mu)$.
It can also be described as the image of the inclusion homomorphism
$$H_k(\p X;\C^{\mu\inc_*})\to H_k(X;\C^\mu),$$ where $\inc_*$ is the
inclusion homomorphism $H_1(\p X)\to H_1(X)$.
\subsubsection{Twisted signatures and nullities}\label{sT.4.7}
As is well known, for any Hermitian form on a finite-dimensional space
$V$ there exists an orthogonal basis in which the form is represented by
a diagonal matrix. The diagonal entries of the matrix are real.
The number of zero diagonal entries is called the {\sfit nullity\/},
and the difference between the number of positive and negative
entries is called the {\sfit signature\/} of the form.
These numbers do not depend on the basis.
For a skew-Hermitian form, by nullity and signature one means the nullity
and signature of the Hermitian form obtained by multiplying the
skew-Hermitian form by $i$.
For a compact oriented $2k$-manifold $X$ and a unitary homomorphism $\mu:H_1(X)\to\C^\times$
the signature and nullity of the intersection form
$$\circ:H_k(X;\C^\mu)\times H_k(X;\C^\mu)\to\C $$
are denoted by $\Gs_\mu(X)$ and $n_\mu(X)$, respectively, and called the
{\sfit twisted\/} signature and nullity of $X$.
The classical theorems about the signatures of the symmetric intersection
forms of oriented compact $4k$-manifolds are easily generalized to
twisted signatures:
\begin{ATh}[Additivity of Signature.]\label{AddOfSign}
Let $X$ be an oriented compact manifold of even dimension.
If $A$ and $B$ are its compact submanifolds of the same dimension
such that $A\cup B=X$, $\Int A\cap\Int B=\varnothing$ and $\p(A\cap
B)=\varnothing$, then for any $\mu:H_1(X)\to\C^\times$
$$\Gs_\mu(X)=\Gs_{\mu\inc_*}(A)+\Gs_{\mu\inc_*}(B)$$
where $\inc$ denotes an appropriate inclusion.
\end{ATh}
\begin{ATh}[Signature of Boundary.]\label{VanSign}
Let $X$ be an oriented compact manifold of odd dimension. Then
$\Gs_{\mu\inc_*}(\p X)=0$
for any
$\mu:H_1(X)\to\C^\times$.
\end{ATh}
\begin{thebibliography}{99}
\bibitem[{Cimasoni and Florens}{2008}]{CimaFlor} David Cimasoni, Vincent Florens {\sl Generalized
Seifert surfaces and signatures of colored links}, Trans. Amer. Math. Soc.
{\bf 360} (2008), 1223--1264.
\bibitem[{Florens}{2005}]{Florens1} V.~Florens, {\sl Signatures of colored links
with application to real algebraic curves},
J. Knot Theory Ramifications {\bf 14} (2005), 883--918.
\bibitem[{Florens and Gilmer}{2003}]{FlorGilm} V.~Florens, P.~Gilmer, {\sl On the slice genus of links},
Algebr. Geom. Topol. {\bf 3} (2003), 905--920; arXiv:math/0311136 [math.GT].
\bibitem[{Gilmer}{1981}]{Gilmer} P.~M.~Gilmer, {\sl Configuration of surfaces
in 4-manifolds}, Trans. Amer. Math. Soc., {\bf 264} (1981), 353--380.
\bibitem[{Gordon and Litherland}{1978}]{GordLith} C.~McA.~Gordon, R.~A.~Litherland, {\sl On the
signature of a link}, Invent. Math. {\bf 47:1}
(1978), 53--69.
\bibitem[{Kauffman and Taylor}{1976}]{KaufTayl} L.~Kauffman, L.~Taylor, {\sl Signature of links},
Trans. Amer. Math. Soc. {\bf 216} (1976), 351--365.
\bibitem[{Levine}{1969a}]{Levine1} J.~Levine, {\sl Knot cobordism in codimension two},
Comment. Math. Helv., {\bf 44} (1969), 229--244.
\bibitem[{Levine}{1969b}]{Levine2} J.~Levine, {\sl Invariants of knot cobordism},
Invent. Math., {\bf 8} (1969), 98--110 and 355.
\bibitem[{Livingston}{2004}]{Livingst} C.~Livingston, {\sl Computations of the Ozsvath-Szabo
knot concordance invariant}, Geom. Topol. {\bf 8} (2004), 735--742;
arXiv:math.GT/0311036.
\bibitem[{Murasugi}{1965}]{Mura1} Kunio~Murasugi, {\sl On a certain numerical invariant
of link types}, Trans. Amer. Math. Soc., {\bf 117} (1965), 387--422.
\bibitem[{Murasugi}{1970}]{Mura2} Kunio~Murasugi, {\sl On the signature of links},
Topology, {\bf 9} (1970), 283--298.
\bibitem[{Orevkov}{1999}]{Orevkov1} S.~Orevkov, {\sl Link theory and oval
arrangements of real algebraic curves}, Topology {\bf 38} (1999),
779--810.
\bibitem[{Orevkov}{2005}]{Orevkov2} S.~Orevkov, {\sl Plane real algebraic curves of
odd degree with a deep nest}, J. Knot Theory Ramifications {\bf 14} (2005),
497--522.
\bibitem[{Ozsvath and Szabo}{2003}]{OzsSz1} P.~Ozsvath and Z.~Szabo, {\sl Knot Floer homology and
the four-ball genus}, Geom. Topol. {\bf 7}
(2003), 615--639; arXiv:math.GT/0301149.
\bibitem[{Rasmussen}{2004}]{Rasm} J.~Rasmussen, {\sl Khovanov homology and the slice
genus}, arXiv:math.GT/0402131.
\bibitem[{Shumakovitch}{2004}]{Shum} Alexander Shumakovitch, {\sl Rasmussen invariant,
slice-Bennequin inequality, and sliceness of knots},
arXiv:math/0411643 [math.GT].
\bibitem[{Smolinsky}{1989}]{Smolinsky} L.~Smolinsky, {\sl A generalization of the
Levine-Tristram link invariant}, Trans. Amer. Math. Soc. {\bf 315} (1989), 205--217.
\bibitem[{Tristram}{1969}]{Trist} A.~G.~Tristram, {\sl Some cobordism invariants of
links}, Proc. Cambridge Philos. Soc., {\bf 66} (1969), 257--264.
\bibitem[{Trotter}{1962}]{Trot} H.~Trotter, {\sl Homology of group systems with
applications to knot theory}, Ann. of Math. {\bf 76:2}
(1962), 464--498.
\bibitem[{Viro}{1973}]{Viro1} O.~Y.~Viro, {\sl Branched coverings of manifolds with boundary and invariants of links. I}, Izvestiya AN SSSR, ser. Matem. {\bf 37:6}
(1973), 1242--1259
(Russian); English translation in Soviet Math. Izvestia {\bf 7} (1973),
1239--1255.
\bibitem[{Viro}{1975}]{Viro2} O.~Y.~Viro, {\sl Placements in codimension 2 and boundary}, Uspekhi Mat. Nauk {\bf 30:1} (1975), 231--232 (Russian).
\bibitem[{Viro}{1977}]{Viro3} O.~Y.~Viro, {\sl Signatures of links,\/}
Tezisy VII Vsesojuznoj topologicheskoj konferencii, Minsk,
1977, p. 43 (Russian).
\bibitem[{Viro and Turaev}{1977}]{TV} O.~Y.~Viro, V.~G.~Turaev, {\sl Estimates of Twisted
Homology,\/} Tezisy VII Vsesojuznoj topologicheskoj konferencii, Minsk,
1977, p. 42 (Russian).
\end{thebibliography}
\end{document}
| {
"alphanum_fraction": 0.6958542128,
"avg_line_length": 41.0083632019,
"ext": "tex",
"hexsha": "50f21a989f250fa0b2e07f4a1f1ed8571a3a7a6c",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "b07a076469cae6128d132b70027762760878a502",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "olegviro/VersoTeX",
"max_forks_repo_path": "Examples/acycl2.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "b07a076469cae6128d132b70027762760878a502",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "olegviro/VersoTeX",
"max_issues_repo_path": "Examples/acycl2.tex",
"max_line_length": 164,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "b07a076469cae6128d132b70027762760878a502",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "olegviro/VersoTeX",
"max_stars_repo_path": "Examples/acycl2.tex",
"max_stars_repo_stars_event_max_datetime": "2017-05-23T00:59:47.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-05-23T00:59:47.000Z",
"num_tokens": 24819,
"size": 68648
} |
\documentclass[bibtotocnumbered, headsepline,normalheadings]{scrreprt}
\usepackage[latin1]{inputenc}
\usepackage[english]{babel}
\usepackage{scrpage}
\usepackage{alltt}
\pagestyle{headings}
\begin{document}
\title{Sun Grid Engine Resource Broker Adaptor for JavaGAT}
\author{Ole Christian Weidner}
\date{\today}
\maketitle
\tableofcontents
\chapter{SGE and the DRMAA API}
\section{Introduction to DRMAA}
DRMAA, or Distributed Resource Management Application API, is a high-level API specification for the submission and control of jobs on Distributed Resource Management (DRM) systems. It allows an application programmer to access Grid resources through a single API without having to care about which resource manager is waiting on the other end. Thus DRMAA has the same functionality as the JavaGAT resource management component - it even uses a similar runtime adaptor loading concept.
\section{SGE Remote Execution Capabilities}
\chapter{DRMAA to GAT API Mapping}
\section{Job States}
Mapping job states from DRMAA to GAT is a little weird because of the limited number of states provided by GAT. The following table shows the current mapping. Note that the DRMAA \texttt{*\_SUSPENDED} and \texttt{DONE} states are all mapped to the GAT \texttt{STOPPED} state, so you can't check for job completion just by querying the GAT job state when using the SGE adaptor. Furthermore, DRMAA doesn't provide states for \texttt{PRE\_STAGING} and \texttt{POST\_STAGING}.
\begin{center}
\begin{tabular}{|l|l|l|l|} \hline
\multicolumn{2}{|c|}{GAT Job States}&
\multicolumn{2}{|c|}{SGE DRMAA Job States}\\ \hline\hline
\texttt{00} & \texttt{INITIAL} & \texttt{--} & \texttt{--}\\ \hline
\texttt{01} & \texttt{SCHEDULED} & \texttt{16} & \texttt{QUEUED\_ACTIVE}\\ \hline
\texttt{02} & \texttt{RUNNING} & \texttt{32} & \texttt{RUNNING}\\ \hline
\texttt{03} & \texttt{STOPPED} & \texttt{33} & \texttt{SYSTEM\_SUSPENDED}\\
& & \texttt{34} & \texttt{USER\_SUSPENDED}\\
& & \texttt{35} & \texttt{USER\_SYSTEM\_SUSPENDED}\\
& & \texttt{48} & \texttt{DONE}\\ \hline
\texttt{04} & \texttt{SUBMISSION\_ERROR} & \texttt{64} & \texttt{FAILED}\\ \hline
\texttt{05} & \texttt{ON\_HOLD} & \texttt{17} & \texttt{SYSTEM\_ON\_HOLD}\\
& & \texttt{18} & \texttt{USER\_ON\_HOLD}\\
& & \texttt{19} & \texttt{USER\_SYSTEM\_ON\_HOLD}\\ \hline
\texttt{06} & \texttt{PRE\_STAGING} & \texttt{--} & \texttt{--}\\ \hline
\texttt{07} & \texttt{POST\_STAGING} & \texttt{--} & \texttt{--}\\ \hline
\texttt{08} & \texttt{UNKNOWN} & \texttt{00} & \texttt{UNDETERMINED}\\ \hline
\end{tabular}
\end{center}
Within GAT you have the possibility to access the GAT job state as well as SGE's DRMAA job state. Normally you shouldn't query the DRMAA state within your grid application because doing this makes your application resource broker dependent - but if it comes to the crunch:
\begin{verbatim}
JobInfo info = job.getInfo();
String sge_state = (String) info.get("resManState");
\end{verbatim}
This piece of code gets the DRMAA state number (see table) from the JobInfo HashMap (as a String). Note that you can't assume that all adaptor implementations provide a resManState field in the JobInfo map.
However, every adaptor provides a \texttt{getState()} method to retrieve the GAT state. Use the following code to get the job's state (as an Integer):
\begin{verbatim}
int state = job.getState();
\end{verbatim}
As mentioned above, you can't assume that a job is done just because it's in the GAT \texttt{STOPPED} state. It could also be just suspended for some reason.
The proper way to check for job completion is to check the job's state and, if it equals \texttt{STOPPED}, check the job's exit status. If it's defined, the job is actually done - if not, something went wrong:
\begin{verbatim}
// STOPPED is a GAT state constant of org.gridlab.gat.resources.Job
int state = job.getState();

if (state == Job.STOPPED) {
    if (job.getExitStatus() == -255) {
        // Something went wrong - -255 means undefined
    } else {
        // We have a return code - job is done!
        System.out.println("Job exited with: " + job.getExitStatus());
    }
}
\end{verbatim}
\section{Job Control}
Job control in DRMAA is handled via the \texttt{Session.control(java.lang.String jobID, int action)} method, where \texttt{action} is an integer macro defining the desired action. The following table shows the current mapping between the DRMAA job actions and the GAT job control interface defined in \texttt{org.gridlab.gat.resources.Job}:
\begin{center}
\begin{tabular}{|l|l|l|} \hline
\multicolumn{1}{|c|}{GAT Job Methods}&
\multicolumn{2}{|c|}{SGE DRMAA Job Controls}\\ \hline\hline
\texttt{Job.unSchedule()} & \texttt{4} & \texttt{TERMINATE}\\ \hline
\texttt{Job.stop()} & \texttt{4} & \texttt{TERMINATE}\\ \hline
\texttt{Job.checkpoint()} & \texttt{--} & \texttt{--}\\ \hline
\texttt{Job.migrate()} & \texttt{--} & \texttt{--}\\ \hline
\texttt{Job.cloneJob()} & \texttt{--} & \texttt{--}\\ \hline
\end{tabular}
\end{center}
Note that the job control implementation is not yet complete. Although DRMAA does not directly support actions for checkpointing,
migrating and cloning, it should not be very difficult to combine some of the DRMAA actions to emulate the particular functions; a sketch of the simple case is shown below.
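For example, a \texttt{TERMINATE}-based implementation of \texttt{Job.stop()} could look roughly like the following sketch. It is only a sketch: the \texttt{session} (an org.ggf.drmaa.Session) and \texttt{jobID} fields are assumptions about the adaptor's internal state.
\begin{verbatim}
// Sketch only: forwarding a GAT stop() request to DRMAA.
public void stop() throws GATInvocationException {
    try {
        // DRMAA control action 4 == TERMINATE (see table above)
        session.control(jobID, Session.TERMINATE);
    } catch (DrmaaException e) {
        throw new GATInvocationException("SGE adaptor: " + e);
    }
}
\end{verbatim}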
\section{Job Exit Value}
With the JavaGAT 1.5 Job API extension there's now a function to query the job's exit value. The job exit value means the return code of the executable submitted via JavaGAT. For example, the exit value of \texttt{/bin/false} is \texttt{1} and the exit value of \texttt{/bin/true} is \texttt{0}.\\ \\
The job's exit value is retrieved by using the DRMAA \texttt{getExitStatus()} method. However, this function only returns a value if the job has exited. If you trigger the \texttt{Job.getExitStatus()} function in JavaGAT while the job is still running, you'll get a \texttt{-255} as return value. You'll also get a \texttt{-255} when the job has exited but never returned an exit value.
\section{Job Info}
The job info Map contains the entries listed in the following table. The GAT API specification defines \texttt{hostname}, \texttt{scheduletime}, \texttt{starttime}, \texttt{stoptime} and \texttt{checkpointable} as supported entries - however, the SGE broker adaptor extends this list with some (hopefully) useful entries.
\begin{center}
\begin{tabular}{|l|l|} \hline
\multicolumn{1}{|c|}{Name}&
\multicolumn{1}{|c|}{Description}\\ \hline\hline
\texttt{hostname} & Name of the host on which the job is running. \\ \hline
\texttt{scheduletime} & Time indicating when the job was scheduled. \\
& NOT IMPLEMENTED (will always return \texttt{null}). \\ \hline
\texttt{starttime} & Time indicating when the job was started. \\ \hline
\texttt{stoptime} & Time indicating when the job was stopped. \\ \hline
\texttt{checkpointable} & Indicating if the job is checkpointable. \\
& NOT IMPLEMENTED (will always return \texttt{0}). \\ \hline
\texttt{resManName} & Returns the name of the resource manager. \\
& Will always return "Sun Grid Engine". \\ \hline
\texttt{resManState} & Returns the DRMAA job state. \\ \hline
\texttt{jobID} & Returns the job's ID assigned by SGE \\ \hline
\end{tabular}
\end{center}
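As a quick illustration (a sketch, using only the entry names from the table above), the extended entries can be read from the info map like this:
\begin{verbatim}
JobInfo info = job.getInfo();
System.out.println("resource manager: " + info.get("resManName"));
System.out.println("DRMAA state:      " + info.get("resManState"));
System.out.println("SGE job ID:       " + info.get("jobID"));
\end{verbatim}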
\chapter{Using the Adaptor}
\section{Dependencies}
\section{Build and Run your Application}
\end{document}
| {
"alphanum_fraction": 0.7096816801,
"avg_line_length": 58.6899224806,
"ext": "tex",
"hexsha": "abc0cffa05f4fef0ffb79e6cbb12b0503c7cbea2",
"lang": "TeX",
"max_forks_count": 15,
"max_forks_repo_forks_event_max_datetime": "2022-02-23T14:59:42.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-06-07T10:03:27.000Z",
"max_forks_repo_head_hexsha": "a74d97346121382a8a40ca15fa619e6e4cea917f",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "mF2C/COMPSs",
"max_forks_repo_path": "dependencies/JAVA_GAT/doc/adaptors/SGEResourceBrokerAdaptor/SGEBrokerAdaptor.tex",
"max_issues_count": 4,
"max_issues_repo_head_hexsha": "3b36b4264ac5f58476f5b89a452d9200b4702020",
"max_issues_repo_issues_event_max_datetime": "2019-03-20T14:17:40.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-10-25T12:20:52.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "ramonamela/compss",
"max_issues_repo_path": "dependencies/JAVA_GAT/doc/adaptors/SGEResourceBrokerAdaptor/SGEBrokerAdaptor.tex",
"max_line_length": 476,
"max_stars_count": 31,
"max_stars_repo_head_hexsha": "3b36b4264ac5f58476f5b89a452d9200b4702020",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "ramonamela/compss",
"max_stars_repo_path": "dependencies/JAVA_GAT/doc/adaptors/SGEResourceBrokerAdaptor/SGEBrokerAdaptor.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-23T09:51:05.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-03-06T09:30:03.000Z",
"num_tokens": 2330,
"size": 7571
} |
\section{Discussion}
\textbf{Performance of DSSR strategy, Random strategy and Random plus strategy in terms of finding faults:}
Analysis of the results revealed that DSSR performs better than random and random plus in programs with block and strip patterns of faults. However, since not all programs contain faults in the form of block and strip patterns, the overall results show that the random plus strategy stood first, the DSSR strategy second, and the pure random strategy third in finding faults. \\
\textbf{Time taken by DSSR strategy, Random strategy and Random plus strategy to execute tests:}
To execute an equal number of test cases, the DSSR strategy took slightly more execution time than the pure random and random plus test strategies. This is not unusual, and we expected similar behaviour, because the pure random algorithm selects a random input of the required type with minimal computation, so its process is very quick. The random plus and DSSR strategies, on the other hand, perform additional computation to maintain the list of interesting values and to select type-correct test values from the list when required. The process of adding values to the list and selecting the required values from it consumes extra time, which is the main reason the random plus and DSSR strategies take a little longer. Thus, in executing tests, the pure random, random plus and DSSR strategies come first, second and third, respectively. \\
\textbf{Effect of test duration in terms of time and number of tests on test results:}
We found that when the test duration increases, whether through more time or more test cases, the performance of the DSSR strategy improves relative to random and random plus. This is because, as the test duration or the number of tests increases, the list of interesting values also grows; the DSSR strategy then has enough relevant values in the list and can easily pick one from it instead of selecting a value randomly or from the static list of random plus.\\
\textbf{Effect of number of faults on results:}
We also found that the DSSR strategy performs better when there are more faults in the code. The reason is that when a fault is found in the code, the DSSR strategy adds the neighbouring values of the fault-finding value to the list of interesting values. Doing this grows the list of interesting values, so the strategy is provided with more relevant test data, resulting in a higher chance of finding faults.\\
\textbf{Can Pure Random Testing perform better than DSSR strategy:}
The experimental results indicated that occasionally pure random testing performs better than the DSSR strategy if the SUT contains a point pattern of failures rather than block and strip patterns. This is due to the fact that in such cases faults do not lie in the neighbourhood of a found fault, so adding the neighbouring values of the fault-finding value makes no impact on performance, and the extra computational time becomes a liability.\\
\textbf{Dependence of DSSR strategy on Random Testing:}
During the experiments we found that if the fault-finding value is not in the list of interesting values, then the test is dependent on random testing. In that case the DSSR strategy has to wait for random testing to find the first fault; only then will the DSSR strategy add its neighbouring values to the list of interesting values.
| {
"alphanum_fraction": 0.8159364893,
"avg_line_length": 170.05,
"ext": "tex",
"hexsha": "691752f0dd870068dac979902f4172462ff56546",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "6cf105977c25eb94e641b06cb443bbe1573ef6b1",
"max_forks_repo_licenses": [
"BSD-4-Clause"
],
"max_forks_repo_name": "maochy/yeti-test",
"max_forks_repo_path": "papers/tools_DSSR/papers.Tools_DSSR/Discussion/Discussion.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "6cf105977c25eb94e641b06cb443bbe1573ef6b1",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-4-Clause"
],
"max_issues_repo_name": "maochy/yeti-test",
"max_issues_repo_path": "papers/tools_DSSR/papers.Tools_DSSR/Discussion/Discussion.tex",
"max_line_length": 860,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "6cf105977c25eb94e641b06cb443bbe1573ef6b1",
"max_stars_repo_licenses": [
"BSD-4-Clause"
],
"max_stars_repo_name": "maochy/yeti-test",
"max_stars_repo_path": "papers/tools_DSSR/papers.Tools_DSSR/Discussion/Discussion.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 671,
"size": 3401
} |
\input{../utils/slide-preamble1.tex}
\input{../utils/slide-preamble2.tex}
\input{../utils/macros.tex}
\bibliography{../bib/references}
\input{../utils/title-info.tex}
\title[Evidence for Evolution]{Evidence for Evolution}
% \date{\today}
\date{April 1, 2015}
\begin{document}
\begin{noheadline}
\maketitle
\end{noheadline}
\nopost{
\begin{noheadline}
\begin{frame}[c]
\vspace{-6mm}
\begin{center}
\includegraphics[height=1.3\textheight]{../images/seating-chart.pdf}
\end{center}
\end{frame}
\end{noheadline}
}
\begin{noheadline}
\begin{frame}
\begin{adjustwidth}{-2em}{-2em}
\vspace{-5mm}
\includegraphics<1| handout:0>[page=1,width=\paperwidth]{./johns-slides.pdf}
\includegraphics<2| handout:0>[page=2,width=\paperwidth]{./johns-slides.pdf}
\includegraphics<3| handout:0>[page=3,width=\paperwidth]{./johns-slides.pdf}
\includegraphics<4| handout:0>[page=4,width=\paperwidth]{./johns-slides.pdf}
\includegraphics<5| handout:1>[page=5,width=\paperwidth]{./johns-slides.pdf}
\includegraphics<6| handout:0>[page=6,width=\paperwidth]{./johns-slides.pdf}
\includegraphics<7| handout:0>[page=7,width=\paperwidth]{./johns-slides.pdf}
\includegraphics<8| handout:0>[page=8,width=\paperwidth]{./johns-slides.pdf}
\includegraphics<9| handout:0>[page=9,width=\paperwidth]{./johns-slides.pdf}
\includegraphics<10| handout:2>[page=10,width=\paperwidth]{./johns-slides.pdf}
\includegraphics<11| handout:0>[page=11,width=\paperwidth]{./johns-slides.pdf}
\includegraphics<12| handout:3>[page=12,width=\paperwidth]{./johns-slides.pdf}
\includegraphics<13| handout:4>[page=13,width=\paperwidth]{./johns-slides.pdf}
\includegraphics<14| handout:0>[page=14,width=\paperwidth]{./johns-slides.pdf}
\includegraphics<15| handout:5>[page=15,width=\paperwidth]{./johns-slides.pdf}
\end{adjustwidth}
\end{frame}
\end{noheadline}
% \begin{noheadline}
% \begin{frame}
% \#ObserveEverything
% \bigskip
% \url{http://www.sciencefriday.com/teacher-resources/09/26/2014/science-club-observeeverything.html?interest=2&audience=4&series=34}
% \end{frame}
% \end{noheadline}
\blankslide
\begin{noheadline}
\begin{frame}
\begin{clickerquestion}
\item Suppose you wanted to do an experiment on how a new
blood pressure medication affects the risk of stroke. How
could you ensure that there was only one difference between
the treatment groups, but still have the most meaningful data
possible?
\begin{clickeroptions}
\item \clickeranswer{Have the volunteers live normally, but use a
large sample and randomly assign the treatments.}
\item Have the volunteers live in a hotel, where
environmental variables can be carefully controlled.
\item Have the volunteers live normally, but record all
environmental variables for each individual so you can
factor them out.
\item Compare the treatments in identical twins (people
who are genetically identical).
\end{clickeroptions}
\end{clickerquestion}
\end{frame}
\end{noheadline}
\begin{noheadline}
\begin{frame}
\frametitle{Today's issues:}
\tableofcontents
\end{frame}
\end{noheadline}
\section{Non-experimental evidence in science}
\begin{frame}
\begin{itemize}
\item<1-> In lab next week, you will be considering the nature of
experimental evidence.
\item<2-> What other sorts of evidence do scientists use?
\begin{itemize}
\item Astronomers:
\wbox{radio waves, light emissions, imaging, modeling based on
known processes and forces}
\item Geologists:
\wbox{inference about past events from current
landforms/processes (e.g., glacial striations)}
\end{itemize}
\end{itemize}
\end{frame}
% Do the “shoes and belt exercise” here
\begin{frame}
\begin{quote}
\small
All science textbooks purchased with state moneys must have the
following notice placed prominently in them.
This textbook discusses evolution, a controversial theory some
scientists present as a scientific explanation for the origin of living
things, such as plants, animals, and humans.
\highlight{No one was present} when life first appeared on earth.
Therefore, any statement about life’s origins should be considered
\highlight{as theory, not fact}.
The word ``evolution'' may refer to many types of change. Evolution
describes changes that occur within a species. (White moths, for
example, may ``evolve'' into gray moths.) This process is
microevolution, which can be observed and described as fact.
Evolution may also refer to the change of one living thing into
another, such as reptiles into birds. \highlight{This process}, called
macroevolution, \highlight{has never been observed and should be
considered a theory}.
\end{quote}
\vspace{-2mm}
\wbox{\small Theory = A testable explanation for a broad suite of observations;
supported by a lot of evidence; generates new hypotheses}
\end{frame}
\note[itemize]{
\item Highlight these passages in your notes.
\item Focus on diff of theory in everyday English v biology/science
\item Are theory and facts opposites? NO! Theories explain facts (data)!
Explanation more powerful than data.
\item Tennessee 1926 was the Scopes trial = illegal to teach evolution
\item Louisiana 1987 was equal time legislation struck down by the US
Supreme Court; Edwards v Aguillard
\item Washington State Legislature introduced Senate Bill 6058 in 2002
\item Washington State Senate Bill 6500 was even worse.
\item Pennsylvania 2005 Dover trial
\item Germ theory of disease, cell theory, theory of gravitation, atomic
theory, heliocentric theory of the solar system
}
% First amendment
\begin{frame}
\begin{itemize}
\item Pasteur published the germ theory of disease in the 1860s; no one
saw a virus until the 1950s.
\item The atomic theory of matter was proposed in the early 1900s; no
one saw an atom until the early 2000s.
\item No one has seen a graviton, but you are not floating off into
space!
\end{itemize}
How do these observations relate to the statements highlighted in purple on
the previous slide?
\begin{uncoverenv}<2->
\begin{itemize}
\item \href{http://www.pooprints.com/}{PooPrints} \ldots no joke!
\note[item]<1>{Dog poo CSI}
\end{itemize}
\end{uncoverenv}
\wbox{\small There was persuasive evidence to support these theories, even
though the evidence was not ``eye-witness''}
\vspace{1mm}
\uncover<3->{What is a theory?}
\wbox{\small An explanation for a broad class of phenomena that is
supported by a lot of evidence}
\note[item]<2>{human perception as a model; our brain processes data to create a (very)
subjective model of the world}
% Ask: Did OJ Simpson kill his estranged wife?
\end{frame}
\begin{frame}
\begin{itemize}
\item<1-> Science starts with a question
\begin{itemize}
\item ``The mystery of mysteries'': Where do species come from, and
how have they come to be so well adapted to their environments?
\note[item]<1>{Quote from John Herschel letter to Charles Lyell
about Lyell's Geology book}
\end{itemize}
\item<2-> Scientific theories usually have a pattern component and a
process component
\begin{itemize}
\item The pattern component is
\wbox{What we observe---``facts'' about the natural world---the
way things are: WHAT?}
\vspace{1cm}
\item The process component is
\wbox{The process responsible for that pattern (underlying
mechanism, physical force or event): HOW/WHY?}
\end{itemize}
\end{itemize}
\end{frame}
\section{Plato/Aristotle/Special creation}
\begin{frame}
\frametitle{Today's issues:}
\tableofcontents[currentsection]
\end{frame}
\begin{frame}
Process: \\
\wbox{A supernatural being instantaneously and independently created all of
the species observed today}
\note[item]{Special creation aka scientific creationism aka intelligent design}
\uncover<2->{Can the process component be tested? Why or why not?}
\vspace{1mm}
\wbox{No---it was a one-time supernatural occurrence}
\vspace{1mm}
\uncover<2->{Why is it called ``special'' creation?}
\vspace{1mm}
\wbox{It is not a natural process; it is supernatural}
\vspace{1mm}
\uncover<2->{Is it a valid scientific hypothesis or theory?}
\vspace{1mm}
\wbox{No---it lacks a testable natural process component}
\end{frame}
\begin{frame}
Pattern:
\begin{uncoverenv}<2->
\begin{itemize}
\item Special creation predicts:
\begin{enumerate}[a)]
\item Species are young
\item Species are unrelated (created independently)
\item Species are static
\end{enumerate}
\end{itemize}
\end{uncoverenv}
\end{frame}
\begin{frame}
Testing the prediction that (a) species are young:
\begin{description}
\item[Uniformitarianism] Processes occurring today also occurred in the
past (e.g., sedimentation, erosion)
\wbox{Relative age---erosion and sedimentation are slow---long times
required to create sandstones, chalk, mudstones, canyons, etc.}
\note[item]{Hutton in late 1700s; Lyell in 1830}
\item[Radiometric dating]
\wbox{Absolute dating of rocks---age of the earth is
$\approx$4.6 billion years; first life forms at $\approx$3.4 billion}
\note[item]{Henri Becquerel; Marie Curie 1900s-current data}
\end{description}
What do uniformitarianism and radiometric dating have to say about the age
of the Earth? \\
\end{frame}
\begin{frame}[t]
Testing the prediction that (b) species are independent/unrelated:
\begin{enumerate}
\item Geographic relationships; e.g., Wallace's Line in Southeast Asia
% Draw this
\end{enumerate}
\vspace{-2mm}
\begin{center}
\includegraphics<1| handout:1>[width=0.8\textwidth]{../images/se-asia-present.png}
\includegraphics<2| handout:0>[width=0.8\textwidth]{../images/se-asia-120.png}
\end{center}
\end{frame}
\note[itemize]{
\item Many other island groups with similar patterns
\item Anolis lizards in Caribbean
\item Fruit flies and silverswords in Hawaii
}
\begin{frame}
\begin{clickerquestion}
\item What does special creation predict about the
geographic relationships of closely related species?
\begin{clickeroptions}
\item Similar species should be found in the same geographic area.
\item Species that look similar should be found in similar
habitats, no matter where they are in terms of geography.
\item In terms of geography, species that look similar should be
distributed at random.
\item \clickeranswer{No specific predictions (species are found
where they are because God wanted them that way).}
\end{clickeroptions}
\end{clickerquestion}
\wbox{What predictions does the theory of special creation make? Problem:
can't test the predictions (this is unscientific)}
\end{frame}
\begin{frame}[t]
Testing the prediction that (b) species are independent/unrelated:
\begin{enumerate}[\begingroup 2.\endgroup]
\item Homology (``same source'') = similarities in form among species
(e.g., timber wolf, Ethiopian wolf, \ldots dog)
\end{enumerate}
\vspace{-3mm}
\begin{uncoverenv}<2->
\begin{center}
\includegraphics[height=2.9cm]{../images/canis-lupus.jpg}\hspace{0.1mm}
\includegraphics[height=2.9cm]{../images/canis-simensis.jpg}\hspace{0.1mm}
\includegraphics[height=2.9cm]{../images/luna.jpg}
% \includegraphics[height=2.7cm]{../images/thylacinus.jpg}
\end{center}
\end{uncoverenv}
\vspace{-3mm}
\uncover<3->{
Why are timber wolves, Ethiopian wolves, and dogs extremely similar at the
genetic, developmental, and structural levels?
\wbox{Divine intervention? Or, because they are closely related; they
inherited traits from an ancestor}
}
\note[item]{Africa's most endangered carnivore}
\end{frame}
\begin{frame}
Testing the prediction that (c) species are static:
\begin{enumerate}
\item Extinction/the law of succession:
\vspace{1cm}
\item Transitional features:
\vspace{1cm}
\item Vestigial traits:
\vspace{1cm}
\item Evolution in action:
\end{enumerate}
    \wbox{Find examples of each pattern above. Are these patterns more
        consistent with (predicted by) special creation or evolution by
        natural selection? What do they say about the internal consistency of
        evolution?}
% Succession is like geographic relationships, except add element of time
% In general: under evolution by natural selection, all of these patterns
% are logical---under special creation, they are puzzling
% Scientific theories are considered powerful if they explain otherwise
% puzzling observations
% Internal consistency!! Predictions supported by many independent sourced
% of data
\end{frame}
\begin{frame}
\begin{clickerquestion}
\item Plant cells have 3 types of genomes (nucleus, mitochondria, and
chloroplast) that are inherited independently. From each, we can
estimate the relationships among
species based on similarities and differences in homologous DNA
sequences. If species evolve (share ancestry), what pattern
is predicted?
\begin{clickeroptions}
\item The relationships will be the same for the mitochondria and
chloroplast, but different for the nuclear genome.
\item The relationships among plant species will be different for
each genome.
\item \clickeranswer{The relationships among plant species will be
the same for all 3 genomes.}
\item No specific pattern is predicted.
\end{clickeroptions}
\end{clickerquestion}
% What predictions does the theory of special creation make?
% Problem: can't test the predictions (this is unscientific)
\end{frame}
\begin{frame}
Current consensus among working scientists and mainstream theologians:
\begin{itemize}
\item<2-> Science deals with questions that can be answered by going out
and measuring something.
\item<3-> Religion deals with questions that cannot be answered by
measuring something.
\item<4-> By definition\footnote{\scriptsize Webster’s Encyclopedic Unabridged
Dictionary of the English Language, 1966}, belief and faith
(including religious belief and faith) do not depend on evidence.
\end{itemize}
\end{frame}
\note[itemize]{
\item This is the distinction between science, pseudoscience, and
religion
\item There is no conflict between science and religion---except for
literal interpretation of the creation story of the Book of Genesis
\item US courts have always said No
}
\section{``Lamarckian'' evolution}
\begin{frame}
\frametitle{Today's issues:}
\tableofcontents[currentsection]
\end{frame}
\begin{frame}
Pattern:
\begin{itemize}
\item Key claim is that evolutionary change is
\wbox{progressive---leads to ladder of life}
\end{itemize}
\bigskip
Process:
\begin{itemize}
\item Key claim is that \highlight{individuals} change in
response to changes in the environment, and
\wbox{pass those changes on to offspring. As a result the
characteristics of populations change through time}
\end{itemize}
\end{frame}
\begin{frame}
\begin{clickerquestion}
\item Under the theory of evolution as formulated by Lamarck, why would
the traits of Pacific oysters change in response to ocean
acidification?
\begin{clickeroptions}
\item \clickeranswer{Individuals in low pH conditions would grow
shells more efficiently in response. These changes would be
passed on to offspring.}
\item Individuals that happened to be more efficient at
shell-building would have more offspring than others, so the
characteristics of the population would change over time.
\item A supernatural power would make the oysters more efficient at
shell-building in acidic conditions, so they would adapt.
\item Oysters would be eliminated from low pH habitats, and only be
found in higher pH habitats.
\end{clickeroptions}
\end{clickerquestion}
\end{frame}
\end{document}
| {
"alphanum_fraction": 0.6700136924,
"avg_line_length": 36.4407484407,
"ext": "tex",
"hexsha": "0d3de1ea3bde546411f30f42e852d7120a322d2e",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "c212c94bd532f72f83d9d48d4393ada71f8b7b5a",
"max_forks_repo_licenses": [
"CC-BY-4.0"
],
"max_forks_repo_name": "joaks1/bio-180-intro-bio",
"max_forks_repo_path": "lecture-materials/03-evidence-for-evolution/03-evidence-for-evolution.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "c212c94bd532f72f83d9d48d4393ada71f8b7b5a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC-BY-4.0"
],
"max_issues_repo_name": "joaks1/bio-180-intro-bio",
"max_issues_repo_path": "lecture-materials/03-evidence-for-evolution/03-evidence-for-evolution.tex",
"max_line_length": 137,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "c212c94bd532f72f83d9d48d4393ada71f8b7b5a",
"max_stars_repo_licenses": [
"CC-BY-4.0"
],
"max_stars_repo_name": "joaks1/bio-180-intro-bio",
"max_stars_repo_path": "lecture-materials/03-evidence-for-evolution/03-evidence-for-evolution.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 4311,
"size": 17528
} |
\documentclass[12pt]{article}
\usepackage[margin=0.8in]{geometry}
\usepackage{amsmath,amsthm,amssymb,amsfonts}
\newcommand{\N}{\mathbb{N}}
\newcommand{\Z}{\mathbb{Z}}
\newenvironment{problem}[2][Problem]{\begin{trivlist}
\item[\hskip \labelsep {\bfseries #1}\hskip \labelsep {\bfseries #2.}]}{\end{trivlist}}
%If you want to title your bold things something different just make another thing exactly like this but replace "problem" with the name of the thing you want, like theorem or lemma or whatever
\begin{document}
%\renewcommand{\qedsymbol}{\filledbox}
%Good resources for looking up how to do stuff:
%Binary operators: http://www.access2science.com/latex/Binary.html
%General help: http://en.wikibooks.org/wiki/LaTeX/Mathematics
%Or just google stuff
\title{Citadel Boston Regional Datathon 2021}
\author{Shinjini Ghosh, Lay Jain, Pawan Goyal}
\date{\today}
\maketitle
\section*{SEIVR Model Equations}
\begin{align*}
% \def\arraystretch{1.5}
% \renewcommand\arraystretch{1.5}
\Dot{S} & = \alpha R_S - \frac{S}{N}\beta I- \frac{S}{N}\chi E-\rho S \\[4pt]
\Dot{V_1} &= \rho S + \rho R_S - \frac{V_1}{N}\beta I - \frac{V_1}{N}\chi E - \phi V_1 \\[4pt]
\Dot{V_2} &= \phi V_1 + \phi ' R_1 + (1-\delta_2)I_2 - \frac{V_2}{N} \beta I - \frac{V_2}{N} \chi E \\[4pt]
\Dot{E_1} &= \frac{V_1}{N} \beta I + \frac{V_1}{N} \chi E - \theta E_1\\[4pt]
\Dot{E_2} &= \frac{V_2}{N} \beta I + \frac{V_2}{N} \chi E - \theta E_2 \\[4pt]
\Dot{E_S} &= \frac{S}{N} \beta I + \frac{S}{N} \chi E - \theta E_S \\[4pt]
\Dot{I_1} &= \theta E_1 - \delta_1 I_1 - (1-\delta_1) I_1 \\[4pt]
\Dot{I_2} &= \theta E_2 - \delta_2 I_2 - (1-\delta_2) I_2 \\[4pt]
\Dot{I_S} &= \theta E_S - \delta_S I_S - (1-\delta_S) I_S \\[4pt]
\Dot{R_1} &= (1-\delta_1) I_1 - \phi ' R_1 \\[4pt]
\Dot{R_S} &= (1-\delta_S) I_S - \rho R_S - \alpha R_S \\[4pt]
\Dot{D} &= \delta_1 I_1 + \delta_2 I_2 + \delta_S I_S \\
\end{align*}
\vspace{-4mm}
where
\vspace{-5mm}
\begin{align*}
I &= I_1 + I_2 + I_S \\
E &= E_1 + E_2 + E_S \\
N &= S + V_1 + V_2 + E_1 + E_2 + E_S + I_1 + I_2 + I_S + R_1 + R_S + D \\
\end{align*}
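For readers who want to experiment with the model, a minimal numerical sketch in Python/SciPy is given below. It is not part of the original submission: all rate values are illustrative placeholders, \texttt{phi2} stands for $\phi'$, and the $\Dot{I}$ equations are simplified using $\delta+(1-\delta)=1$.
\begin{verbatim}
# Minimal sketch (not part of the original report): numerical
# integration of the SEIVR system with SciPy. All parameter values
# below are illustrative placeholders.
from scipy.integrate import solve_ivp

alpha, beta, chi, rho = 0.01, 0.30, 0.15, 0.005
phi, phi2, theta = 0.02, 0.02, 0.20          # phi2 stands for phi'
d1, d2, dS = 0.010, 0.001, 0.020             # delta_1, delta_2, delta_S

def seivr(t, y):
    S, V1, V2, E1, E2, ES, I1, I2, IS, R1, RS, D = y
    N = y.sum()
    E, I = E1 + E2 + ES, I1 + I2 + IS
    return [
        alpha*RS - S/N*beta*I - S/N*chi*E - rho*S,                # S
        rho*S + rho*RS - V1/N*beta*I - V1/N*chi*E - phi*V1,       # V1
        phi*V1 + phi2*R1 + (1-d2)*I2 - V2/N*beta*I - V2/N*chi*E,  # V2
        V1/N*beta*I + V1/N*chi*E - theta*E1,                      # E1
        V2/N*beta*I + V2/N*chi*E - theta*E2,                      # E2
        S/N*beta*I + S/N*chi*E - theta*ES,                        # E_S
        theta*E1 - I1,   # I1: delta_1*I1 + (1-delta_1)*I1 = I1
        theta*E2 - I2,   # I2
        theta*ES - IS,   # I_S
        (1-d1)*I1 - phi2*R1,                                      # R_1
        (1-dS)*IS - rho*RS - alpha*RS,                            # R_S
        d1*I1 + d2*I2 + dS*IS,                                    # D
    ]

y0 = [9990, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0]  # start with 10 infected
sol = solve_ivp(seivr, (0, 365), y0, max_step=1.0)
\end{verbatim}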
\end{document}
| {
"alphanum_fraction": 0.6221709007,
"avg_line_length": 40.8490566038,
"ext": "tex",
"hexsha": "9797f0c26f6533ba26108ce13f22da189551eaaf",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "6bb0957436f02fe91d9de46eae4d3171f2b42591",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "layjain/BRD-21",
"max_forks_repo_path": "reports/writeup.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "6bb0957436f02fe91d9de46eae4d3171f2b42591",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "layjain/BRD-21",
"max_issues_repo_path": "reports/writeup.tex",
"max_line_length": 193,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "6bb0957436f02fe91d9de46eae4d3171f2b42591",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "layjain/BRD-21",
"max_stars_repo_path": "reports/writeup.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 911,
"size": 2165
} |
% ------------------------------------------------------------------------+
% Copyright (c) 2001 by Punch Telematix. All rights reserved. |
% |
% Redistribution and use in source and binary forms, with or without |
% modification, are permitted provided that the following conditions |
% are met: |
% 1. Redistributions of source code must retain the above copyright |
% notice, this list of conditions and the following disclaimer. |
% 2. Redistributions in binary form must reproduce the above copyright |
% notice, this list of conditions and the following disclaimer in the |
% documentation and/or other materials provided with the distribution. |
% 3. Neither the name of Punch Telematix nor the names of other |
% contributors may be used to endorse or promote products derived |
% from this software without specific prior written permission. |
% |
% THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED |
% WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF |
% MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
% IN NO EVENT SHALL PUNCH TELEMATIX OR OTHER CONTRIBUTORS BE LIABLE |
% FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
% CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
% SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR |
% BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, |
% WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE |
% OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN |
% IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
% ------------------------------------------------------------------------+
\chapter{Glossary}
\begin{description}
\item \ac
\item \agent
\item \aggregatorBundle
\item \aggregatorComponentVendor
\item \aggregatorService
\item \amic
\item \amicInterfaceConnector
\item \amicJavaApplication
\item \amicJavaHost
\item \amicStandardNetwork
\item \amicVehicleInterface
\item \api
\item \application
\item \applicationLayer
\item \asnOne
\item \asic
\item \asp
\item \asr
\item \asynchronous
\item \authentication
\item \avc
\item \awt
\item \bluetooth
\item \bridge
\item \bundles
\item \ca
\item \canbus
\item \ccc
\item \cdma
\item \certificate
\item \cgiScript
\item \client
\item \cmi
\item \codec
\item \context
\item \controller
\item \corba
\item \cpu
\item \crm
\item \csp
\item \customerAccesAdaptor
\item \customAccesConnector
\item \dab
\item \daisychain
\item \darc
\item \device
\item \dgps
\item \dsrc
\item \dtd
\item \ejb
\item \embeddedComponents
\item \expandability
\item \fcp
\item \fdma
\item \fpga
\item \firewall
\item \firewire
\item \garbageCollection
\item \gateway
\item \gprs
\item \gps
\item \gsm
\item \hmi
\item \host
\item \html
\item \http
\item \idb
\item \idbc
\item \ietf
\item \interchangeability
\item \interfaceConnector
\item \internetworkingProtocol
\item \interoperability
\item \ip
\item \ipGateway
\item \isdn
\item \isochronous
\item \isp
\item \its
\item \jTWOse
\item \jar
\item \java
\item \javaBeans
\item \javaApplication
\item \jcp
\item \jdbc
\item \jdk
\item \jms
\item \jmx
\item \jni
\item \jre
\item \jsp
\item \jvm
\item \messageSet
\item \mmtt
\item \mmiApi
\item \most
\item \mpeg
\item \nativeGateway
\item \network
\item \node
\item \oem
\item \osgi
\item \osgiFramework
\item \osiReferenceModel
\item \otf
\item \otfTerminal
\item \pcmcia
\item \pda
\item \persistence
\item \pki
\item \pmode
\item \ppp
\item \privateKey
\item \proofOfConcept
\item \proxy
\item \publickey
\item \qos
\item \reducedPower
\item \rfc
\item \rfp
\item \risc
\item \rmi
\item \rsc
\item \rtos
\item \sae
\item \salutation
\item \sdk
\item \service
\item \serviceAggregator
\item \serviceOsgi
\item \sgml
\item \SIM
\item \sms
\item \SP
\item \sql
\item \sqlj
\item \streaming
\item \synchronous
\item \tcp
\item \tdma
\item \token
\item \tts
\item \telematics
\item \uml
\item \umts
\item \upgradability
\item \utility
\item \vehicleManufacturerNetworkGateway
\item \vehicleServices
\item \vehicleServicesInterface
\item \vm
\item \vpn
\item \wap
\item \workstationReferenceImplementation
\item \xml
\end{description}
\documentclass{article}
\usepackage[utf8]{inputenc}
\usepackage[backend=biber]{biblatex}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{dsfont}
\addbibresource{bib.bib}
\setlength{\parindent}{0em}
\bibliography{bib}
\setlength{\parskip}{6pt}
\usepackage[margin=1.0in]{geometry}
\usepackage{graphicx}
\usepackage{caption}
\usepackage{subcaption}
\usepackage{wrapfig}
\usepackage{url}
\title{Intro to deep learning with PyTorch}
\author{Miguel A. Saavedra-Ruiz}
\date{May 2020}
\linespread{1.0}
\nocite{*}
\begin{document}
\maketitle
\section*{Autoencoders}
The autoencoder is a very simple neural network, similar to the multi-layer perceptron (MLP). This network architecture is designed to reproduce its input at the output layer. The main difference between an autoencoder and an MLP is that in an autoencoder the number of input neurons is equal to the number of output neurons.
\begin{figure}[ht]
\centering
\includegraphics[width=0.35\textwidth,height=0.35\textheight,keepaspectratio]{images/auto.png}
\captionsetup{justification=centering}
\caption{Autoencoder with three hidden layers}
\label{fig:f1}
\end{figure}
Fig. \ref{fig:f1} presents a simple stacked autoencoder with three hidden layers. As can be seen, the output size is the same as the input size. One important thing to note is that on the left side of the image the model starts with five input neurons, which are reduced to three neurons in the first hidden layer and then to two in the second hidden layer. Subsequently, the width is gradually increased until the output layer has the same number of neurons as the input layer.
\begin{figure}[ht]
\centering
\includegraphics[width=0.25\textwidth,height=0.25\textheight,keepaspectratio]{images/simple_auto.png}
\captionsetup{justification=centering}
\caption{Autoencoder with one hidden layer}
\label{fig:f2}
\end{figure}
To explain the basic idea behind an autoencoder, let's use the simpler model presented in Fig. \ref{fig:f2}. This model has one hidden layer, and the output has the same dimension as the input layer. For the sake of notation, see Fig. \ref{fig:f3}, which is another representation of the same model presented in Fig. \ref{fig:f2}. Recall that an autoencoder is a feedforward network that is trained to reproduce its input at the output. Therefore, what this network architecture is trying to do is directly mimic its input at the output layer.
As Fig. \ref{fig:f3} shows, \(W\) are the weights between the input layer and the hidden layer, \(b\) is the bias term of the hidden layer, and \(h(x)\) is the output of the hidden layer. Furthermore, \(W^{out}\) are the weights between the hidden layer and the output layer, \(c\) is the bias term of the output layer, and \(\overline{x}\) is the output of the final layer.
\begin{figure}[ht]
\centering
\includegraphics[width=0.35\textwidth,height=0.35\textheight,keepaspectratio]{images/architecture.png}
\captionsetup{justification=centering}
\caption{Representation of autoencoder with one hidden layer}
\label{fig:f3}
\end{figure}
Mathematically, the equations for an autoencoder are exactly the same as the ones for an MLP. In this case, the hidden layer is given by \eqref{eq:1} and the output layer by \eqref{eq:2}. There are two main parts in an autoencoder: the first chunk, which goes from the input layer to the hidden layer (encoder), and the second part, which goes from the hidden layer to the output layer (decoder). Hence, for the autoencoder presented in Fig. \ref{fig:f2}, the encoder equation is given by \eqref{eq:1} and the decoder by \eqref{eq:2}.
\begin{equation}
h(x) = \sigma(Wx + b)
\label{eq:1}
\end{equation}
\begin{equation}
\overline{x} = \sigma(W^{out}h(x) + c)
\label{eq:2}
\end{equation}
The main function of the encoder section is to find a compressed representation of the input. On the other hand, the function of the decoder is to reproduce the input at the output layer. A really common practice with autoencoders is to use tied weights. This means that the weights going from the input to the hidden layer are reused, transposed, as the weights between the hidden and output layer: \(W^{out} = W^T\). This approach is possible because the autoencoder is trained to reproduce its input at the output layer. It is important to mention that only the weights are tied; the biases are not.
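As a minimal PyTorch sketch of Eqs. \eqref{eq:1}--\eqref{eq:2} with tied weights (the layer sizes and the sigmoid activation are illustrative choices, not prescribed by the equations):
\begin{verbatim}
import torch
import torch.nn as nn
import torch.nn.functional as F

class TiedAutoencoder(nn.Module):
    """One-hidden-layer autoencoder with tied weights (W_out = W^T).
    Only the weight matrix is shared; the biases b and c stay separate."""
    def __init__(self, n_inputs=784, n_hidden=32):
        super().__init__()
        self.W = nn.Parameter(0.01 * torch.randn(n_hidden, n_inputs))
        self.b = nn.Parameter(torch.zeros(n_hidden))   # hidden-layer bias
        self.c = nn.Parameter(torch.zeros(n_inputs))   # output-layer bias

    def forward(self, x):
        h = torch.sigmoid(F.linear(x, self.W, self.b))          # encoder, Eq. (1)
        return torch.sigmoid(F.linear(h, self.W.t(), self.c))   # decoder, Eq. (2)
\end{verbatim}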
The key idea in an autoencoder is that the encoder section tries to create an internal representation that maintains all the information of the input. The really important aspect of the encoder is that it can be used to extract meaningful features from the input data and create a compressed representation. Owing to this property, autoencoders have been used as a nonlinear counterpart of principal component analysis (PCA).
A stacked autoencoder is the one presented in Fig. \ref{fig:f1}: it simply has more hidden layers and hence a greater capacity for abstraction.
One example of a simple autoencoder is presented in the code \textit{Simple\_Autoencoder.ipynb}. This code receives the MNIST dataset as input, and the idea is to output the same input digit. The architecture of this simple autoencoder is presented in Fig. \ref{fig:f4}.
\begin{figure}[ht]
\centering
\includegraphics[width=0.45\textwidth,height=0.45\textheight,keepaspectratio]{images/simple_autoencoder.png}
\captionsetup{justification=centering}
\caption{Simple autoencoder example}
\label{fig:f4}
\end{figure}
To understand the decoder phase in a convolutional neural network, it is necessary to introduce two additional concepts which make it possible to generate an output image of the same size as the input image. Let's first talk about \textbf{Upsampling}. This technique is the opposite of downsampling: the idea is to increase the dimensions of the feature map, whereas downsampling reduces them.
There are different techniques for upsampling. The first is called \textbf{Nearest Neighbors} and is presented in Fig. \ref{fig:f5}. As the name suggests, it takes an input pixel value and copies it to its K nearest neighbors in the output, where K depends on the expected output size.
\begin{figure}[ht]
\centering
\includegraphics[width=0.45\textwidth,height=0.45\textheight,keepaspectratio]{images/nn.png}
\captionsetup{justification=centering}
\caption{Nearest Neighbors (Upsampling)}
\label{fig:f5}
\end{figure}
\textbf{Bed of Nails} is another upsampling technique, where the value of each input pixel is copied to the corresponding position in the output image and zeros fill the remaining positions (Fig. \ref{fig:f6}).
\begin{figure}[ht]
\centering
\includegraphics[width=0.45\textwidth,height=0.45\textheight,keepaspectratio]{images/bn.png}
\captionsetup{justification=centering}
\caption{Bed of Nails (Upsampling)}
\label{fig:f6}
\end{figure}
Finally, there is \textbf{Max-Unpooling}, which reverses the max-pooling operation (taking the maximum among all the values under the kernel). It works by saving the index of the maximum value for every max-pooling layer during the encoding step. The saved index is then used during the decoding step, where each input pixel is mapped back to its saved index, with zeros filled in everywhere else.
\begin{figure}[ht]
\centering
\includegraphics[width=0.85\textwidth,height=0.85\textheight,keepaspectratio]{images/mn.png}
\captionsetup{justification=centering}
\caption{Max-Unpooling (Upsampling)}
\label{fig:f6b}
\end{figure}
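In PyTorch, this index bookkeeping is handled by the pooling layer itself when \texttt{return\_indices=True} is set; a minimal sketch:
\begin{verbatim}
import torch
import torch.nn as nn

pool = nn.MaxPool2d(2, stride=2, return_indices=True)
unpool = nn.MaxUnpool2d(2, stride=2)

x = torch.randn(1, 1, 4, 4)
y, indices = pool(x)         # encoding step: keep the argmax indices
x_up = unpool(y, indices)    # decoding step: maxima restored, zeros elsewhere
print(x_up.shape)            # torch.Size([1, 1, 4, 4])
\end{verbatim}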
Besides upsampling, there is another popular method used to increase the dimensions of a feature map, called \textbf{Transposed convolution}. This method is widely used, with applications in image super-resolution and image segmentation, and it is usually preferred over upsampling. A transposed convolution takes an input feature map and generates an output feature map whose spatial dimension is greater than that of the input. This technique uses a kernel whose values are learned during the training procedure.
The transposed convolution has the same parameters as a convolutional layer: the stride, padding, and kernel size are exactly the same. Nevertheless, it is necessary to introduce new parameters for this process. The first parameter is \(z\), defined as the number of zeros inserted between the pixels of the input feature map, Eq. \eqref{eq:3}; the second parameter is \(p'\), the new padding to use on the input feature map, Eq. \eqref{eq:4}. The stride of the convolution actually carried out inside a transposed convolutional layer is always \(s' = 1\). It is important to recall that \(k\) is the filter size.
\begin{equation}
z = s - 1
\label{eq:3}
\end{equation}
\begin{equation}
p' = k - p - 1
\label{eq:4}
\end{equation}
The steps to perform a transposed convolution are given as follows:
\begin{itemize}
\item Calculate the new parameters \(z\) and \(p'\)
\item Between each row and column of the input, insert \(z\) zeros. This increases the size of the input to \((i + (i-1)z) \times (i + (i-1)z)\); for \(s = 2\) this is \((2i-1) \times (2i-1)\)
\item Pad the modified input image with \(p'\) zeros
\item Carry out standard convolution on the image generated from step 3 with a stride length of 1
\end{itemize}
Fig. \ref{fig:f7} illustrates the steps mentioned above.
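The same four steps can also be written out by hand. The sketch below (assuming NumPy/SciPy, square inputs, and no output padding) follows them literally:
\begin{verbatim}
import numpy as np
from scipy.signal import correlate2d  # cross-correlation, i.e., the
                                      # deep-learning "convolution"

def transposed_conv(x, kernel, s, p):
    i, k = x.shape[0], kernel.shape[0]
    z, p_new = s - 1, k - p - 1                  # step 1: new parameters
    expanded = np.zeros((i + (i - 1) * z,) * 2)  # step 2: insert z zeros
    expanded[::s, ::s] = x
    padded = np.pad(expanded, p_new)             # step 3: pad with p' zeros
    return correlate2d(padded, kernel, mode='valid')  # step 4: stride-1 conv
\end{verbatim}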
\begin{figure}[ht]
\centering
\includegraphics[width=0.95\textwidth,height=0.95\textheight,keepaspectratio]{images/transposed_conv.png}
\captionsetup{justification=centering}
\caption{Transposed convolution}
\label{fig:f7}
\end{figure}
Additionally, Fig. \ref{fig:f8} presents examples of transposed convolutions with different stride and padding.
\begin{figure}[ht]
\centering
\includegraphics[width=0.95\textwidth,height=0.95\textheight,keepaspectratio]{images/multiple_convs.png}
\captionsetup{justification=centering}
\caption{Transposed convolution examples}
\label{fig:f8}
\end{figure}
For a given size of input (\(i\)), kernel (\(k\)), padding (\(p\)) and stride (\(s\)), the size of the output feature map (\(o\)) generated is given by Eq. \eqref{eq:5}.
\begin{equation}
o = (i - 1) * s + k - 2p
\label{eq:5}
\end{equation}
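The formula can be checked directly against PyTorch (a quick sketch; the layer parameters are arbitrary):
\begin{verbatim}
import torch
import torch.nn as nn

i, k, s, p = 5, 3, 2, 1
tconv = nn.ConvTranspose2d(1, 1, kernel_size=k, stride=s, padding=p)
o = tconv(torch.randn(1, 1, i, i)).shape[-1]
print(o, (i - 1) * s + k - 2 * p)   # both print 9
\end{verbatim}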
It is important to note that transposed convolution layers can lead to artifacts in the final images, such as checkerboard patterns. This is due to overlap between kernel applications, which can be avoided by setting the stride and kernel size equal.
An implementation of the upsampling technique in PyTorch can be seen in \textit{Upsampling.ipynb}. This code is a convolutional autoencoder where the encoder is a simple convolutional layer but the decoder is a set of nearest-neighbor upsampling and convolutional layers. It is applied to the MNIST dataset to reproduce the input digit at the output of the network.
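A decoder in that style might look as follows (a sketch only; the channel sizes are illustrative and not necessarily those of the notebook):
\begin{verbatim}
import torch.nn as nn

decoder = nn.Sequential(
    nn.Upsample(scale_factor=2, mode='nearest'),
    nn.Conv2d(4, 16, kernel_size=3, padding=1),
    nn.ReLU(),
    nn.Upsample(scale_factor=2, mode='nearest'),
    nn.Conv2d(16, 1, kernel_size=3, padding=1),
    nn.Sigmoid(),   # MNIST pixel values lie in [0, 1]
)
\end{verbatim}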
Similarly, an implementation of transposed convolution with PyTorch can be seen in \textit{Convolutional\_Autoencoder\_Exercise.ipynb}. The idea is very similar to the previous code, but with transposed convolutions instead of upsampling.
Finally, an implementation of a denoising autoencoder with CNNs can be seen in \textit{Denoising\_Autoencoder\_Exercise.ipynb}. This network receives a noisy MNIST image and reconstructs the image without noise. The main idea behind a denoising autoencoder is that the network is trained on noisy data while the target output is the data without noise. This is one of the biggest applications of autoencoders: because they are able to create a compressed representation of the data, they are very good at reconstructing the data without noise. This model is constructed with transposed convolutions.
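The training trick amounts to corrupting the input while keeping the clean image as the target; a sketch of one loss computation (the \texttt{model} and \texttt{images} arguments are assumed to come from the surrounding training loop):
\begin{verbatim}
import torch
import torch.nn.functional as F

def denoising_loss(model, images, noise_factor=0.5):  # noise level illustrative
    noisy = images + noise_factor * torch.randn_like(images)
    noisy = noisy.clamp(0., 1.)              # keep pixels in the valid range
    return F.mse_loss(model(noisy), images)  # compare against *clean* images
\end{verbatim}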
It is important to mention that another advantage of autoencoders is that they learn a pretty decent representation of the weights of a network just from the input. An autoencoder can thus be used as an initializer, with the learned weights serving as the initial ones for a fine-tuning process. This kind of approach has shown good results and eases the training procedure of neural networks.
\printbibliography
\end{document}
Dispersion (DSP) Package information is read from the file that is specified by ``DSP6'' as the file type. Only one DSP Package can be specified for a GWT model.
\vspace{5mm}
\subsubsection{Structure of Blocks}
\lstinputlisting[style=blockdefinition]{./mf6ivar/tex/gwt-dsp-options.dat}
\lstinputlisting[style=blockdefinition]{./mf6ivar/tex/gwt-dsp-griddata.dat}
\vspace{5mm}
\subsubsection{Explanation of Variables}
\begin{description}
\input{./mf6ivar/tex/gwt-dsp-desc.tex}
\end{description}
\vspace{5mm}
\subsubsection{Example Input File}
\lstinputlisting[style=inputfile]{./mf6ivar/examples/gwt-dsp-example.dat}
\section{A tagless partial evaluator}\label{PE}
Surprisingly, we can write a partial evaluator using the idea above,
namely to build object terms using ordinary functions rather than data
constructors. We present this partial evaluator in a sequence of three
attempts. It uses no universal type and no tags
for object types. We then discuss residualization and binding-time
analysis. Our partial evaluator is a modular extension of the evaluator
in~\S\ref{S:interpreter-RL} and the compiler in~\S\ref{S:compiler}, in
that it uses the former to reduce static terms and the latter to build
dynamic terms.
\subsection{Avoiding polymorphic lift}
\label{S:PE-lift}
Roughly, a partial evaluator interprets each object term to yield either
a static (present-stage) term (using\ifshort\else\ the evaluator\fi~|R|) or
a dynamic (future-stage) term (using\ifshort\else\ the
compiler\fi~|C|). To
distinguish between static and dynamic terms, we might try to define
|repr| in the partial evaluator as
\texttt{type ('c,'dv)\,repr
= S0 of ('c,'dv)\,R.repr~}\Verb+|+\texttt{~D0 of ('c,'dv)\,C.repr}.
Integer and boolean literals are immediate, present-stage
values. Addition yields a static term (using~|R.add|) if and only
if both operands are static; otherwise we extract the dynamic terms
from the operands and add them using~|C.add|. We use |C.int| to
convert from the static term |('c,int) R.repr|, which is just |int|,
to the dynamic term.
Whereas |mul| and |leq| are as easy to define as |add|, we encounter
a problem with |if_|. Suppose that the first argument to |if_| is a dynamic term
(of type |('c,bool) C.repr|), the second a static term
(of type |('c,'a) R.repr|), and the third a
dynamic term (of type |('c,'a) C.repr|). We then need to convert
the static term to dynamic, but there is no polymorphic ``lift''
function, of type |'a -> ('c,'a) C.repr|, to send a value to the future stage
\citep{xi-guarded,WalidPOPL03}.
Our |Symantics| only includes separate lifting methods |bool| and
|int|, not a parametrically polymorphic lifting method, for good reason:
When compiling to a first-order target language such as machine code,
booleans, integers, and functions may well be represented differently.
Thus, compiling polymorphic lift requires intensional type
analysis. To avoid needing polymorphic lift, we turn to
Asai's technique \citep{asai-binding-time,sumii-hybrid}:
build a dynamic term
alongside every static term.
\subsection{Delaying binding-time analysis}
\label{S:PE-problem}
We switch to the data type
\texttt{type ('c,'dv) repr = P1 of ('c,'dv) R.repr option * ('c,'dv) C.repr}
so that a partially evaluated term always contains a dynamic
component and sometimes contains a static component. By
distributivity, the two
alternative constructors of an |option| value, |Some| and |None|,
tag each partially evaluated term with a phase: either present or
future. This tag is not an object type tag: all pattern matching below
is exhaustive. Because the future-stage component is always available, we
can now define the polymorphic function
|let abstr1 (P1 (_,dyn)) = dyn| of type
\texttt{('c,'dv) repr -> ('c,'dv) C.repr}
to extract it without requiring polymorphic lift into~|C|. We then try
to define the interpreter |P1|---and get as far as the first-order
constructs of our object language, including |if_|.
\begin{code3}
module P1 : Symantics = struct
let int (x:int) = P1 (Some (R.int x), C.int x)
let add e1 e2 = match (e1,e2) with
| (P1 (Some n1,_),P1 (Some n2,_)) -> int (R.add n1 n2)
| _ -> P1 (None,(C.add (abstr1 e1) (abstr1 e2)))
let if_ = function
| P1 (Some s,_) -> fun et ee -> if s then et () else ee ()
| eb -> fun et ee -> P1 (None, C.if_ (abstr1 eb)
(fun () -> abstr1 (et ()))
(fun () -> abstr1 (ee ())))
\end{code3}
However, we stumble on functions. According to our
definition of~|P1|, a partially evaluated object function, such as the
identity $\fun{x}x$ embedded in OCaml as |lam (fun x -> x)|\texttt{ :
}|('c,'a->'a) P1.repr|, consists of a dynamic part
(type |('c,'a->'a) C.repr|) and
maybe a static part (type |('c,'a->'a) R.repr|). The dynamic part is useful
when this function is passed to another function that is only
dynamically known, as in $\fun{k}k(\fun{x}x)$. The static part is
useful when this function is applied to a static argument, as in
$(\fun{x}x)\True$. Neither part, however, lets us \emph{partially}
evaluate the function, that is, compute as much as possible statically
when it is applied to a mix of static and dynamic inputs. For example,
the partial evaluator should turn $\fun{n}(\fun{x}x)n$ into $\fun{n}n$
by substituting $n$ for~$x$ in the body of $\fun{x}x$ even though $n$ is
not statically known. The same static function, applied to
different static arguments, can give both static and dynamic results: we
want to simplify $(\fun{y}x\times y)0$ to~$0$ but $(\fun{y}x\times y)1$
to~$x$.
To enable these simplifications, we delay binding-time analysis
for a static function until it is applied, that is, until |lam f|
appears as the argument of |app|. To do so, we have to incorporate |f|
as it is into the |P1.repr| data structure: the representation
for a function type |'a->'b| should be one of
\begin{code3}
S1 of ('c,'a) repr -> ('c,'b) repr | E1 of ('c,'a->'b) C.repr
P1 of (('c,'a) repr -> ('c,'b) repr) option * ('c,'a->'b) C.repr
\end{code3}
unlike |P1.repr| of |int| or |bool|.
That is, we need a nonparametric data type, something akin to
type-indexed functions and type-indexed types, which
\citet{oliveira-typecase} dub the \emph{typecase} design pattern.
Thus, typed partial evaluation, like typed CPS transformation,
inductively defines a map from source types to target types that
performs case distinction on the source type. In Haskell, typecase
can be equivalently implemented either with GADTs or with
type-class functional dependencies
\citep{oliveira-typecase}. The accompanying code shows both
approaches, neither portable to OCaml. In addition,
the problem of nonexhaustive pattern\hyp matching reappears in
the GADT approach because GHC 6.6.1 cannot see that a particular
type of a GADT value precludes certain constructors.
Thus GADTs fail to
make it \emph{syntactically} apparent that pattern matching is exhaustive.
\subsection{The ``final'' solution}
\label{S:PE-solution}
Let us re-examine the problem in~\S\ref{S:PE-problem}. What we
would ideally like is to write
\begingroup\sloppy
\texttt{type ('c,'dv) repr = P1 of (repr\_pe ('c,'dv)) R.repr option * ('c,'dv) C.repr}
where |repr_pe| is the type function defined
% inductively because P below depends on repr_pe
by
\begin{code3}
repr_pe ('c,int) = ('c,int); repr_pe ('c,bool) = ('c,bool)
repr_pe ('c,'a->'b) = ('c,'a) repr -> ('c,'b) repr
\end{code3}
\endgroup\noindent
Although we can use type classes to define this type function
in Haskell, that is not portable to MetaOCaml. However,
these three typecase alternatives are already present in existing
methods of |Symantics|.
A simple and portable solution thus emerges: we bake |repr_pe|
into the signature |Symantics|.
We recall from Figure~\ref{fig:ocaml-simple} in~\S\ref{encoding} that the |repr| type
constructor took two arguments |'c| and~|'dv|. We add an argument
|'sv| for the result of applying |repr_pe| to~|'dv|.
Figure~\ref{fig:ocaml} shows the new signature.
\begin{figure}
\begin{floatrule}
\smallskip
\begin{code2}
module type Symantics = sig type ('c,'sv,'dv) repr
val int : int -> ('c,int,int) repr
val lam : (('c,'sa,'da) repr -> ('c,'sb,'db) repr as 'x)
-> ('c,'x,'da -> 'db) repr
val app : ('c,'x,'da -> 'db) repr
-> (('c,'sa,'da) repr -> ('c,'sb,'db) repr as 'x)
val fix : ('x -> 'x) -> (('c, ('c,'sa,'da) repr -> ('c,'sb,'db) repr,
'da -> 'db) repr as 'x)
val add : ('c,int,int) repr -> ('c,int,int) repr -> ('c,int,int) repr
val if_ : ('c,bool,bool) repr
-> (unit->'x) -> (unit->'x) -> (('c,'sa,'da) repr as 'x) end
\end{code2}
\medskip
\end{floatrule}
\caption{A (Meta)OCaml embedding of our object language that supports
partial evaluation \ifshort \protect\linebreak[2] (\texttt{bool},
\texttt{mul}, \texttt{leq} are elided)\fi}
\label{fig:ocaml}
\end{figure}
\begin{figure}
\begin{floatrule}
\begin{code2}
module P = struct
type ('c,'sv,'dv) repr = {st: 'sv option; dy: ('c,'dv) code}
let abstr {dy = x} = x let pdyn x = {st = None; dy = x}
let int (x:int) = {st = Some (R.int x); dy = C.int x}
let add e1 e2 = match e1, e2 with
| {st = Some 0}, e | e, {st = Some 0} -> e
| {st = Some m}, {st = Some n} -> int (R.add m n)
| _ -> pdyn (C.add (abstr e1) (abstr e2))
let if_ eb et ee = match eb with
| {st = Some b} -> if b then et () else ee ()
| _ -> pdyn (C.if_ (abstr eb) (fun () -> abstr (et ()))
(fun () -> abstr (ee ())))
let lam f = {st = Some f; dy = C.lam (fun x -> abstr (f (pdyn x)))}
let app ef ea = match ef with {st = Some f} -> f ea
| _ -> pdyn (C.app (abstr ef) (abstr ea)) end
\end{code2}
\medskip
\end{floatrule}
\caption{Our partial evaluator (\ifshort \texttt{bool}, \texttt{mul},
\texttt{leq} and \texttt{fix} \else \texttt{mul} and \texttt{leq} \fi
are elided)}
\label{fig:pe}
\end{figure}
The interpreters |R|, |L| and~|C| above only use the old
type arguments |'c| and~|'dv|, which are treated by the new signature
in the same way. Hence, all that needs to change in these interpreters
to match the new signature is to add a phantom type
argument~|'sv| to~|repr|.
For example, the compiler |C| now begins
\ifshort
\texttt{module C = struct
type ('c,'sv,'dv) repr = ('c,'dv) code}
\else
\begin{code}
module C = struct
type ('c,'sv,'dv) repr = ('c,'dv) code
\end{code}
\fi
with the rest the same.
In contrast, the partial evaluator~|P| relies on the type argument |'sv|.
Figure~\ref{fig:pe} shows the partial evaluator~|P|.
Its type |repr| literally expresses the type equation for |repr_pe| above.
The function |abstr|
extracts a future-stage code value from the result of
partial evaluation. Conversely, the function |pdyn| injects a
code value into the |repr| type. As
in~\S\ref{S:PE-problem}, we build dynamic terms alongside
any static ones to avoid polymorphic lift.
The static portion of the interpretation of |lam f| is |Some f|,
which just wraps the HOAS
function |f|. The interpretation of |app ef ea|
checks to see if |ef| is such a wrapped
HOAS function. If it is, we apply |f| to the
concrete argument |ea|, giving us a chance to perform static
computations (see the example below). If
|ef| has only a dynamic part, we residualize.
To illustrate how to add optimizations, we improve |add| (and |mul|,
elided) to simplify the generated code using the monoid (and ring)
structure of~|int|: not only is addition performed statically
(using~|R|) when both operands are statically known, but it is
eliminated when one operand is statically~$0$; similarly for
multiplication by~$0$ or~$1$.
\ifshort
Such optimizations can be quite effective
\else
Such algebraic simplifications are easy
to abstract over the specific domain (such as monoid or ring) where they
apply. These simplifications and abstractions help a lot
\fi
in a large language with more base types and primitive operations.
\ifshort\else
Incidentally, the code actually contains a more general implementation
mechanism for such features, inspired in part by previous work in
generative linear algebra~\citep{CaretteKiselyov05}.
\fi
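For concreteness, the elided |mul| can follow the same pattern as |add| in Figure~\ref{fig:pe}. The following is a sketch consistent with the simplifications just described (the code in the accompanying source may differ in detail):
\begin{code}
let mul e1 e2 = match e1, e2 with
  | {st = Some 0}, _ | _, {st = Some 0} -> int 0
  | {st = Some 1}, e | e, {st = Some 1} -> e
  | {st = Some m}, {st = Some n} -> int (R.mul m n)
  | _ -> pdyn (C.mul (abstr e1) (abstr e2))
\end{code}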
Any partial evaluator must decide how much to unfold recursion
statically: unfolding too little can degrade the residual code, whereas
unfolding too much risks nontermination. Our partial evaluator is no
exception, because our object language includes |fix|. The code in
Figure~\ref{fig:pe} takes the na\"\i ve approach of ``going all the
way'', that is, whenever the
argument is static, we unfold |fix| rather than residualize it.
In the accompanying source code is a conservative
alternative |P.fix| that unfolds recursion only once, then residualizes.
Many sophisticated approaches have been developed to decide how much to unfold
\citep{jones-partial}, but this issue is orthogonal to our presentation.
\ifshort\else
A separate concern in our treatment of |fix| is possible code bloat in
the residual program, which calls for let-insertion
\citep{SwadiTahaKiselyovPasalic2006}.
\fi
Given this implementation of~|P|, our running example
\ifshort
\texttt{let module E\,=\,EX(P) in E.test1 ()}
\else
\begin{code}
let module E = EX(P) in E.test1 ()
\end{code}
\fi
evaluates to
\ifshort
|{P.st = Some true; P.dy = .<true>.}|
\else
\begin{code}
{P.st = Some true; P.dy = .<true>.}
\end{code}
\fi
of type |('a, bool, bool) P.repr|. Unlike with~|C| in~\S\ref{S:compiler},
a $\beta$-reduction has been statically performed to yield |true|. More
interestingly, whereas |testpowfix7| compiles to a code value with many
$\beta$-redexes in~\S\ref{S:compiler}, the partial evaluation
\ifshort
\texttt{let module E = EX(P) in E.testpowfix7}
\else
\begin{code}
let module E = EX(P) in E.testpowfix7
\end{code}
\fi
gives the desired result
\ifshort\vspace*{-0.7em}\fi
\begin{code}
{P.st = Some <fun>;
P.dy = .<fun x -> x * (x * (x * (x * (x * (x * x)))))>.}
\end{code}
\ifshort\vspace*{-0.7em}\fi
\noindent All pattern\hyp
matching in~|P| is \emph{syntactically} exhaustive, so it is patent to the
metalanguage implementation that |P| never gets stuck. Further, all
pattern\hyp matching occurs
during partial evaluation, only to check if a value is known statically,
never what type it has. In other words, our partial evaluator tags
phases (with |Some| and |None|) but not object types.
\chapter{Installation}
This chapter shows how {\ViennaFVM} can be integrated into a project and how
the examples are built. The necessary steps are outlined for several different
platforms, but we could not check every possible combination of hardware,
operating system, and compiler. If you experience any trouble, please write to
the mailing list at \\
\begin{center}
\texttt{viennafvm-support$@$lists.sourceforge.net}
\end{center}
% -----------------------------------------------------------------------------
% -----------------------------------------------------------------------------
\section{Dependencies}
% -----------------------------------------------------------------------------
% -----------------------------------------------------------------------------
\label{dependencies}
\begin{itemize}
\item A recent C++ compiler (e.g.~{\GCC} version 4.2.x or above and Visual C++
2008 or above are known to work)
\item {\CMake}~\cite{cmake} as build system (optional, but recommended
for building the examples)
\end{itemize}
\section{Generic Installation of ViennaFVM} \label{sec:viennafvm-installation}
Since {\ViennaFVM} is a header-only library, it is sufficient to copy the
\lstinline|viennafvm/| source folder either into your project folder or to your global system
include path.
On Unix-like operating systems, the global system include path is usually \lstinline|/usr/include/| or \lstinline|/usr/local/include/|.
On Windows, the situation strongly depends on your development environment. We
advise to consult the documentation of the compiler on how to set the include
path correctly. With Visual Studio 9.0 this is usually something like
\texttt{C:$\setminus$Program Files$\setminus$Microsoft Visual Studio
9.0$\setminus$VC$\setminus$include}
and can be set in \texttt{Tools -> Options -> Projects and Solutions ->
VC++-\-Directories}.
% -----------------------------------------------------------------------------
% -----------------------------------------------------------------------------
\section{Building the Examples and Tutorials}
% -----------------------------------------------------------------------------
% -----------------------------------------------------------------------------
For building the examples, we suppose that {\CMake} is properly set up
on your system. The various examples and their purpose are listed in
Tab.~\ref{tab:tutorial-dependencies}.
\begin{table}[tb]
\begin{center}
\begin{tabular}{l|p{9.3cm}}
File & Purpose\\
\hline
\texttt{poisson\_2d.cpp} & Poisson equation in two spatial dimensions
\end{tabular}
\caption{Overview of the sample applications in the \texttt{examples/tutorial} folder}
\label{tab:tutorial-dependencies}
\end{center}
\end{table}
\subsection{Linux}
To build the examples, open a terminal and change to:
\begin{lstlisting}
$> cd /your-ViennaFVM-path/build/
\end{lstlisting}
Execute
\begin{lstlisting}
$> cmake ..
\end{lstlisting}
to obtain a Makefile and type
\begin{lstlisting}
$> make
\end{lstlisting}
to build the examples. If desired, one can build each example separately instead:
\begin{lstlisting}
$> make poisson_2d #builds the poisson_2d tutorial
\end{lstlisting}
\TIP{Speed up the building process by using multiple concurrent jobs, e.g. \keyword{make -j4}.}
Some of the tutorials need to access mesh data in \texttt{examples/data/}.
To ensure that the data is accessible, please execute all tutorials directly from the \texttt{build} folder, e.g.
\begin{lstlisting}
$ build> examples/tutorial/poisson_2d
\end{lstlisting}
\subsection{Mac OS X}
\label{apple}
The tools mentioned in Section \ref{dependencies} are available on
Macintosh platforms too.
For the {\GCC} compiler the Xcode~\cite{xcode} package has to be installed.
To install {\CMake}, external package managers such as
Fink~\cite{fink}, DarwinPorts~\cite{darwinports},
or MacPorts~\cite{macports} can be used.
The build process of {\ViennaFVM} is similar to Linux.
\subsection{Windows}
In the following the procedure is outlined for \texttt{Visual Studio}: Assuming
that {\CMake} is already installed, \texttt{Visual Studio} solution
and project files can be created using {\CMake}:
\begin{itemize}
\item Open the {\CMake} GUI.
\item Set the {\ViennaFVM} base directory as source directory.
\item Set the \texttt{build/} directory as build directory.
\item Click on 'Configure' and select the appropriate generator
(e.g.~\texttt{Visual Studio 9 2008})
\item Click on 'Generate' (you may need to click on 'Configure' one more time
before you can click on 'Generate')
\item The project files can now be found in the {\ViennaFVM} build directory,
where they can be opened and compiled with Visual Studio (provided that the
include and library paths are set correctly, see
Sec.~\ref{sec:viennafvm-installation}).
\end{itemize}
Note that the examples should be executed from the \texttt{build/} folder in order to access the correct input files.
\section{Introduction}
\label{sec:intro_wavefunction}
This section describes the input blocks associated with the specification of the trial wavefunction in a QMCPACK calculation. These sections are contained within the \ixml{<wavefunction> ... </wavefunction>} XML blocks. \textbf{Users are expected to rely on converters to generate the input blocks described in this section.} The converters and the workflows are designed such that input blocks require minimal modification by users. Unless the workflow requires modification of wavefunction blocks (e.g., setting the cutoff in a multideterminant calculation), only expert users should directly alter them.
The trial wavefunction in QMCPACK has a general product form:
\begin{equation}
\Psi_T(\vec{r}) = \prod_k \Theta_k(\vec{r}) ,
\end{equation}
where each $\Theta_k(\vec{r})$ is a function of the electron coordinates (and possibly ionic coordinates and variational parameters). For problems involving electrons, the overall trial wavefunction must be antisymmetric with respect to electron exchange, so at least one of the functions in the product must be antisymmetric. Notice that, although QMCPACK allows for the construction of arbitrary trial wavefunctions based on the functions implemented in the code (e.g., Slater determinants, Jastrow functions), the user must make sure that a correct wavefunction is used for the problem at hand. From here on, we assume a standard trial wavefunction for an electronic structure problem
\begin{equation}
\Psi_T(\vec{r}) = \textit{A}(\vec{r}) \prod_k \textit{J}_k(\vec{r}),
\end{equation}
where $\textit{A}(\vec{r})$ is one of the antisymmetric functions: (1) Slater determinant, (2) multi-Slater determinant, or (3) Pfaffian, and $\textit{J}_k$ is any of the Jastrow functions (described in Section~\ref{sec:jastrow}). The antisymmetric functions are built from a set of single particle orbitals (\texttt{sposet}). QMCPACK implements four different types of \texttt{sposet}, described in the following section. Each \texttt{sposet} is designed for a different type of calculation, so their definition and generation varies accordingly.
\documentclass{article}
\title{My first document}
\author{Me}
\begin{document}
\maketitle
\section{Hello world!}
I made this document with the knowledge I got from a course\cite{bash-course} online!
\begin{thebibliography}{9}
\bibitem{bash-course}
Bob Stienen,
\textit{Introduction into bash},
http://www.github.com/bstienen/bash-course.
Accessed on September 22nd, 2019.
\end{thebibliography}
\end{document}
\section{Graphviz}
\slide{Hello world}
{
\vspace{-1em}
\begin{center}
% NOTE: explain diagram in depth
\includegraphics[scale=0.8]{dot/sample}
\end{center}
}
\slide{Diagrams with seqdiag}
{
\vspace{-1em}
\begin{center}
\includegraphics[scale=0.5]{dot/connection}
\end{center}
}
\documentclass[12pt]{article}
\usepackage[vmargin=1in,hmargin=1in]{geometry}
\usepackage{amsmath}
\usepackage[parfill]{parskip}
\usepackage{hyperref}
\usepackage{natbib}
\usepackage{bm}
\usepackage{amsfonts}
\usepackage{graphicx}
\usepackage{abstract}
\usepackage{lineno}
\usepackage{setspace}
\hypersetup{pdfstartview={Fit},hidelinks}
\newcommand{\bs}{{\bf s}}
\newcommand{\bsi}{{\bf s}_i}
\newcommand{\bx}{{\bf x}}
\newcommand{\bxj}{{\bf x}_j}
\newcommand{\by}{{\bf y}}
\newcommand{\bu}{{\bf u}}
\newcommand{\bui}{{\bf u}_i}
\newcommand{\but}{{\bf u}_{t}}
\newcommand{\buit}{{\bf u}_{it}}
\newcommand{\buip}{{\bf u}_{i,t-1}}
\newcommand{\ed}{\|\bx - \bx'\|}
\newcommand{\cS}{\mathcal{S} }
\title{Ecology Appendix S1 \\ Posterior distribution and Gibbs sampler \\ \it Modeling abundance, distribution, movement, and space
use with camera and telemetry data}
\author{Richard B. Chandler$^1$\footnote{Corresponding author: [email protected]}, Daniel A. Crawford$^2$, Elina P. Garrison$^3$, \\
Karl V. Miller$^1$, Michael J. Cherry$^2$}
\begin{document}
\maketitle
\vspace{12pt}
\begin{description}%[labelindent=1pt]%[leftmargin=1cm]%,labelwidth=\widthof{\bfseries Example:}]
% \large
\item[$^1$] Warnell School of Forestry and Natural Resources, University of Georgia %\\
\item[$^2$] Caesar Kleberg Wildlife Research Institute at Texas A\&M University-Kingsville %\\
\item[$^3$] Florida Fish and Wildlife Conservation Commission %\\
\end{description}
\clearpage
\section*{Posterior distribution}
The posterior distribution of the joint spatial capture-recapture
movement model (with constant $\sigma$ and data augmentation) is
\begin{multline}
p(p, \lambda_0, \sigma_{\mathrm det}, \rho, \sigma, \{\bu_{it}\}, \{\bs_i\}, \{z_i\}, \psi | {\bm y}^{\rm cap},{\bm y}) \propto \\
\left\{\prod_{i=1}^M p(y_i^{\rm cap}|z_i, p)
\left\{\prod_{t=1}^T\left\{\prod_{j=1}^Jp(y_{ijt}|z_i,\lambda_0,\sigma_{\rm det},\buit)
\right\}p(\buit|\bu_{i,t-1},\bsi,\rho,\sigma)\right\} %\times \\
p(\bsi)p(z_i|\psi)\right\} \times \\
p(p)p(\lambda_0)p(\sigma_{\rm det})p(\rho)p(\sigma)p(\psi) \\
% \label{eq:post}
% \tag{S1}
\end{multline}
where
\[
p(y_i^{\rm cap}|z_i, p) = \mathrm{Bern}(y_i|z_i\times p)
\]
\[
p(y_{ijt}|z_i,\lambda_0,\sigma_{\rm det},\buit) = \mathrm{Pois}(y_{ijt}|z_i \times \lambda^{\rm det}_{ijt})
\]
\[
p(\buit|\bu_{i,t-1},\bsi,\rho,\sigma) =
\begin{cases}
\mathrm{Norm}(\buit|\bsi+(\buip-\bsi)\rho, \mathrm{diag}(\sigma^2
- \sigma^2\rho^2)) & \mathrm{for}\quad t>1 \\
\mathrm{Norm}(\buit|\bsi, \mathrm{diag}(\sigma^2)) & \mathrm{for}\quad t=1 \\
\end{cases}
\]
\[
p(\bsi) = \mathrm{Unif}(\mathcal M)
\]
\[
p(z_i|\psi) = \mathrm{Bern}(z_i|\psi)
\]
and the other probability distributions are priors for the
parameters. Note that some or all of the $\buit$ locations could be
observed.
\clearpage
\section*{Gibbs sampler}
Sampling from the joint posterior is computationally challenging
because of the latent movement paths for the $M-n$ augmented
individuals. The burden can be reduced by marginalizing these latent
paths while retaining the activity centers $\{\bsi\}$. This is
accomplished using Eq. 6 in the manuscript and the probability density:
\[
p(0|\lambda_0,\sigma_{\rm det},\rho,\sigma,\bsi,z_i) = \mathrm{Bern}(0|\tilde{p}_i)
\]
The Gibbs sampler begins by initializing the unknown parameters and
then sampling from the following full conditional distributions.
Use Metropolis-Hastings (MH) to sample from:
\[
p(\rho,\sigma|\cdot) \propto \left\{\prod_{i=1}^n\prod_{t=1}^T
p(\buit|\bu_{i,t-1},\bsi,\rho,\sigma)\right\}\left\{\prod_{i=n+1}^M p(0|\lambda_0,\sigma_{\rm det},\rho,\sigma,\bsi,z_i)\right\}p(\rho)p(\sigma)
\]
Use MH to sample from:
\[
p(\lambda_0,\sigma_{\rm det}|\cdot) \propto \left\{\prod_{i=1}^n\prod_{j=1}^J\prod_{t=1}^T
p(y_{ijt}|z_i,\lambda_0,\sigma_{\rm det},\buit)\right\}\left\{\prod_{i=n+1}^M
p(0|\lambda_0,\sigma_{\rm det},\rho,\sigma,\bsi,z_i)\right\}p(\lambda_0)p(\sigma_{\rm det})
\]
Sample directly from
\[
p(\psi|{\bm z}) = \mathrm{Beta}\left(1+\sum_{i=1}^M z_i, 1+M-\sum_{i=1}^M z_i\right)
\]
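As a concrete illustration, this direct draw is a one-liner in NumPy (the names are illustrative and not part of the model specification):
\begin{verbatim}
import numpy as np
rng = np.random.default_rng()

def draw_psi(z):
    # Direct draw from the Beta full conditional of psi, given the
    # M binary data-augmentation indicators z.
    return rng.beta(1 + z.sum(), 1 + len(z) - z.sum())
\end{verbatim}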
For $i=n+1,\dots,M$, use MH to sample from
\[
p(z_i|\cdot) \propto p(y^{\rm cap}_i|z_i,p)
\left\{\prod_{j=1}^J\prod_{t=1}^T p(y_{ijt}|z_i,\lambda_0,\sigma_{\rm
det},\buit)\right\}
p(0|\lambda_0,\sigma_{\rm det},\rho,\sigma,\bsi,z_i)p(z_i|\psi)
\]
Use MH (or direct draw from beta full conditional) to sample from
\[
p(p|\cdot) \propto \prod_{i=1}^M p(y^{\rm cap}_i|z_i \times p)p(p)
\]
For $i=1,\dots,n$, use MH to sample from
\[
p(\bsi|\cdot) \propto p(\buit|\rho,\sigma,\bsi)p(\bsi)
\]
For $i=n+1,\dots,M$, use MH to sample from
\[
p(\bsi|\cdot) \propto p(0|\lambda_0,\sigma_{\rm det},\rho,\sigma,\bsi,z_i)p(\bsi)
\]
For $i=1,\dots,n$ and for cases where $\buit$ is not observed, use MH to sample from
\[
p(\buit|\cdot) \propto
p(\bu_{i,t+1}|\bu_{i,t},\bsi,\rho,\sigma)p(\bu_{i,t}|\bu_{i,t-1},\bsi,\rho,\sigma)\prod_{j=1}^J
p(y_{ijt}|\lambda_0,\sigma_{\rm det},\buit)
\]
\end{document}
| {
"alphanum_fraction": 0.6607354686,
"avg_line_length": 31.4161490683,
"ext": "tex",
"hexsha": "f956529ca468576aeb7bb7261379b8142b6aabbc",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "30d3ed9f8c3f554b6867f6dc923a6fa143de4551",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "rbchan/scr-move",
"max_forks_repo_path": "supp/Appendix-S1.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "30d3ed9f8c3f554b6867f6dc923a6fa143de4551",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "rbchan/scr-move",
"max_issues_repo_path": "supp/Appendix-S1.tex",
"max_line_length": 148,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "30d3ed9f8c3f554b6867f6dc923a6fa143de4551",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "rbchan/scr-move",
"max_stars_repo_path": "supp/Appendix-S1.tex",
"max_stars_repo_stars_event_max_datetime": "2021-11-25T06:45:50.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-08-08T20:07:08.000Z",
"num_tokens": 1931,
"size": 5058
} |
\lab{Algorithms}{Conditioning and Stability}{Conditioning and Stability}
\objective{Explore the condition of problems and the stability of algorithms.}
\label{lab:conditioning_stability}
\begin{equation*}
\mathlarger{ \mathlarger{ \mathlarger{f:X \rightarrow Y}}}
\end{equation*}
\begin{center} vs. \end{center}
\begin{equation*}
\mathlarger{ \mathlarger{ \mathlarger{\hat{f}:\hat{X} \rightarrow \hat{Y}}}}
\end{equation*}
%\begin{eqnarray}
%\mathlarger{\mathlarger{\mathlarger{f:X \rightarrow Y}}}\\ \mathlarger{\mathlarger{\mathlarger{ f:\hat{X} \rightarrow \hat{Y} }}}\\
%\mathlarger{\mathlarger{\mathlarger{ \hat{f}:X \rightarrow Y }}}%\\
%%\mathlarger{\mathlarger{\mathlarger{ \hat{f}:\hat{X} \rightarrow \hat{Y} }}}
% \end{eqnarray}
%
\section*{Conditioning of a Problem}
%\begin{equation*}
%\mathlarger{ \mathlarger{ \mathlarger{f:\hat{X} \rightarrow \hat{Y}}}}
%\end{equation*}
A \emph{problem} is a function $f:X \rightarrow Y$, where $X$ is a vector space of data and $Y$ is a vector space of solutions. In a perfect world, this function takes an exact input $x$ and correctly returns the exact answer $y$. However, we are working in the finite world of computers.
Since $X$ and $Y$ are vector spaces of infinite cardinality, we cannot represent these spaces on a computer perfectly. We can only represent finite subsets $\tilde{X} \subset X$ and $\tilde{Y} \subset Y$. This means that the best we can do is approximate $x \in X$ with some $\tilde{x} \in \tilde{X}$ and return an approximate correct answer $\tilde{y} \in \tilde{Y}$. But if we change the input from $x$ to $\tilde{x}$, how much will our output change?
We define the \emph{condition number} of $f$ at $x$. Let $\delta x$ denote a small perturbation of $x$, and let $\delta f = f(x+\delta x) - f(x)$. Then the \emph{absolute condition number} of $f$ at $x$ is
\[
\hat{\mathcal{K}} = \lim_{\delta \rightarrow 0} \sup_{\norm{\delta x} \leq \delta} { \frac{\norm{\delta f}}{\norm{\delta x}} }
\]
This is the ratio of output error to input error. These are \emph{absolute} errors; however, the error introduced by floating point arithmetic on a computer is \emph{relative} error. Therefore we also define the \emph{relative condition number}:
\begin{equation}
\mathcal{K} = \lim_{\delta \rightarrow 0} \sup_{\norm{\delta x} \leq \delta} \left({ \frac{\norm{\delta f}}{\norm{f(x)}} } \middle/ { \frac{\norm{\delta x}}{\norm{x}} }\right)
\label{def:relativeconditionnumber}
\end{equation}
Relative condition number is usually a more useful concept than absolute condition number.
We say that a problem $f$ is \emph{well-conditioned} at $x$ if $\mathcal{K}$ is small. In this case, small changes to $x$ result in small changes to $f(x)$. We say $f$ is \emph{ill-conditioned} at $x$ if $\mathcal{K}$ is large. In this case, small changes to $x$ may result in large changes to $f(x)$.
\begin{example}[The Wilkinson Polynomial]
Polynomial root finding is a notoriously ill-conditioned problem. Root-finding for a degree $n$ polynomial can be described by a function $f$, which takes a vector $x$ of $n+1$ coefficients and returns a vector $y$ of $n$ roots. If two of the roots in $y$ are close to each other, the condition number of $f$ at $x$ will be large. In fact if $y$ contains repeated roots, the condition number is $\infty$.
James Wilkinson illustrated that root-finding can be extremely ill-conditioned even if the roots are far apart. His classic example is called the \emph{Wilkinson polynomial}:
\[
w(x) = (x-1)(x-2)(x-3)\hdots(x-20)
\]
Expanding this polynomial to obtain coefficients gives
\[\begin{array}{cl}
w(x)=& x^{20}-210x^{19} +20615x^{18} -1256850 x^{17} +53327946 x^{16}\\
&-1672280820x^{15} + 40171771630x^{14} -756111184500x^{13}\\
&+11310276995381 x^{12} -135585182899530 x^{11}\\
&+1307535010540395 x^{10} -10142299865511450 x^9\\
&+63030812099294896 x^8 -311333643161390640 x^7\\
&+1206647803780373360 x^6 -3599979517947607200 x^5\\
&+8037811822645051776 x^4 -12870931245150988800 x^3\\
& +13803759753640704000 x^2 -8752948036761600000 x\\
& +2432902008176640000
\end{array} \]
Figure \ref{fig:wilkinsonpolynomial} shows what happens to the roots of $w(x)$ when we make tiny perturbations to the coefficients. The condition number of this polynomial is $\mathcal{K} \approx 5.1 \times 10^{13}$.
\begin{figure}
\centering
\includegraphics[height=2in]{wilkinsonpolynomial.pdf}
\caption{The blue dots are the roots of w(x). The red dots in the complex plane are roots of a randomly perturbed polynomial with coefficients $\hat{a}_k = a_k(1+r_k)$, where the $r_k$ are normally distributed with mean $0$ and standard deviation $10^{-5}$.}
\label{fig:wilkinsonpolynomial}
\end{figure}
\end{example}
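The following sketch shows how such a perturbation experiment can be set up with NumPy for a single perturbation (the plotting details are illustrative):
\begin{verbatim}
import numpy as np
from matplotlib import pyplot as plt

# Coefficients of w(x) = (x-1)(x-2)...(x-20).
w_coeffs = np.poly(np.arange(1, 21))

# Perturb each coefficient a_k to a_k*(1 + r_k), r_k ~ N(0, 1e-5).
r = np.random.normal(0, 1e-5, w_coeffs.shape)
new_roots = np.roots(w_coeffs * (1 + r))

plt.scatter(np.arange(1, 21), np.zeros(20), label="original roots")
plt.scatter(new_roots.real, new_roots.imag, marker='.', label="perturbed")
plt.legend()
plt.show()
\end{verbatim}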
\begin{problem}
Write a function to reproduce Figure \ref{fig:wilkinsonpolynomial}.
Perturb $w(x)$ as described in the caption for this Figure.
Instead of plotting the roots for just one random perturbation of $w(x)$, plot the roots for several random perturbations.
Write another function that, given an array of roots for a polynomial, computes the polynomial object representing the polynomial with the desired roots, then finds the roots of the resulting polynomial and plots them with the original roots.
You may find the \li{numpy.poly()} function useful for this.
Be sure you use different colors for the different sets of points.
Using equispaced points at integer values, what does the degree of the polynomial have to be for this computation to return incorrect answers?
\end{problem}
\begin{example}[Calculating Eigenvalues]
Consider the problem of finding the eigenvectors of a given $n \times n$ matrix $A$. If $A$ is symmetric, then fortunately the eigenvalue problem is well-conditioned. However the problem can be extremely ill-conditioned for non-symmetric matrices, even if $n$ is small. For example, the two matrices
\[ \left( \begin{array}{cc}
1 & 1000 \\
0 & 1
\end{array} \right)
%
\left( \begin{array}{cc}
1 & 1000 \\
0.001 & 1
\end{array} \right)
\]
have eigenvalues $\{ 1,1\}$ and $\{0,2 \}$ respectively.
\end{example}
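These eigenvalues are easy to verify numerically; a quick check with NumPy:
\begin{verbatim}
import numpy as np

A = np.array([[1., 1000.], [0., 1.]])
B = np.array([[1., 1000.], [0.001, 1.]])
print(np.linalg.eigvals(A))   # [1. 1.]
print(np.linalg.eigvals(B))   # approximately [2. 0.]
\end{verbatim}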
\begin{problem}
Let $f:X\rightarrow Y$ be a function that accepts an $n\times n$ matrix $A$ and returns an $n \times 1$ vector of eigenvalues $y$. Write your own function that accepts a matrix $A$ and estimates the condition number $\mathcal{K}$ of $f$ at $A$. Recall that the definition of $\mathcal{K}$ in Equation \ref{def:relativeconditionnumber} includes the limit as $\delta \rightarrow 0$ and the supremum over all possible $\delta x \leq \delta$. To mimic this on a computer, simply take $\delta$ to be very small. Then calculate the ratio $k = { \frac{\norm{\delta f}}{\norm{f(x)}} } / { \frac{\norm{\delta x}}{\norm{x}} }$ a large number of times, taking a random $\delta x$ each time such that $\norm{\delta x} \leq \delta$. Let $\mathcal{K}$ be the largest of the values $k$ calculated. (Hint: Let each $\delta x$ be a random normal matrix with mean $0$ and standard deviation $\approx 10^{-4}$.)
Experiment with inputting random symmetric and non-symmetric matrices. Remember that if $A$ is any matrix, then $A^T A$ is symmetric. What kinds of condition numbers do you get for symmetric vs. non-symmetric matrices? What kinds of matrices give the biggest condition numbers for the eigenvalue problem?
\end{problem}
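One possible sketch (ours, not the lab's reference solution; pairing the perturbed and unperturbed eigenvalues by sorting is a simplification that can fail when eigenvalues nearly collide):
\begin{lstlisting}
import numpy as np
from scipy import linalg as la

def eig_condit(A, delta=1E-4, trials=100):
    eigs = np.sort(la.eigvals(A))
    k_max = 0.
    for _ in range(trials):
        dA = np.random.normal(0., delta, A.shape)   # random delta x
        deigs = np.sort(la.eigvals(A + dA))
        rel_change = la.norm(deigs - eigs) / la.norm(eigs)
        rel_input = la.norm(dA) / la.norm(A)
        k_max = max(k_max, rel_change / rel_input)
    return k_max
\end{lstlisting}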
\begin{example}[Condition of a System of Equations]
Consider the system of equations
\[ Ax = b \]
for an $n \times n$ matrix $A$ and $n \times 1$ vectors $x$ and $b$. If we hold $A$ fixed, consider the problem of computing $b$ with respect to small changes in $x$. Alternatively, hold $A$ fixed and consider the problem of computing $x = A^{-1} b$ with respect to small changes in $b$. Finally, we may hold $b$ fixed and consider the problem of computing $x = A^{-1}b$ with respect to small changes in $A$.
It turns out that each of these problems has the \emph{same} relative condition number. This number is
\[
\mathcal{K} (A) = \norm{A}_2 \norm{A^{-1}}_2
\]
This is called the \emph{condition number of $A$}.
The condition number of a matrix can be computed using \li{numpy.linalg.cond()}.
Notice that the condition number of a matrix cannot be less than $1$.
It is almost always best to work with orthonormal matrices (or operations that can be mathematically represented by orthonormal matrices).
Since orthonormal matrices have a norm of $1$, as do their inverses, these transformations have the best possible condition number.
The low condition number of orthonormal matrices is one of the primary reasons that Householder reflections and Givens rotations are used in so many algorithms.
\end{example}
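For example (a small demonstration of our own), comparing a random matrix with the orthonormal factor of its QR decomposition:
\begin{lstlisting}
import numpy as np

A = np.random.rand(50, 50)
Q, R = np.linalg.qr(A)          # Q is orthonormal
print(np.linalg.cond(A))        # typically large
print(np.linalg.cond(Q))        # 1.0, up to rounding error
\end{lstlisting}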
\section*{Stability of an Algorithm}
The analysis of problem-solving given above assumed that for a given input $\hat{x}$, we could determine the correct output $f(\hat{x})$ exactly. In other words, even if $\hat{x} \neq x$, we assumed that we could calculate $f(\hat{x})$ exactly although it may be different from $f(x)$.
In practice, our calculation of $f(\hat{x})$ is not always accurate, \emph{even if} the problem $f$ is well-conditioned. Our method of solving the problem may introduce new error. Sometimes the new error we introduce may be very large.
We define an \emph{algorithm} to be a method of solving a given problem. (The algorithm for solving a problem is different from the problem itself.) If an algorithm introduces large errors while solving a problem, the algorithm is called \emph{unstable}. A \emph{stable} algorithm does not introduce large error while solving a problem.
What could go wrong in an algorithm that would introduce new errors? Usually this happens when the algorithm breaks the problem into sub-problems, and one of these sub-problems is ill-conditioned. For example, the problem of computing the matrix $e^A$ may be well-conditioned for a given $A$, while the problem of computing the eigenvalues and eigenvectors of the same $A$ may be ill-conditioned. If an algorithm for computing $e^A$ relies on the intermediate step of computing the eigenvalues and eigenvectors, new error will be introduced.
As another example, take the problem of computing the eigenvalues of a matrix $A$. If $A$ is symmetric, the eigenvalue problem is well-conditioned. However, suppose we use an algorithm that first computes the coefficients of the characteristic polynomial, and then finds the roots of that polynomial. Unavoidably, tiny errors will creep in when we compute the coefficients of the characteristic polynomial. Since root-finding is an ill-conditioned problem, these tiny errors will be magnified. This algorithm is unstable.
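The following sketch (ours) makes this instability visible; how large the discrepancy is will vary from run to run:
\begin{lstlisting}
import numpy as np

A = np.random.rand(20, 20)
A = A + A.T          # symmetric, so the problem is well-conditioned
stable = np.sort(np.linalg.eigvalsh(A))
coeffs = np.poly(A)  # coefficients of the characteristic polynomial
unstable = np.sort(np.roots(coeffs).real)
print(np.abs(stable - unstable).max())  # noticeably above machine precision
\end{lstlisting}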
\begin{warn}
Be careful to not confuse the stability of an algorithm with the conditioning of a problem.
These are two very different things.
A stable algorithm is, roughly speaking, an algorithm that will return an accurate result if the problem is well-conditioned.
If a problem is poorly conditioned, special algorithmic changes may be necessary to ensure a reasonable degree of accuracy.
\end{warn}
\begin{example}[A Simple but Unstable Algorithm]
Consider
\[\int_0^1 x^n e^{x - 1} dx\]
It is easily seen that this integral is always positive and always less than $1$.
It can be shown that, for $n \geq 1$, the value of this integral is always equal to
\[\left(-1\right)^{n} !n + \left(-1\right)^{n + 1} \frac{n!}{e}\]
where $!n$ is a combinatorial function called the derangement number (it is equal to the number of permutations of a set that change the position of every element).
$!n$ is given by the recurrence relation $!n = \left(n - 1\right)\left(!\left(n - 1\right) + !\left(n - 2\right)\right)$ with the initial values $!0 = 1$ and $!1 = 0$.
The factorial function satisfies the analogous recurrence $n! = \left(n - 1\right)\left(\left(n - 1\right)! + \left(n - 2\right)!\right)$ with $0! = 1! = 1$.
Given these recurrence relations, we can compute the value of this integral using the following code:
\begin{lstlisting}
import numpy as np

def derangement(n):
    # Iterate the recurrence !n = (n-1)(!(n-1) + !(n-2)),
    # starting from !0 = 1, !1 = 0.
    d0, d1 = 1, 0
    for i in range(1, n):
        d0, d1 = d1, i * (d0 + d1)
    return d1

def factorial(n):
    # Same recurrence with initial values 0! = 1! = 1.
    d0, d1 = 1, 1
    for i in range(1, n):
        d0, d1 = d1, i * (d0 + d1)
    return d1

def integral(n):
    return (-1)**n * derangement(n) + (-1)**(n-1) * factorial(n) / np.e
\end{lstlisting}
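A short driver (ours) reproduces the computed column of Table \ref{table:unstable_computation}. Note that \li{derangement(n)} is exact, since Python integers have arbitrary precision; the damage is done entirely by the floating point term $n!/e$:
\begin{lstlisting}
for n in [1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50]:
    print(n, integral(n))
\end{lstlisting}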
Unfortunately, since we are taking the difference of large floating point numbers that are very close to one another, this algorithm only gives correct values for the first few integrals.
The output is shown in Table \ref{table:unstable_computation}.
\begin{table}
\centering
\begin{tabular}{|l|l|l|}
\hline
Integrand & Computed Value & Actual Value \\
\hline
$x^{1}e^{x-1}$: & $0.367879441171$ & $0.367879441171$ \\
$x^{5}e^{x-1}$: & $0.145532940573$ & $0.145532940573$ \\
$x^{10}e^{x-1}$: & $0.0838770701084$ & $0.0838770701034$ \\
$x^{15}e^{x-1}$: & $0.0590209960938$ & $0.0590175408793$ \\
$x^{20}e^{x-1}$: & $0.0$ & $0.0455448840758$ \\
$x^{25}e^{x-1}$: & $1073741824.0$ & $0.0370862144237$ \\
$x^{30}e^{x-1}$: & $-1.80143985095 \cdot 10^{16}$ & $0.0312796739322$ \\
$x^{35}e^{x-1}$: & $6.04462909807 \cdot 10^{23}$ & $0.0270462894091$ \\
$x^{40}e^{x-1}$: & $0.0$ & $0.023822728669$ \\
$x^{45}e^{x-1}$: & $0.0$ & $0.0212860390856$ \\
$x^{50}e^{x-1}$: & $1.46150163733 \cdot 10^{48}$ & $0.0192377544343$ \\
\hline
\end{tabular}
\caption{Inaccuracy of values computed using an unstable algorithm.}
\label{table:unstable_computation}
\end{table}
The algorithms that we have studied to solve linear systems have different levels of stability.
LU decomposition (with pivoting) is usually good enough, but there are some pathological examples of matrices that cause it to break down.
QR decomposition (with pivoting) is generally considered to be a better option than the LU decomposition.
Solving a linear system using the SVD is even more stable than the QR decomposition.
(Pivoting is a modification that is commonly made to the LU decomposition and QR decomposition algorithms we have discussed in earlier labs to make them more stable.)
Unfortunately, in this case, the algorithms that are more stable are also slower.
The LU decomposition is used by \li{scipy.linalg.solve()}.
The SVD is used by \li{scipy.linalg.lstsq()}.
Here is some code that uses the QR decomposition of a matrix $A$ to solve the linear system $A x = b$ for $x$.
It uses a lower-level function included in SciPy to perform the back substitution required to solve this system.
\begin{lstlisting}
from scipy import linalg as la
from scipy.linalg.flapack import dtrtrs

def qr_solve(A, b):
    Q, R = la.qr(A)
    # Solve R x = Q^T b. dtrtrs solves a triangular system;
    # passing R^T with trans=1 performs the back substitution with R.
    return dtrtrs(R.T, Q.T.dot(b), lower=1, trans=1)[0]
\end{lstlisting}
A solution using a pivoted QR decomposition would be better, but this will be good enough for demonstration purposes.
The following are routines that generate matrices designed to show the relative benefits of each of these algorithms.
\begin{lstlisting}
import numpy as np
from numpy.random import rand

def bad_arr_1(n):
    """ Construct a specific pathological example
    that breaks LU decomposition. These examples
    are very rare, but they do exist.
    Strictly speaking, the condition number
    for this matrix isn't terribly bad. """
    A = - np.ones((n, n))
    A[:,:-1] = np.tril(A[:,:-1])
    np.fill_diagonal(A, 1)
    A[:,-1] = 1
    return A

def bad_arr_2(n, perturbation=1E-8):
    """ Construct another matrix that is nearly singular
    by computing A.dot(A.T) for a matrix A that is
    not square and then adding some small changes
    so it is not exactly singular. """
    A = rand(n, n // 2)
    return A.dot(A.T) + perturbation * rand(n, n)
\end{lstlisting}
\end{example}
\begin{problem}
For each of the array creation routines above, plot the error $\norm{\text{solve}\left(A, A b\right) - b}$ against the array size $n$ for each of the methods of solving a linear system mentioned above.
Use a log-scaled $y$-axis.
What do you observe?
\end{problem}
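A hedged sketch (ours) of the kind of comparison the problem asks for, assuming \li{qr_solve}, \li{bad_arr_1}, and \li{bad_arr_2} from above are in scope:
\begin{lstlisting}
import numpy as np
from matplotlib import pyplot as plt
from scipy import linalg as la

methods = {'LU (la.solve)': la.solve,
           'QR (qr_solve)': qr_solve,
           'SVD (la.lstsq)': lambda A, b: la.lstsq(A, b)[0]}
ns = list(range(10, 200, 10))
for gen in (bad_arr_1, bad_arr_2):
    plt.figure()
    for name, solve in methods.items():
        errors = []
        for n in ns:
            A = gen(n)
            b = np.random.rand(n)
            errors.append(la.norm(solve(A, A.dot(b)) - b))
        plt.semilogy(ns, errors, label=name)
    plt.title(gen.__name__)
    plt.legend()
plt.show()
\end{lstlisting}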
\section{Results}
\label{sec:results}
This section reports the results of each user study and the overall results across all experimental setups. For each study, the coordinate differences in x, y, z, and the overall distance between multiple skeletons are reported. The changes in the coordinate differences over time are also shown in the figures, and the differences for each joint type are presented in more detail.
\subsection{Accessing the data}
The complete dataset and plots are publicly available at \url{https://github.com/cjw-charleswu/KinectMultiTrackDataset}.
\subsection{Cleaning the data}
The logging data are post-processed for ease of plotting the results. The initial logging data, for all aforementioned studies, contain 244,527 rows and 87 columns in total. The final data for evaluation contain 243,550 rows and 87 columns; 977 rows are deleted for the reasons documented below.
\subsubsection{Logging error}
There is an error in the logging code where a scenario id is recorded incorrectly. In the second part of scenario 8, when the second participant is asked to walk around the first participant, the scenario id is falsely written as 4. This logging error is corrected by replacing all occurrences of scenario id 4 that appear immediately after scenario id 8 and before scenario id 5, which is the next task in line for the participants.
\subsubsection{Setting time intervals}
The tracker time is stored as the server's current time in milliseconds. The times are reset at the start of each user task (scenario), so the timestamps refer to the amount of time elapsed within each scenario, for a particular Kinect configuration and experiment.
The times are also converted from milliseconds to seconds.
\subsubsection{Converting joint coordinates}
The joint coordinates are converted from meters to centimeters.
\subsubsection{Removing singleton skeletons}
The studies are interested in the coordinate differences between multiple skeletons after transformation during tracking. Evaluation therefore requires the joint positions of more than one skeleton (from different Kinect fields of view) at any given timestamp, so rows containing only a single skeleton are removed.
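A hedged sketch (ours) of this cleaning pipeline; the file name and column names such as \texttt{scenario\_id}, \texttt{timestamp}, and \texttt{skeleton\_id} are assumptions, not the dataset's actual schema, and the grouping keys are simplified (a full version would also group by participant and Kinect configuration):
\begin{verbatim}
import pandas as pd

df = pd.read_csv('tracking_log.csv')

# Correct the mislogged scenario id: occurrences of 4 that
# directly follow scenario 8 belong to scenario 8.
ids = df['scenario_id'].to_numpy()
for i in range(1, len(ids)):
    if ids[i] == 4 and ids[i - 1] == 8:
        ids[i] = 8
df['scenario_id'] = ids

# Reset times per scenario and convert milliseconds to seconds.
start = df.groupby('scenario_id')['timestamp'].transform('min')
df['timestamp'] = (df['timestamp'] - start) / 1000.0

# Convert joint coordinates from meters to centimeters.
coords = [c for c in df.columns if c.endswith(('_x', '_y', '_z'))]
df[coords] = df[coords] * 100.0

# Drop timestamps observed by only one skeleton.
counts = df.groupby(['scenario_id', 'timestamp'])['skeleton_id'].transform('count')
df = df[counts > 1]
\end{verbatim}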
\subsection{Definitions}
$\Delta x$, $\Delta y$, $\Delta z$, $\Delta d$, Avg., and Std. are defined as:
\begin{description}
\item[$\Delta x$] The distance, or difference, between the x coordinates of a joint of multiple skeletons representing the same person from different Kinect fields of view, expressed in centimeters.
\item[$\Delta y$] The distance, or difference, between the y coordinates of a joint of multiple skeletons representing the same person from different Kinect fields of view, expressed in centimeters.
\item[$\Delta z$] The distance, or difference, between the z coordinates of a joint of multiple skeletons representing the same person from different Kinect fields of view, expressed in centimeters.
\item[$\Delta d$] The distance, or difference, between the positions (x, y, and z coordinates combined) of a joint of multiple skeletons representing the same person from different Kinect fields of view, expressed in centimeters.
\item[Avg.] Average (mean)
\item[Std.] Standard deviation
\end{description}
These values quantify the discrepancy produced by the tracking algorithm when transforming multiple skeletons of the same person into a single Kinect field of view.
Coordinates and joints distances are defined as:
\begin{description}
\item[Coordinates distances] The $\Delta x$, $\Delta y$, $\Delta z$, and $\Delta d$ distances averaged over a person's entire set of joints, expressed in centimeters.
\item[Joints distances] The $\Delta x$, $\Delta y$, $\Delta z$, and $\Delta d$ distances for each of a person's joints, expressed in centimeters.
\end{description}
%
% STATIONARY
%
\subsection{Stationary}
This section reports average coordinates and joints distances in the Stationary task with parallel, 45$^{\circ}$ and 90$^{\circ}$ apart Kinects. Figure \ref{fig:stationary_coordinates_joints} displays plots showing such information. Table \ref{table:stationary_coordinates_values} summarizes the actual values of the average coordinates distances in the current task across different Kinect placements. The figures show how Kinect placements affect the average coordinates and joints distances during the Stationary task.
The $\Delta d$ distance in the Stationary task with parallel Kinects is $3.52$. The $\Delta d$ distance in the Stationary task with 45$^{\circ}$ apart Kinects is $6.95$. The $\Delta d$ distance in the Stationary task with 90$^{\circ}$ apart Kinects is $11.39$. The average $\Delta d$ distance in the Stationary task over all Kinect placements is $7.29$, with a standard deviation of $3.95$.
%
% STATIONARY - COORDINATES AND JOINTS DISTANCES
%
\begin{figure}[!htb]
\centering
\subfloat[]{
\includesvg[width=1.0\columnwidth]{Coordinates_Task_Stationary}
\label{fig:stationary_coordinates}
} \\
\subfloat[]{
\includesvg[width=1.0\columnwidth]{Joints_Task_Stationary}
\label{fig:stationary_joints}
}
\caption{\protect\subref{fig:stationary_coordinates} shows average coordinates distances in the Stationary task with Parallel, 45$^{\circ}$ and 90$^{\circ}$ apart Kinects. The x axis is the Kinect placements. The y axis is the average distances in centimeters. \protect\subref{fig:stationary_joints} shows the average joints distances in the Stationary task averaged over all Kinect placements. The x axis is the joint types. The y axis is the average distances in centimeters.}
\label{fig:stationary_coordinates_joints}
\end{figure}
%
% STATIONARY - COORDINATES VALUES
%
\begin{table}[!htb]
\centering
\begin{tabularx}{1.0\columnwidth}{||X c c c c||}
\hline
\textbf{Distances} & \textbf{Parallel} & \textbf{45$^{\circ}$} & \textbf{90$^{\circ}$} & \textbf{Average} \\ [0.5ex]
\hline\hline
Avg. $\Delta x$ & 1.84 & 3.38 & 7.30 & 4.17 \\
\hline
Std. $\Delta x$ & 1.03 & 1.52 & 2.94 & 2.82 \\
\hline
Avg. $\Delta y$ & 1.28 & 3.59 & 4.35 & 3.07 \\
\hline
Std. $\Delta y$ & 0.49 & 1.50 & 2.15 & 1.60 \\
\hline
Avg. $\Delta z$ & 2.08 & 3.17 & 5.19 & 3.48 \\
\hline
Std. $\Delta z$ & 0.89 & 1.45 & 1.84 & 1.58 \\
\hline
Avg. $\Delta d$ & 3.52 & 6.95 & 11.39 & 7.29 \\
\hline
Std. $\Delta d$ & 1.33 & 2.67 & 4.45 & 3.95 \\
\hline
\end{tabularx}
\caption{Average coordinates distances in the Stationary task with Parallel, 45$^{\circ}$ and 90$^{\circ}$ Kinects, as well as the average case. The means and standard deviations for $\Delta x$, $\Delta y$, $\Delta z$, and $\Delta d$ are reported.}
\label{table:stationary_coordinates_values}
\end{table}
%
% STEPS
%
\subsection{Steps}
This section reports average coordinates and joints distances in the Steps task with parallel, 45$^{\circ}$ and 90$^{\circ}$ apart Kinects. Figure \ref{fig:steps_coordinates_joints} displays plots showing such information. Table \ref{table:steps_coordinates_values} summarizes the actual values of the average coordinates distances in the current task across different Kinect placements. The figures show how Kinect placements affect the average coordinates and joints distances during the Steps task.
The $\Delta d$ distance in the Steps task with parallel Kinects is $6.87$. The $\Delta d$ distance in the Steps task with 45$^{\circ}$ apart Kinects is $12.80$. The $\Delta d$ distance in the Steps task with 90$^{\circ}$ apart Kinects is $25.13$. The average $\Delta d$ distance in the Steps task over all Kinect placements is $14.93$, with a standard deviation of $9.32$.
%
% STEPS - COORDINATES AND JOINTS DISTANCES
%
\begin{figure}[!htb]
\centering
\subfloat[]{
\includesvg[width=1.0\columnwidth]{Coordinates_Task_Steps}
\label{fig:steps_coordinates}
} \\
\subfloat[]{
\includesvg[width=1.0\columnwidth]{Joints_Task_Steps}
\label{fig:steps_joints}
}
\caption{\protect\subref{fig:steps_coordinates} shows average coordinates distances in the Steps task with Parallel, 45$^{\circ}$ and 90$^{\circ}$ apart Kinects. The x axis is the Kinect placements. The y axis is the average distances in centimeters. \protect\subref{fig:steps_joints} shows the average joints distances in the Steps task averaged over all Kinect placements. The x axis is the joint types. The y axis is the average distances in centimeters.}
\label{fig:steps_coordinates_joints}
\end{figure}
%
% STEPS - COORDINATES VALUES
%
\begin{table}[!htb]
\centering
\begin{tabularx}{1.0\columnwidth}{||X c c c c||}
\hline
\textbf{Distances} & \textbf{Parallel} & \textbf{45$^{\circ}$} & \textbf{90$^{\circ}$} & \textbf{Average} \\ [0.5ex]
\hline\hline
Avg. $\Delta x$ & 4.48 & 8.18 & 16.70 & 9.78 \\
\hline
Std. $\Delta x$ & 0.53 & 0.70 & 1.70 & 6.25 \\
\hline
Avg. $\Delta y$ & 2.13 & 4.11 & 5.20 & 3.81 \\
\hline
Std. $\Delta y$ & 0.32 & 0.86 & 2.07 & 1.56 \\
\hline
Avg. $\Delta z$ & 3.58 & 6.47 & 13.83 & 7.96 \\
\hline
Std. $\Delta z$ & 0.95 & 1.77 & 1.95 & 5.28 \\
\hline
Avg. $\Delta d$ & 6.87 & 12.80 & 25.13 & 14.93 \\
\hline
Std. $\Delta d$ & 0.90 & 1.92 & 3.46 & 9.32 \\
\hline
\end{tabularx}
\caption{Average coordinates distances in the Steps task with Parallel, 45$^{\circ}$ and 90$^{\circ}$ Kinects, as well as the average case. The means and standard deviations for $\Delta x$, $\Delta y$, $\Delta z$, and $\Delta d$ are reported.}
\label{table:steps_coordinates_values}
\end{table}
%
% WALK
%
\subsection{Walk}
This section reports average coordinates and joints distances in the Walk task with parallel, 45$^{\circ}$ and 90$^{\circ}$ apart Kinects. Figure \ref{fig:walk_coordinates_joints} displays plots showing such information. Table \ref{table:walk_coordinates_values} summarizes the actual values of the average coordinates distances in the current task across different Kinect placements. The figures show how Kinect placements affect the average coordinates and joints distances during the Walk task.
The $\Delta d$ distance in the Walk task with parallel Kinects is $10.17$. The $\Delta d$ distance in the Walk task with 45$^{\circ}$ apart Kinects is $17.67$. The $\Delta d$ distance in the Walk task with 90$^{\circ}$ apart Kinects is $32.38$. The average $\Delta d$ distance in the Walk task over all Kinect placements is $20.07$, with a standard deviation of $11.30$.
\begin{figure}[!htb]
\centering
\subfloat[]{
\includesvg[width=1.0\columnwidth]{Coordinates_Task_Walk}
\label{fig:walk_coordinates}
} \\
\subfloat[]{
\includesvg[width=1.0\columnwidth]{Joints_Task_Walk}
\label{fig:walk_joints}
}
\caption{\protect\subref{fig:walk_coordinates} shows average coordinates distances in the Walk task with Parallel, 45$^{\circ}$ and 90$^{\circ}$ apart Kinects. The x axis is the Kinect placements. The y axis is the average distances in centimeters. \protect\subref{fig:walk_joints} shows the average joints distances in the Walk task averaged over all Kinect placements. The x axis is the joint types. The y axis is the average distances in centimeters.}
\label{fig:walk_coordinates_joints}
\end{figure}
\begin{table}[!htb]
\centering
\begin{tabularx}{1.0\columnwidth}{||X c c c c||}
\hline
\textbf{Distances} & \textbf{Parallel} & \textbf{45$^{\circ}$} & \textbf{90$^{\circ}$} & \textbf{Average} \\ [0.5ex]
\hline\hline
Avg. $\Delta x$ & 5.76 & 10.18 & 21.02 & 12.32 \\
\hline
Std. $\Delta x$ & 0.97 & 1.16 & 1.73 & 7.85 \\
\hline
Avg. $\Delta y$ & 3.17 & 5.78 & 5.47 & 4.81 \\
\hline
Std. $\Delta y$ & 0.57 & 0.70 & 0.96 & 1.42 \\
\hline
Avg. $\Delta z$ & 6.04 & 9.94 & 19.03 & 11.67 \\
\hline
Std. $\Delta z$ & 0.95 & 1.69 & 2.07 & 6.67 \\
\hline
Avg. $\Delta d$ & 10.17 & 17.67 & 32.38 & 20.07 \\
\hline
Std. $\Delta d$ & 1.64 & 2.37 & 3.38 & 11.30 \\
\hline
\end{tabularx}
\caption{Average coordinates distances in the Walk task with Parallel, 45$^{\circ}$ and 90$^{\circ}$ Kinects, as well as the average case. The means and standard deviations for $\Delta x$, $\Delta y$, $\Delta z$, and $\Delta d$ are reported.}
\label{table:walk_coordinates_values}
\end{table}
\FloatBarrier
%
% STATIONARY, STEPS, WALK
%
\subsection{Stationary, Steps, Walk}
This section summarizes the results in the Stationary, Steps, and Walk tasks, coupled with Parallel, 45$^{\circ}$ and 90$^{\circ}$ apart Kinects.
Figure \ref{fig:results_three_coordinates_joints} shows two sets of plots. Firstly, it shows the average coordinates and joints distances in the Stationary, Steps, and Walk tasks averaged over the different Kinect placements (Parallel, 45$^{\circ}$ and 90$^{\circ}$ apart Kinects). Secondly, it shows the average coordinates and joints distances with Parallel, 45$^{\circ}$ and 90$^{\circ}$ apart Kinects averaged over the different tasks (Stationary, Steps, and Walk). The figure shows how the complexity of the tasks and the placement of the Kinects, respectively, affect the accuracy of the tracking algorithm.
\begin{figure*}[!htb]
\centering
\subfloat[]{
\includesvg[width=1.0\columnwidth]{Coordinates_Kinect_All}
}
\subfloat[]{
\includesvg[width=1.0\columnwidth]{Joints_Kinect_All}
} \\
\subfloat[]{
\includesvg[width=1.0\columnwidth]{Coordinates_Task_All}
}
\subfloat[]{
\includesvg[width=1.0\columnwidth]{Joints_Task_All}
} \\
\subfloat[]{
\begin{tabularx}{1.0\columnwidth}{||X c c c c||}
\hline
\textbf{Distances} & \textbf{Stationary} & \textbf{Steps} & \textbf{Walk} & \textbf{Average} \\ [0.5ex]
\hline\hline
Avg. $\Delta x$ & 4.17 & 9.78 & 12.32 & 8.76 \\
\hline
Std. $\Delta x$ & 2.82 & 6.25 & 7.85 & 5.64 \\
\hline
Avg. $\Delta y$ & 3.07 & 3.81 & 4.81 & 3.90 \\
\hline
Std. $\Delta y$ & 1.60 & 1.56 & 1.42 & 1.53 \\
\hline
Avg. $\Delta z$ & 3.48 & 7.96 & 11.67 & 7.70 \\
\hline
Std. $\Delta z$ & 1.58 & 5.28 & 6.67 & 4.51 \\
\hline
Avg. $\Delta d$ & 7.29 & 14.93 & 20.07 & 14.10 \\
\hline
Std. $\Delta d$ & 3.95 & 9.32 & 11.30 & 8.19 \\
\hline
\end{tabularx}
}
\subfloat[]{
\begin{tabularx}{1.0\columnwidth}{||X c c c c||}
\hline
\textbf{Distances} & \textbf{Parallel} & \textbf{45$^{\circ}$} & \textbf{90$^{\circ}$} & \textbf{Average} \\ [0.5ex]
\hline\hline
Avg. $\Delta x$ & 4.03 & 7.25 & 15.00 & 6.57 \\
\hline
Std. $\Delta x$ & 2.00 & 3.50 & 7.00 & 6.35 \\
\hline
Avg. $\Delta y$ & 2.19 & 4.49 & 5.01 & 2.92 \\
\hline
Std. $\Delta y$ & 0.95 & 1.15 & 0.58 & 2.30 \\
\hline
Avg. $\Delta z$ & 3.90 & 6.53 & 12.68 & 5.78 \\
\hline
Std. $\Delta z$ & 2.00 & 3.39 & 6.99 & 5.33 \\
\hline
Avg. $\Delta d$ & 6.85 & 12.47 & 22.97 & 10.57 \\
\hline
Std. $\Delta d$ & 3.32 & 5.36 & 10.66 & 9.71 \\
\hline
\end{tabularx}
}
\caption{Overall results comparing the effects of task difficulty and Kinect placement on the average coordinates and joints distances: the average coordinates and joints distances in the Stationary, Steps, and Walk tasks averaged over Parallel, 45$^{\circ}$ and 90$^{\circ}$ apart Kinects, and the average coordinates and joints distances with Parallel, 45$^{\circ}$ and 90$^{\circ}$ apart Kinects averaged over the Stationary, Steps, and Walk tasks.}
\label{fig:results_three_coordinates_joints}
\end{figure*}
\FloatBarrier
Figure \ref{fig:results_three_joints_over_time} shows the average coordinates distances over time in the Stationary, Steps, and Walk tasks with Parallel, 45$^{\circ}$ and 90$^{\circ}$ apart Kinects. These figures demonstrate the stability of the tracking algorithm for different joints over time when performing different tasks under different Kinect placements. The figures are taken from participant 19.
\begin{figure*}[!htb]
\centering
\subfloat[Stationary]{
\includesvg[width=0.666\columnwidth]{Participant_19_Task_Stationary_Kinect_Parallel_Coordinates}
\includesvg[width=0.666\columnwidth]{Participant_19_Task_Stationary_Kinect_45_Coordinates}
\includesvg[width=0.666\columnwidth]{Participant_19_Task_Stationary_Kinect_90_Coordinates}
} \\
\subfloat[Steps]{
\includesvg[width=0.666\columnwidth]{Participant_19_Task_Steps_Kinect_Parallel_Coordinates}
\includesvg[width=0.666\columnwidth]{Participant_19_Task_Steps_Kinect_45_Coordinates}
\includesvg[width=0.666\columnwidth]{Participant_19_Task_Steps_Kinect_90_Coordinates}
} \\
\subfloat[Walk]{
\includesvg[width=0.666\columnwidth]{Participant_19_Task_Walk_Kinect_Parallel_Coordinates}
\includesvg[width=0.666\columnwidth]{Participant_19_Task_Walk_Kinect_45_Coordinates}
\includesvg[width=0.666\columnwidth]{Participant_19_Task_Walk_Kinect_90_Coordinates}
}
\caption{Average coordinates distances over time in the Stationary, Steps, and Walk tasks with Parallel, 45$^{\circ}$, and 90 $^{\circ}$ apart Kinects.}
\label{fig:results_three_joints_over_time}
\end{figure*}
\FloatBarrier
Figure \ref{fig:results_three_joints_over_time_heatmap} shows the average joints distances over time in the Stationary, Steps, and Walk tasks with Parallel, 45$^{\circ}$ and 90$^{\circ}$ apart Kinects, as heatmaps over the different joint types. These figures demonstrate the accuracy of the tracking algorithm in different scenarios; the joints distances are aligned over the time domain.
\begin{figure*}[!htb]
\centering
\subfloat[Stationary]{
\includesvg[width=0.66\columnwidth]{Participant_19_Task_Stationary_Kinect_Parallel}
\includesvg[width=0.66\columnwidth]{Participant_19_Task_Stationary_Kinect_45}
\includesvg[width=0.66\columnwidth]{Participant_19_Task_Stationary_Kinect_90}
} \\
\subfloat[Steps]{
\includesvg[width=0.66\columnwidth]{Participant_19_Task_Steps_Kinect_Parallel}
\includesvg[width=0.66\columnwidth]{Participant_19_Task_Steps_Kinect_45}
\includesvg[width=0.66\columnwidth]{Participant_19_Task_Steps_Kinect_90}
} \\
\subfloat[Walk]{
\includesvg[width=0.66\columnwidth]{Participant_19_Task_Walk_Kinect_Parallel}
\includesvg[width=0.66\columnwidth]{Participant_19_Task_Walk_Kinect_45}
\includesvg[width=0.66\columnwidth]{Participant_19_Task_Walk_Kinect_90}
}
\caption{Average joints distances in the Stationary, Steps, and Walk tasks with Parallel, 45$^{\circ}$ and 90$^{\circ}$ apart Kinects. The figures show heatmaps over different joint types.}
\label{fig:results_three_joints_over_time_heatmap}
\end{figure*}
\FloatBarrier
\subsection{Obstacle}
\textbf{show screenshots}
\subsection{Occlusion through interaction}
\textbf{show screenshots}
\subsection{Overall}
\begin{figure*}[!htb]
\centering
\includesvg[width=2.0\columnwidth]{Coordinates_All}
\caption{Overall average coordinates distances across all experimental setups.}
\label{fig:results_overall}
\end{figure*}
\begin{table}[!htb]
\centering
\begin{tabularx}{1.0\columnwidth}{||X c c c c||}
\hline
\textbf{Setup} & Avg. $\Delta x$ & Avg. $\Delta y$ & Avg. $\Delta z$ & Avg. $\Delta d$ \\ [0.5ex]
\hline\hline
Parallel, Stationary & 1.84 & 1.28 & 2.08 & 3.52 \\
\hline
45$^{\circ}$, Stationary & 3.38 & 3.59 & 3.17 & 6.95 \\
\hline
90$^{\circ}$, Stationary & 7.30 & 4.35 & 5.19 & 11.39 \\
\hline
Parallel, Steps & 4.48 & 2.13 & 3.58 & 6.87 \\
\hline
45$^{\circ}$, Steps & 8.18 & 4.11 & 6.47 & 12.80 \\
\hline
90$^{\circ}$, Steps & 16.70 & 5.20 & 13.83 & 25.13 \\
\hline
Parallel, Walk & 5.76 & 3.17 & 6.04 & 10.17 \\
\hline
45$^{\circ}$, Walk & 10.18 & 5.78 & 9.94 & 17.67 \\
\hline
90$^{\circ}$, Walk & 21.02 & 5.47 & 19.03 & 32.38 \\
\hline
\end{tabularx}
\caption{Average coordinates distances for each experimental setup (task and Kinect placement), collected from Tables \ref{table:stationary_coordinates_values}, \ref{table:steps_coordinates_values} and \ref{table:walk_coordinates_values}.}
\label{table:overall_coordinates_values}
\end{table}
\FloatBarrier
% Table \ref{table:stationary_joints_values} shows the average joints distances in the Stationary task for Parallel, 45$^{\circ}$ and 90$^{\circ}$ apart Kinects.
% \begin{table}[!htb]
% \centering
% \begin{tabularx}{1.0\columnwidth}{||X c c c c||}
% \hline
% \textbf{Distances} & \textbf{Parallel} & \textbf{45$^{\circ}$} & \textbf{90$^{\circ}$} & \textbf{Average} \\ [0.5ex]
% \hline\hline
% AnkleLeft $\Delta d$ & 4.98 & 12.49 & 13.69 & 10.39 \\
% \hline
% AnkleRight $\Delta d$ & 4.36 & 8.86 & 8.40 & 7.21 \\
% \hline
% ElbowLeft $\Delta d$ & 2.20 & 4.40 & 16.65 & 7.75 \\
% \hline
% ElbowRight $\Delta d$ & 3.10 & 5.37 & 8.75 & 5.7367 \\
% \hline
% FootLeft $\Delta d$ & 5.66 & 14.21 & 15.74 & 11.87 \\
% \hline
% FootRight $\Delta d$ & 4.36 & 9.64 & 8.34 & 7.45 \\
% \hline
% HandLeft $\Delta d$ & 3.00 & 6.39 & 18.61 & 9.34 \\
% \hline
% HandRight $\Delta d$ & 3.27 & 5.36 & 10.67 & 6.43 \\
% \hline
% HandTipLeft $\Delta d$ & 3.40 & 7.33 & 19.74 & 10.16 \\
% \hline
% HandTipRight $\Delta d$ & 3.40 & 5.75 & 11.44 & 6.86 \\
% \hline
% Head $\Delta d$ & 5.65 & 9.29 & 13.41 & 9.45 \\
% \hline
% HipLeft $\Delta d$ & 1.40 & 3.69 & 7.04 & 4.04 \\
% \hline
% HipRight $\Delta d$ & 1.27 & 3.91 & 7.03 & 4.07 \\
% \hline
% KneeLeft $\Delta d$ & 3.52 & 9.38 & 10.92 & 7.94 \\
% \hline
% KneeRight $\Delta d$ & 2.94 & 6.45 & 6.49 & 5.29 \\
% \hline
% Neck $\Delta d$ & 3.67 & 5.72 & 6.53 & 5.31 \\
% \hline
% ShoulderLeft $\Delta d$ & 3.40 & 5.76 & 9.84 & 6.33 \\
% \hline
% ShoulderRight $\Delta d$ & 3.12 & 5.57 & 7.72 & 5.47 \\
% \hline
% SpineBase $\Delta d$ & 1.47 & 5.17 & 9.15 & 5.26 \\
% \hline
% SpineMid $\Delta d$ & 2.47 & 4.24 & 7.59 & 4.77 \\
% \hline
% SpineShoulder $\Delta d$ & 3.38 & 5.22 & 6.68 & 5.09 \\
% \hline
% ThumbLeft $\Delta d$ & 5.69 & 9.60 & 19.65 & 11.65 \\
% \hline
% ThumbRight $\Delta d$ & 6.15 & 8.69 & 11.37 & 8.74 \\
% \hline
% WristLeft $\Delta d$ & 2.87 & 5.93 & 19.04 & 9.28 \\
% \hline
% WristRight $\Delta d$ & 3.21 & 5.43 & 10.39 & 6.34 \\
% \hline
% \end{tabularx}
% \caption{Average $\Delta d$ for each of the joints in the Stationary task with Parallel, 45$^{\circ}$ and 90$^{\circ}$ apart Kinects. The average shows the mean $\Delta d$ distance of each joint over the three different Kinect placements.}
% \label{table:stationary_joints_values}
% \end{table}
% \FloatBarrier
% Table \ref{table:steps_joints_values} shows the average joints distances in the Steps task for Parallel, 45$^{\circ}$ and 90$^{\circ}$ apart Kinects.
% \begin{table}[!htb]
% \centering
% \begin{tabularx}{1.0\columnwidth}{||X c c c c||}
% \hline
% \textbf{Distances} & \textbf{Parallel} & \textbf{45$^{\circ}$} & \textbf{90$^{\circ}$} & \textbf{Average} \\ [0.5ex]
% \hline\hline
% AnkleLeft $\Delta d$ & 8.18 & 16.51 & 28.2 & 17.63 \\
% \hline
% AnkleRight $\Delta d$ & 6.99 & 13.48 & 23.53 & 14.67 \\
% \hline
% ElbowLeft $\Delta d$ & 6.41 & 12.60 & 29.14 & 16.05 \\
% \hline
% ElbowRight $\Delta d$ & 6.12 & 10.84 & 22.10 & 13.02 \\
% \hline
% FootLeft $\Delta d$ & 9.00 & 17.98 & 30.50 & 19.16 \\
% \hline
% FootRight $\Delta d$ & 7.40 & 13.56 & 23.62 & 14.86 \\
% \hline
% HandLeft $\Delta d$ & 6.92 & 13.94 & 30.47 & 17.11 \\
% \hline
% HandRight $\Delta d$ & 6.15 & 11.02 & 22.69 & 13.29 \\
% \hline
% HandTipLeft $\Delta d$ & 7.33 & 14.40 & 31.53 & 17.75 \\
% \hline
% HandTipRight $\Delta d$ & 6.23 & 11.23 & 23.16 & 13.54 \\
% \hline
% Head $\Delta d$ & 8.24 & 14.09 & 25.27 & 15.87 \\
% \hline
% HipLeft $\Delta d$ & 5.93 & 11.05 & 22.19 & 13.05 \\
% \hline
% HipRight $\Delta d$ & 5.64 & 10.85 & 22.57 & 13.02 \\
% \hline
% KneeLeft $\Delta d$ & 6.81 & 13.76 & 26.56 & 15.71 \\
% \hline
% KneeRight $\Delta d$ & 6.25 & 11.79 & 22.73 & 13.59 \\
% \hline
% Neck $\Delta d$ & 6.71 & 11.63 & 22.18 & 13.50 \\
% \hline
% ShoulderLeft $\Delta d$ & 6.81 & 12.55 & 25.40 & 14.92 \\
% \hline
% ShoulderRight $\Delta d$ & 6.28 & 11.44 & 22.08 & 13.27 \\
% \hline
% SpineBase $\Delta d$ & 5.95 & 11.53 & 22.58 & 13.35 \\
% \hline
% SpineMid $\Delta d$ & 6.14 & 11.10 & 22.20 & 13.15 \\
% \hline
% SpineShoulder $\Delta d$ & 6.55 & 11.44 & 22.16 & 13.39 \\
% \hline
% ThumbLeft $\Delta d$ & 8.33 & 15.90 & 31.55 & 18.59 \\
% \hline
% ThumbRight $\Delta d$ & 8.19 & 12.65 & 23.31 & 14.72 \\
% \hline
% WristLeft $\Delta d$ & 6.98 & 13.66 & 30.02 & 16.88 \\
% \hline
% WristRight $\Delta d$ & 6.19 & 10.95 & 22.54 & 13.23 \\
% \hline
% \end{tabularx}
% \caption{Average $\Delta d$ for each of the joints in the Steps task with Parallel, 45$^{\circ}$ and 90$^{\circ}$ apart Kinects. The average shows the mean $\Delta d$ distance of each joint over the three different Kinect placements.}
% \label{table:steps_joints_values}
% \end{table}
% \FloatBarrier
% Table \ref{table:walk_joints} shows the average joints distances in the Walk task for Parallel, 45$^{\circ}$ and 90$^{\circ}$ apart Kinects.
% \begin{table}[!htb]
% \centering
% \begin{tabularx}{1.0\columnwidth}{||X c c c c||}
% \hline
% \textbf{Distances} & \textbf{Parallel} & \textbf{45$^{\circ}$} & \textbf{90$^{\circ}$} & \textbf{Average} \\ [0.5ex]
% \hline\hline
% AnkleLeft $\Delta d$ & 11.65 & 19.72 & 34.21 & 21.86 \\
% \hline
% AnkleRight $\Delta d$ & 10.76 & 18.65 & 32.66 & 20.69 \\
% \hline
% ElbowLeft $\Delta d$ & 10.46 & 18.80 & 36.77 & 22.01 \\
% \hline
% ElbowRight $\Delta d$ & 9.16 & 16.33 & 30.89 & 18.80 \\
% \hline
% FootLeft $\Delta d$ & 12.60 & 21.48 & 36.61 & 23.56 \\
% \hline
% FootRight $\Delta d$ & 11.47 & 19.52 & 33.60 & 21.53 \\
% \hline
% HandLeft $\Delta d$ & 11.64 & 20.53 & 37.43 & 23.20 \\
% \hline
% HandRight $\Delta d$ & 10.53 & 17.27 & 32.02 & 19.94 \\
% \hline
% HandTipLeft $\Delta d$ & 12.11 & 21.19 & 37.81 & 23.70 \\
% \hline
% HandTipRight $\Delta d$ & 10.93 & 17.71 & 32.51 & 20.38 \\
% \hline
% Head $\Delta d$ & 9.41 & 16.71 & 30.67 & 18.93 \\
% \hline
% HipLeft $\Delta d$ & 8.30 & 15.43 & 29.12 & 17.61 \\
% \hline
% HipRight $\Delta d$ & 8.08 & 14.55 & 28.00 & 16.88 \\
% \hline
% KneeLeft $\Delta d$ & 10.08 & 17.39 & 31.74 & 19.74 \\
% \hline
% KneeRight $\Delta d$ & 9.08 & 16.28 & 29.82 & 18.39 \\
% \hline
% Neck $\Delta d$ & 8.34 & 14.79 & 28.32 & 17.15 \\
% \hline
% ShoulderLeft $\Delta d$ & 9.86 & 17.73 & 34.08 & 20.56 \\
% \hline
% ShoulderRight $\Delta d$ & 8.52 & 15.27 & 28.71 & 17.50 \\
% \hline
% SpineBase $\Delta d$ & 8.02 & 14.94 & 28.51 & 17.15 \\
% \hline
% SpineMid $\Delta d$ & 7.82 & 14.43 & 28.24 & 16.83 \\
% \hline
% SpineShoulder $\Delta d$ & 8.16 & 14.63 & 28.28 & 17.02 \\
% \hline
% ThumbLeft $\Delta d$ & 13.53 & 22.07 & 37.68 & 24.43 \\
% \hline
% ThumbRight $\Delta d$ & 12.02 & 18.88 & 32.56 & 21.15 \\
% \hline
% WristLeft $\Delta d$ & 11.53 & 20.36 & 37.40 & 23.10 \\
% \hline
% WristRight $\Delta d$ & 10.11 & 16.99 & 31.84 & 19.64 \\
% \hline
% \end{tabularx}
% \caption{Average $\Delta d$ for each of the joints in the Walk task with Parallel, 45$^{\circ}$ and 90$^{\circ}$ apart Kinects. The average shows the mean $\Delta d$ distance of each joint over the three different Kinect placements.}
% \label{table:walk_joints}
% \end{table}
% \FloatBarrier
\section{Differential Rings and Modules}
Recalling our definition of differential rings (an associative algebra with a derivation Lie algebra), we wish to extend our notation.
\subsection{Differential modules and ideals}
\begin{defi}
Let $(R,D)$ be a differential ring.
\bn
\item A differential module $(M,D_M)$ is an $R$-module together with a set $D_M$ of additive maps $D : M \longrightarrow M$ satisfying the Leibniz rule
$$D(r m) = \partial_R(r)\, m + r\, D(m)\quad \forall\ D \in D_M,\ r \in R,\ m \in M,$$
where $\partial_R \in \trm{Der}(R) = \left<D\right>$ is the derivation of $R$ associated to $D$.
\item A differential ideal $I \subset R$ is a differential $R$-submodule of $R$, i.e. $\partial(I) \subset I$ for all $\partial \in D$.
\item Let $(R,D_R)$ and $(S,D_S)$ be two differential rings. A ring homomorphism $f : R \longrightarrow S$ is called differential if
$$\partial_S \circ f = f \circ \partial_R$$
or equivalently the following diagram commutes:
$$\xymatrix{
R \ar[d]_{\partial_R} \ar[r]^f & S\ar[d]^{\partial_S}\\
R \ar[r]_{f} & S.\\
}$$
%where $\partial_X(X) := \trm{im} \partial_X$ for $X = R, S$.
\item A differential ring $(R,D)$ is called simple differential if it has no proper differential ideals.
\item A differential field is a differential ring with no proper nonzero ideals at all (differential or non-differential), i.e. a field equipped with a derivation.
\item Let $(R,D)$ be a differential ring. The subset
$$R^\partial := \{x \in R : \partial(x) = 0\ \forall \partial \in D\}$$
defines a subring and is called the ring of constants.
\en
\index{Index}{module!differential}
\index{Index}{ring!differential}
\index{Index}{ring!differential!homomorphism of}
\index{Index}{ring!differential!simple}
\index{Index}{ring!differential! of constants}
\index{Index}{field!of constants}
\index{Symbol}{$\trm{Der}_R(A)$}
\index{Symbol}{$R^\partial$}
\end{defi}
\bmk As mentioned, the derivation maps $D$ on differential modules $(M,D_M)$ are only additive: in general they do not lie in $\trm{End}_R(M)$, but in $\trm{End}_{R^\partial}(M)$, i.e. they are linear only over the ring of constants. This is due to the Leibniz rule as defined above.\\
\indent The ring of constants $R^\partial$ for a differential field $(R, D)$ is also a field, since:
$$\partial(a a^{-1}) = \partial(1) = 0 = \underbrace{\partial(a)}_{=0} a^{-1} + a \partial(a^{-1}) \LRA \partial(a^{-1}) = 0$$
holds for all nonzero $a \in R^\partial$.
\bsp Some prominent examples:
\bn
\item any ring $R$ is a (trivial) differential ring via the zero derivation $0 : R \longrightarrow R$, $r \longmapsto 0$. Thus, all $R$-modules are also differential modules via the zero homomorphism.
\item The polynomial ring in one indeterminate:
$$\left(k[X], \partial = \frac{d}{dX}\right),$$
$k$ a field of characteristic zero, is a simple differential ring: any nonzero differential ideal $I$ contains a nonzero polynomial of minimal degree, and differentiating it repeatedly eventually yields a nonzero constant, so $I = (1)$. On the other hand, $(k[X], \partial = X \frac{d}{dX})$ has non-trivial differential ideals:
$$I_i = \left<X^i\right>,\ \forall i \geq 1$$
as each $k$-subspace $k.X^i$ is $\partial$-stable.
\item Let $p \in \nz$ be a prime number. The polynomial ring $\mathbb{F}_p[X]$ with the $\mathbb{F}_p$-derivation $\partial = \frac{d}{d X}$, as in the last example, has an interesting property: its ring of constants is $\mathbb{F}_p[X^p]$, as $\partial(X^p) = p X^{p-1} = 0$, and the ring does contain non-trivial differential ideals (in contrast to the characteristic zero case):
$$I_k := \left<X^{p^k}\right>,\ \forall k \geq 1$$
since $\partial(X^{p^k} f) = p^k X^{p^k - 1} f + X^{p^k} \partial(f) = X^{p^k} \partial(f)$ for all $f \in \mathbb{F}_p[X]$.
\item The field of rational functions $k(X)$ is a differential field with derivation Lie algebra generated by
$$D = \left\{\partial'_x = \left[\frac{f}{g} \mapsto \frac{\partial_x(f) g - f \partial_x(g)}{g^2}\right] : x \in X\right\}.$$
\item Let $U \subset \rz^n$ be open and connected; then $(C^\infty(U),D=\{\partial_i : 1 \leq i \leq n\})$ is a partial differential ring with a non-trivial differential ideal $\mathfrak{m} := \{f \in C^\infty(U) : \trm{supp} (f) \subsetneq U\}$. Moreover, for every $f \in C^\infty(U) \bsl \rz[\{x\}]$ we may define a proper differential ideal by simply putting
$$\left<\{f\}\right> = \left<\partial_{i_1} \circ \hdots \circ \partial_{i_k}(f) : 1\leq i_j \leq n,\ k \in \nz_0\right>,$$
where $\rz[\{x\}]$ denotes the ring of real convergent power series in $U$. These ideals are, in general, not finitely generated.
\en
\subsection{General Differential Algebra}
Let $(k,D)$ be a differential ring with $D = \{\partial\}$ and $k^\partial$ its ring of constants.
\subsubsection{Ring of differential operators}
In terms of Ore extensions, the subring $D := k[\partial] \subset \trm{End}_{k^\partial}(k)$ is isomorphic to the Ore-extension $k[X,id_k,\partial]$ over $k$. To show this we rely on its intrinsic module algebra structure:\\
$$\Psi := \Psi_{\trm{int}} \mid_{D \otimes k.id_D} = \left[d' \longmapsto \left[d \longmapsto \mu_D(d' \otimes d) = d'(d)\right]\right],$$
where $\Psi_{\trm{int}} : D \otimes \trm{End}_{k^\partial}(D) \longrightarrow \trm{End}_{k^\partial}(D)$, restricted to the subalgebra $k.id_D$.
\indent $k[\partial]$ has the structure of a pointed-irreducible bialgebra of Birkhoff-Witt type (only $1_{k[\partial]}$ is group-like, the generator $X \in \mathfrak{g}_{k^\partial}(k)$ has primitive coproduct, and $(k[\partial],\mu,\eta)$ is isomorphic to the enveloping algebra of some Lie algebra). Its structure map is
$$\Psi : D \otimes D \longrightarrow D,\ \partial \otimes a \partial^i \longmapsto \partial(a) \partial^i + a \partial^{i+1}.$$
To see the isomorphism with respect to Ore-extensions, we consider the $k$-left module $k.\partial^i$ with its $k$-right module structure described above:
$$\partial^i \otimes a \sim \sum_{j=0}^i \left(\bao{c}i\\j\\\ea\right) \partial^j(a) \otimes \partial^{i-j}$$
Hence, evaluating the ideal generators $1 \otimes X \otimes a - a \otimes X \otimes 1 - \partial(a) \in \bigoplus_{n \geq 0} (k \otimes \partial)^{\otimes n} \otimes k$, as in \ref{prop03} on pg. \pageref{prop03}, via $id_k \otimes \Psi : k \otimes D \otimes k \longrightarrow k \otimes k \simeq k$ yields:
$$id \otimes \Psi(1 \otimes X \otimes a - a \otimes X \otimes 1 - \partial(a) \otimes 1_D \otimes 1_k) = X(a) - a X(1) - \partial(a).$$
Identifying $X = \partial$, we indeed get zero, as desired. Hence, the quotient yields the desired Ore-extension $k[X,id_k,\partial] \simeq k[\partial]$. Further properties of $k[\partial]$ are discussed in greater detail in \cite{vdPS01}, Chapter 2.\\
\indent Concluding this subsection, we note that $k[\partial]$ is a unital, associative, coassociative, cocommutative bialgebra (indeed, has an antipode, which will be discussed below) acting on $k$ via evaluation.
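To make the Ore relation concrete (a small check of our own, not from the text): identifying $X = \partial$, multiplication in $k[\partial]$ satisfies
$$\partial \cdot a = a\, \partial + \partial(a)\quad \forall\ a \in k;$$
e.g. over $\left(\mathbb{Q}(x), \frac{d}{dx}\right)$ we have $\partial \cdot x = x\, \partial + 1$, since $(\partial \cdot x)(f) = \partial(x f) = f + x\, \partial(f)$ for all $f \in \mathbb{Q}(x)$.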
\subsubsection{Ring of differential polynomials}\label{RingOfDiffPolys}
As above we are using a differential field $(k,\partial)$, $k^\partial$ its ring of constants and its ring of differential operators $k[\partial]$ being generated by one element $\partial$.%, pick some differential field $(k,\partial)$ and
We consider the ring of polynomials $R := k[u_1,\ldots,u_n]$, i.e. a noetherian ring over $k$. In general, there is no canonical extension of $\partial$ to $R$ except the trivial one, $u_i \longmapsto 0$ (which enlarges the ring of constants to $k^\partial[u_1,\ldots,u_n]$). However, we may use a non-noetherian transcendental extension $S$ over $R$
$$S := k[u_{i,j} : 1 \leq i \leq n, j \in \nz_0],$$ where $u_{i} \longmapsto u_{i,0}$ defines an embedding of $R$ in $S$. Indeed, this ring still has no canonical $D = k[\partial]$-module algebra structure. However, the module:
$$D \otimes_k S := D \otimes_{k^\partial} S/\left<d \otimes x s - \sum_{(d)} d_{(1)}(x)d_{(2)} \otimes s : x \in k, d \in D, s \in S\right>,$$
where $\Delta(d) = \sum_{(d)} d_{(1)} \otimes d_{(2)}$, has:
$$\partial^i \otimes f_\alpha u^\alpha \sim_\partial \sum_{j=0}^i\left(\bao{c}i\\j\\\ea\right) \partial^j(f_\alpha) \partial^{i-j}\otimes u^\alpha.$$
Its module algebra structure map $\Psi : D \otimes_k (D \otimes_k S) \longrightarrow D \otimes_k S$ is given by the above equivalence relation. The ideal
$$I := \left<\partial^{\alpha_3} \otimes u_{\alpha_1,\alpha_2} - \partial^\beta \otimes u_{\alpha_1,\alpha_2 + \alpha_3 - \beta} : 1 \leq \alpha_1 \leq n, \alpha_2, \alpha_3 \in \nz_0, 0 \leq \beta \leq \alpha_3 - 1\right> \subset D \otimes_k S$$ gives us our desired:
\begin{defi}[Ring of differential polynomials]
Let $S$ and $I$ be defined as above. The quotient ring $D \otimes_k S/I$ is called the ring of differential polynomials and is denoted by:
$$k\left\{u_1,\ldots,u_n\right\} := D \otimes_k S/I.$$
\end{defi}
\bmk %Firstly, we remark that if $\partial u_{i,j} = u_{i,j+1} \in K\{u\}$ then also
%$$\partial^l(u_{i,j}) = \partial^{l-1}(u_{i,j+1}) = \ldots = u_{i,j+l} \in k\{u\}\ \forall 1 \leq i \leq n,\ j \geq 0,\ l \geq 1.$$
Sometimes we may write $u_i^{(j)}$ instead of $u_{i,j}$. Note that this indeed defines a (non-noetherian) differential ring with derivations $\{\partial, \partial_{u_{i,j}}\}$; here the $\partial_{u_{i,j}}$ are $k$-derivations, while $\partial$ is a $k^\partial$-derivation. Additionally, we may still recover $R$:
$$R \simeq k\{u_1,\ldots,u_n\}/J,\ \trm{where}\ J := \left<u_{i,1} - 1, u_{i,j} : 1 \leq i \leq n, j \geq 2\right> \subset k\{u_{i,j}\}.$$
\indent The definition naturally translates to differential rings: if $(R,\partial)$ is our differential ring, with ring of constants $R^\partial$ and $D$-stable ideal $I$ as above, then
$$R\{u_1,\ldots,u_n\} := D \otimes_k R[u_{i,j} : 1 \leq i \leq n, j \geq 0]/I.$$
Although $k\{u_1,\ldots,u_n\}$ is non-noetherian (its differential ideals do not satisfy the ascending chain condition), the factor rings we will consider are in fact noetherian.
\begin{defi}
Let $(k,\partial)$ be a differential ring (field). An associative unital $k$-algebra $K$ is called a differential extension of $k$, short $K/k$, if there is an $n \in \nz$ and some differential ideal $I \subset k\{u_1,\ldots,u_n\}$ such that
$$K \simeq k\{u_1,\ldots,u_n\}/I.$$
\index{Index}{extension!differential}
\end{defi}
\bmk Given a differential ring $(k,\partial)$ and a finite family of differential polynomials $\mathcal{F} \subset k\{u_1,\ldots,u_{|\mathcal{F}|}\}$, the associated differential ideal $I$ is simply the differential saturation:
$$I := \left<\partial^i(f) : f \in \mathcal{F},\ i \geq 0\right>.$$
Such a family is called a differential equation: linear if the degree of every monomial is at most one, and non-linear otherwise. We call it an explicit differential equation if each element of $\mathcal{F}$ is linear with respect to the $\partial u_i$ and all coefficients of monomials of the form $\prod_{i,j} \partial^j u_i$ with $j \geq 2$ vanish. Otherwise, it is called implicit.
\index{Index}{differential saturation}
\index{Index}{differential equation}
\index{Index}{differential equation!linear}
\index{Index}{differential equation!non-linear}
\index{Index}{differential equation!explicit}
\index{Index}{differential equation!implicit}
\begin{defi}
Let $k\{u_1,\hdots,u_n\} =: k\{u\}$ be the ring of differential polynomials over some differential field $k$ and $k[\partial]$ the left $k$-module of differential operators on $k\{u\}$. The map
$$\bao{rcl}
ev : k[\partial] \otimes_{k^\partial} k\{u\} & \longrightarrow &k\{u\}\\
&&\\
\sum_{i,\alpha} a_i \partial^i \otimes b_\alpha u_\alpha &\longmapsto& \sum_{i,\alpha}\ \sum_{j \leq i} \left(\bao{c}i\\j\\\ea\right) a_i\, \partial^j(b_\alpha)\, \partial^{i-j}(u_\alpha)\\
\ea$$
is called the evaluation homomorphism.
\end{defi}
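For instance (a sanity check of our own), for $a, b \in k$:
$$ev\left(a \partial^2 \otimes b\, u_1\right) = a\, \partial^2(b)\, u_1 + 2\, a\, \partial(b)\, \partial(u_1) + a\, b\, \partial^2(u_1),$$
which is exactly the Leibniz expansion of $a\, \partial^2(b\, u_1)$.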
\bmk %The evaluation homomorphism gives us a $k[\partial]$-module algebra structure on $k\{u\}$, where $(k[\partial],\mu,\eta,\Delta,\eps)$ is the bialgebra structure on $k[\partial]$ (infact, it has a Hopf-algebra structure via $S : k[\partial] \longrightarrow k[\partial]$, $\partial \longmapsto -\partial$.
Obviously, the definitions of $ev$ and $\Psi$ coincide. Using this setting we get
\begin{koro}
The two-sided $k$-module $k[\partial]$ defines a unital $k$-algebra (in general non-commutative if $k^\partial \neq k$).
\end{koro}
\bws As $k$ is a unital commutative simple algebra over $k^\partial$ and $k[\partial]$ is an Ore-extension as shown above, there is nothing more to show.
\bmk Moreover, $k[\partial]$ has a $k^\partial$-coalgebra structure:
$$\Delta =\left[ \partial^i \longmapsto \sum_{j=0}^i \left(\bao{c}i\\j\\\ea\right) \partial^{i-j} \otimes \partial^j\right],\ \eps = [\partial^i \longmapsto \delta_{i,0}]$$
making it into a $k^\partial$-bialgebra, since clearly $\eps \circ \eta = id_{k^\partial}$ and $\Delta \circ \eta = \eta \otimes \eta$ up to the identification $k^\partial \simeq k^\partial \otimes k^\partial$.
\begin{prop}
The following statements are equivalent:
\bn
\item\label{item01} $k\{u\}$ is a $k[\partial]$-module algebra (or $\Psi := ev$ defines a module algebra structure on $k\{u\}$).
\item \label{item02} Given the evaluation homomorphism and multiplication on $k[\partial]$ then the following diagram commutes
$$\xymatrix{
k[\partial] \otimes k[\partial] \otimes k\{u\} \ar[r]^{\mu \otimes id_{k\{u\}}}\ar[d]_{id_{k[\partial]} \otimes ev} & k[\partial] \otimes k\{u\}\ar[d]^{ev}\\
k[\partial] \otimes k\{u\} \ar[r]_{ev} & k\{u\}\\
}$$
\en
\end{prop}
\bws We first show that the second statement indeed holds.
\bn
\item By simple computation on the monomial terms $a \partial^i, b \partial^j \in k[\partial]$ and $c u_\alpha \in k\{u\}$ we get
{\scriptsize
$$\bao{rcl}
ev(\mu \otimes id_{k\{u\}})(a \partial^i \otimes b \partial^j \otimes c u_\alpha) &=& \sum_{k' \leq i} \sum_{l' \leq i + j - k'} \left(\bao{c}i\\k'\ea\right) \left(\bao{c}j + i - k'\\l'\ea\right) a \partial^{k'}(b) \partial^{l'}(c) \partial^{j+i-k'-l'}(u_\alpha)\\
&&\\
ev(id_{k[\partial]} \otimes ev)(a \partial^i \otimes b \partial^j \otimes c u_\alpha) &=& \sum_{k \leq i} \sum_{l \leq j} \sum_{m \leq i - k} \left(\bao{c}i\\k\ea\right) \left(\bao{c}j\\l\ea\right) \left(\bao{c}i - k\\m\ea\right) a \partial^k(b) \partial^{l+m}(c) \partial^{j+i-k-l-m}(u_\alpha)\\
\ea$$}
Fixing $l' \leq j$ and putting $k = k'$ we see that our equivalence implies $$\sum_{l + m = l'} \left(\bao{c}j\\l\ea\right) \left(\bao{c}i - k\\m\ea\right) a \partial^k(b) \partial^{l+m}(c) \partial^{j+i-k-l-m}(u_\alpha) = \left(\bao{c}j + i - k\\l + m\\\ea\right) a \partial^k(b) \partial^{l + m}(c) \partial^{j + i - l - m - k}(u_\alpha).$$
Since the degree of the differential operators on each factor do agree, we get
$$\sum_{l + m = l'} \left(\bao{c}j\\l\ea\right) \left(\bao{c}i - k\\m\ea\right) = \left(\bao{c}j + i - k\\l + m\\\ea\right).$$
But this is just a rewriting of the Vandermonde identity for all $k \leq i$ and proves our claim.
\item If $k\{u\}$ is a $k[\partial]$-module algebra, we have $(x y) v = x(y v)$ for all $x, y \in k[\partial]$, $v \in k\{u\}$. Expanded in our standard notation, this translates into $ev(\mu\otimes id) = ev(id\otimes ev)$, proving \ref{item01} $\RA$ \ref{item02}.\\
The opposite direction follows immediately from the definition.
\en
\begin{koro}\label{HopfModAlgDiffPoly}
With bialgebra structure maps $\mu, \eta, % = [1_k \longmapsto 1_{k[\partial]}],
\Delta$ and $ % = \left[a \partial^i \longmapsto \sum_{0 \leq j \leq i} \left(\bao{c}i\\j\\\ea\right) a \partial^j \otimes \partial^{i - j}\right],
\eps$ as defined above and the $k$-homomorphism
% = [1_{k[\partial]} \longmapsto 1_k, a \partial^i \longmapsto 0],
$$S : k[\partial] \longrightarrow k[\partial],\ \partial^i \longmapsto (-1)^i \partial^i,$$
we have that $k\{u\}$ is a $k[\partial]$-Hopf-module algebra.
\end{koro}
\bws Note that the generator $\partial$ is a primitive, cocommutative element, i.e. $\Delta(\partial) = 1 \otimes \partial + \partial \otimes 1$, implying $S(\partial) = - \partial$. We simply have to show that the defining Hopf-algebra diagram commutes.
$$\partial^i \stackrel{\Delta}{\longmapsto} \sum_{j=0}^i\left(\bao{c}
i\\
j\\
\ea\right) \partial^j \otimes \partial^{i-j} \stackrel{S \otimes id}{\longmapsto} \sum_{j} \left(\bao{c}
i\\
j\\
\ea\right) (-1)^j \partial^j \otimes \partial^{i-j} \stackrel{\mu}{\longmapsto} \sum_j \left(\bao{c}
i\\
j\\
\ea\right) (-1)^j \partial^i.$$
According to Pascal's rule we see via induction that, except for $i = 0$, all sums are zero. As $\eta(\eps(\partial^i)) = \delta_{i,0}$, we have just shown the required commutativity.
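Alternatively, the vanishing follows in one line from the binomial theorem (a standard identity, independent of our setting):
$$\sum_{j=0}^i \left(\bao{c}i\\j\ea\right) (-1)^j = (1 - 1)^i = \delta_{i,0},$$
so $\mu(S \otimes id)\Delta(\partial^i) = \delta_{i,0}\, 1_{k[\partial]} = \eta(\eps(\partial^i))$.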
%Hence, all elements $\sum c_\alpha u_\alpha$ can be associated with some element of the form $\sum \left(\bao{c} i\\j\\\ea\right) \partial^j(b_\alpha) \partial^{i-j}(u_\alpha)$ we get an (not necessarily unique) element such that for $\rho = \left[\sum c_\alpha u_\alpha \longmapsto \sum \left(\bao{c}i\\j\\\ea\right) \partial^j(b_\alpha) \otimes \partial^{i-j}(u_\alpha)\right]$
\subsection{Linear Differential Equations}
Let $(k,\partial)$ be a differential field of characteristic zero (with not necessarily non-trivial derivation), with field of constants denoted by $k^\partial$, and let $M$ be a noetherian differential module over $k$ (i.e. a finite-dimensional vector space equipped with a derivation).
\begin{defi}
A (scalar) linear differential equation is a $k^\partial$-linear map $L : M \longrightarrow M$, with $L = \sum a_i \partial^i$, i.e. $L \in k[\partial] \subset \trm{End}_{k^\partial}(k \otimes_{k^\partial} M)$. The solution space is the $k^\partial$-subspace of $M$:
$$S(L) := \{x \in M : L(x) = 0\}.$$
\index{Index}{space!solution}
\index{Symbol}{$S(L)$}
\end{defi}
Alternatively, one can define linear differential equations simply via linear algebra:
$$\partial(x) = A x,\ A \in \trm{End}(M),\ x \in M.$$
Its solution space is simply the kernel of $\partial - A$ in $M$. Hence, all differential extensions are generated by solutions over $k$.
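For example, the standard companion-matrix reduction (anticipating the two-dimensional example in section \ref{twoD}): the scalar equation $\partial^2(y) = a y$ becomes the first-order system
$$\partial\left(\bao{c}x_1\\x_2\ea\right) = \left(\bao{cc}0 & 1\\a & 0\\\ea\right)\left(\bao{c}x_1\\x_2\ea\right),\ \trm{with}\ x_1 = y,\ x_2 = \partial(y),$$
and the scalar solutions are recovered from the first coordinates of $\ker(\partial - A)$. Now, an important definition: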
\begin{defi}
Let $K/k$ be a differential extension for the differential equation $\partial(x) = Ax$.
\bn
\item We call a matrix $X \in \trm{Mat}_n(K)$ a solution matrix, if
$$\partial(X) = A X \in \trm{Mat}_n(K).$$
If $X = (x_{ij}) \in \trm{Mat}_n(K)$ is a solution matrix for the above differential equation we call the $n^2 \times n^2$ matrix
$$Wr(X) = \left(\partial^l(x_{ij})\right)_{\substack{0 \leq l \leq n^2-1\\1 \leq i,j \leq n}}$$
the Wronskian matrix. Its determinant is simply called Wronskian.
\item $K/k$ is called a differential extension (over $k$), if $K$ contains the solution space for some linear differential equation $\partial(x) = A x$.
\en
%A solution matrix in the general linear group $\trm{Gl}_n(R)$ is called a fundamental matrix.
\index{Index}{matrix!solution}
\index{Index}{matrix!Wronskian}
\index{Index}{matrix!fundamental}
\index{Index}{Wronskian}
\end{defi}
\bmk As we already defined differential extensions via the ring of differential polynomials $k\{u\}$ (to be precise via differential quotient rings) we shall show both definitions are equivalent. But clearly, the family of differential polynomials is simply $\mathcal{F} := \{\partial u_i - \sum_j a_{i,j} u_j \in k\{u\} : 1 \leq i \leq n\}$ for a given differential equation $\partial u = A u$.\\
\indent Secondly, we get the Wronskian matrix by constructing column vectors $$y_{ij} = \left(x_{ij},\partial(x_{ij}),\ldots,\partial^{n^2-1}(x_{ij})\right)^t$$ running over all indices $1 \leq i,j \leq n$. Furthermore, the definition of the Wronskian is broader: for some differential extension $R/k$ and elements
$y_1, \ldots, y_m \in R$ the Wronskian matrix is simply $Wr(y_1,\ldots,y_m) := (\partial^l(y_i))_{1 \leq i,l + 1 \leq m}$.
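For instance, for two elements $y_1, y_2 \in R$ this reads
$$Wr(y_1, y_2) = \left(\bao{cc}y_1 & y_2\\ \partial(y_1) & \partial(y_2)\\\ea\right),\ \det Wr(y_1,y_2) = y_1 \partial(y_2) - y_2 \partial(y_1),$$
and in a differential field the determinant vanishes precisely when $y_1, y_2$ are linearly dependent over the constants (for $y_1 \neq 0$ one has $\partial(y_2/y_1) = \det Wr(y_1,y_2)/y_1^2$).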
\begin{defi}
Let $K/k$ be a differential field extension for a given differential equation $\partial y = A y$.
\bn
\item A Picard-Vessiot ring $R$ is a sub-ring of $K$, such that
$R^\partial = k^\partial$, $R$ contains no non-trivial differential ideals and there exists a solution matrix $X \in \trm{Gl}_n(R)$.
\item A solution matrix in a PV-ring $R$ is called a fundamental matrix.
\item A Picard-Vessiot field is the localization of a Picard-Vessiot ring at all non-zero elements, i.e. its field of fractions.
\en
\end{defi}
\index{Index}{extension!differential}
\index{Index}{extension!Picard-Vessiot}
\index{Index}{ring!Picard-Vessiot}
\index{Index}{field!Picard-Vessiot}
The last definition requires a little
\begin{lemm}
A simple differential ring is zero-divisor free.
\end{lemm}
\bws Note first that in our characteristic-zero setting the nilradical of a differential ring is a differential ideal, hence zero by simplicity, i.e. $R$ is reduced. Now let $a$ be a zero divisor, i.e. there is a $b \neq 0$, s.t. $a b = 0$. We get $\partial(a b) = 0 = \partial(a) b + a \partial(b) \LRA \partial(a) b = -a \partial(b)$. Multiplying both sides with $b$ we have:
$$\partial(a) b^2 = -a b \partial(b) = 0.$$
The same computation shows that $I := \{x \in R : x b^n = 0\ \trm{for some}\ n \geq 1\}$ is a differential ideal: $x b^n = 0$ implies $\partial(x) b^{n+1} = 0$. As $R$ is reduced, no power of $b$ vanishes, so $I$ is a proper differential ideal. Since the only proper differential ideal is zero, we get $a = 0$.\\
Now we have that the localization of a Picard-Vessiot ring is indeed well-defined, in the sense that the localization is not the zero ring. Alternatively, we could define the Picard-Vessiot field extension $K/k$ as a field containing the solution space and having the same field of constants, i.e. $K^{\partial} = k^{\partial}$.
\newcommand{\minpoly}[1]{\trm{Min}(\alpha,#1)}
\newcommand{\minpolyC}{\minpoly{k^\partial}}
\newcommand{\minpolyR}{\minpoly{R}}
\begin{lemm}
Let $(k,\partial)$ be a differential field of characteristic zero and let $R$ be a differential subring of $k$
with the same field of constants, i.e. $R^\partial = k^\partial \subset R \subset k$.
\bn
\item If $\alpha \in R$ is algebraic over $k^\partial$, i.e. $\minpolyC \in k^\partial[X]$, with $k^\partial(\alpha) \simeq k^\partial[X]/\left<\minpolyC\right>$, then $\partial(\alpha) = 0$.
\item If $\alpha \in k$ is algebraic over $R$ and $\partial(\alpha) = 0$, then $\alpha$ is algebraic over $k^\partial$.
\en
\end{lemm}
\bws Let $\minpoly{S} = p = \sum_{i=0}^n p_i X^i \in S[X]$ for $S = R, k^\partial$.
\bn
\item By definition, we have $p \in k^\partial[X]$. Hence, evaluating $p$ at $\alpha$ in $R$ gives
$$p(\alpha) = \sum_{i=0}^n p_i \alpha^i = 0$$
Differentiating:
$$\partial(p(\alpha)) = \sum_{i=1}^n p_i \partial(\alpha^i) = \sum_{i=1}^n i p_i \partial(\alpha) \alpha^{i-1} = \left(\sum_{i=0}^{n-1} (i + 1) p_{i+1} \alpha^i\right) \partial(\alpha) = 0$$
Since $p_n = 1$ and $\trm{char}\,k = 0$, the left-hand factor is a polynomial expression in $\alpha$ of degree $n - 1 < \deg p$ with leading coefficient $n \neq 0$, hence non-zero by the minimality of $p$. On the other hand, $k^\partial(\alpha)$ is an integral domain. Hence, $\partial(\alpha) = 0$.
\item We interpret `algebraic over $R$' as `integral over $R$'. Thus, there is a monic polynomial as defined above over $R$. Proceeding as in the last part (i.e. evaluating $p$ at $\alpha$ in $R[\alpha]$ and differentiating), we get
$$\partial(p(\alpha)) = \sum_{i=0}^{n-1} \left(\partial(p_{i}) + (i + 1) p_{i+1} \partial(\alpha)\right) \alpha^i + \partial(p_n) \alpha^n = 0.$$
Since $p_n = 1$, and since by the minimality of $\deg p$ the powers $1, \alpha, \ldots, \alpha^{n-1}$ satisfy no non-trivial $R$-linear relation, comparing coefficients gives
$$\partial(p_i) + \underbrace{(i + 1) p_{i+1} \partial(\alpha)}_{=0} = 0$$
for all $i = 0,\ldots,n - 1$, i.e. $\partial(p_i) = 0$, showing $p \in k^\partial[X]$.
\en
\begin{prop}\label{PicardVessiotRing}
Let $\partial(x) = A x$, as above.
\bn
\item A Picard-Vessiot ring $R$ is isomorphic to
$$k[x_{ij},1/\det X],$$
where $X = (x_{ij}) \in \trm{Gl}_k(M)$ is a fundamental matrix.
\item A matrix $X \in \trm{Mat}_n(R)$ is a fundamental matrix, if and only if its Wronskian is non-zero over $k^\partial$.
%\item The entries of the largest sub matrix of the Wronskian matrix $Wr(L) = \left(x^{(k)}_{ij}\right)_{\substack{0 \leq k \leq n^2 - 1\\1 \leq i, j \leq n}}$ is a $C$-basis of the solution space, if and only if its Wronskian matrix has non-zero determinant. The associated fundamental matrix $X$ is spanned by this basis.
\item \label{PVLemma3}Two fundamental matrices $X_1, X_2$ are right-associated wrt. $\trm{Gl}_n(k^\partial)$.
\item \label{PVLemma4}Two PV-rings $R_1, R_2$ of the same equation are isomorphic as differential rings.
\en
\end{prop}
\bws A proof can be found in \cite{vdPS01}, pg. 15. However, for the last two statements we are going to present a sketch of the proof:
\bd
\item[ad \ref{PVLemma3}] We have: $\partial(X_{1,2}) = A X_{1,2}$ and assume: $X_2 = X_1 M$ for some $M \in \trm{Gl}_n(R)$. Then:
$$\partial(X_2) = \partial(X_1 M) = \partial(X_1) M + X_1\partial(M) = A X_1 M + X_1 \partial(M) \stackrel{!}{=} A X_2 \LRA M \in \trm{Gl}_n(k^\partial).$$
We recall that $X_i \in \trm{Gl}_n(R)$, and that $X_1, X_2$ are left-associated if and only if $[M, A] = 0$.
\item[ad \ref{PVLemma4}] Let $S(L)_i$ denote the two solution spaces. As we just saw, we may define an isomorphism of differential modules $\phi : S(L)_1 \longrightarrow S(L)_2, x_{ik} \longmapsto \sum_{j} x_{ij} m_{jk}$. As the PV-ring is commutative, we deem $\trm{Sym}(S(L)_i)$ an appropriate choice for the construction (not necessarily the PV-ring itself, rather a subring). Now, we may extend $\phi$ to both algebras:
$$s = \sum s_i x^i = \sum s_i x_1^{i_1} \ldots x_{n}^{i_n} \longmapsto \sum s_i \phi(x_1)^{i_1} \ldots \phi(x_n)^{i_n}.$$
Since $1_{S(L)_1} \longmapsto 1_{S(L)_2}$ and $\phi$ is an isomorphism on $\trm{Sym}^1(S(L)_i)$ we get the desired isomorphism of differential rings. Localizing both algebras wrt. $\det X_i$ we have that $R_i \simeq S_{\det X_i} (L)_i$ for $i = 1,2$.
\ed
\bmk $\trm{Sym}(S(L)) \simeq k[x_1,\ldots,x_n]$ is not a simple differential ring (the maximal ideal $I = \left<x_i : 1 \leq i \leq n\right>$ is closed under $\partial$-action). Nevertheless, its localization wrt. $\det X = \sum_{\sigma \in S_n} sign(\sigma) \prod_{i=1}^n x_{i,\sigma(i)}$ is a simple differential ring, as $S_{\det X}^{-1}I$ contains units ($S_{\det X}^{-1}I = R$).\\
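A minimal sanity check in the case $n = 1$ for the equation $\partial(x) = a x$ (so $\det X = x_1$ and $\partial(x_1) = a x_1$): the ideal $\left<x_1\right>$ is differential, but becomes all of $R$ after localization, since
$$\partial(p\, x_1) = \partial(p)\, x_1 + a\, p\, x_1 \in \left<x_1\right>,\ \trm{while}\ x_1 \in \left(S_{\det X}^{-1} k[x_1]\right)^\times.$$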
\indent In \cite{vdPS01}, the following theorem is given (prop. 2.9, pg. 40 and lem 2.10, pg. 41):
\begin{satz}
Let $\partial(x) = A x$ be a linear differential equation. There are $L_i \in k[\partial]$, such that
$$V_A := \{y \in M : \partial(y) = A y\} \simeq \bigoplus_{i=1}^n k[\partial]/k[\partial].L_i^*.$$
To specify, each matrix equation has a solution space $V_A$ isomorphic to the solution space of a scalar equation $L = \prod L_i$.
\end{satz}
The decomposition is not unique wrt. left- and right-sidedness, as $k[\partial]$ is in general not commutative. We are not going to prove this theorem, only loosely sketch a proof.\\
First we note: for any field $k$ and a matrix $A \in \trm{Mat}_n(k)$ there exist vectors $c_i \in \bigoplus_{l=1}^n k.e_l$ such that $B = \left\{c_i, A c_i, \ldots, A^{j_i} c_i : i\right\}$ is a $k$-basis. The elements $c_i$ are called cyclic vectors, each subspace $M_i$ generated by such an element is called a cyclic vector space, and the transformed matrix wrt. the cyclic basis is called the rational normal form of $A$. If $\chi_A \in k[X]$ is the characteristic polynomial of $A$ and $\prod_{j = 1} p_j^{s_j}$ a prime decomposition of $\chi_A$, then its minimal polynomial $m_A$, which is the monic generator of $\{f \in k[X] : f(A) = 0\}$, gives us all distinct cyclic vectors (each cyclic vector space has dimension $t_j \deg p_j$, where $p_j^{t_j} \mid m_A$ and $p_j^{t_j+1} \nmid m_A$). A similar reasoning is provided in \cite{vdPS01}, chapter 2. Here, a cyclic decomposition is given for any linear homogeneous differential equation.
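As an illustration: for the companion matrix $A = \left(\bao{cc}0 & 1\\a & 0\\\ea\right)$ of section \ref{twoD}, the first standard basis vector $c = e_1$ is already cyclic, since
$$B = \left\{c,\ A c\right\} = \left\{\left(\bao{c}1\\0\ea\right), \left(\bao{c}0\\a\ea\right)\right\}$$
is a $k$-basis for $a \neq 0$, and $\chi_A = m_A = X^2 - a$.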
% due to the maximality of $I$: any differentially closed ideal containing $I$ is already $R = (1)$. To see that its ring of constants $R^\partial = C$, consider $f = \sum_{i} f_i x_1^{i_1} \ldots x_n^{i_n}/\det X^{i_{n+1}} \in \ker \partial_R$:
%$$\partial(f) := \sum_i \partial(f_i) x^i/\det X^{i_{n+1}} + \sum_i i_k f_i x_1^{i_1} \ldots \partial(x_k) x_k^{i_k-1} \ldots x_n^{i_n}/\det X^{i_{n+1}}$$
\subsubsection{Differential Galois group}
In the theory of field extensions, the Galois group is the group of field automorphisms permuting the roots of a given polynomial over $k$ while leaving $k$ fixed. For a given linear differential equation, the definition is slightly different:
\begin{defi}
Let $\partial(x) = A x$ for some $A \in \trm{Gl}_k(M)$ and $x \in M$. The differential Galois group is the centralizer of $\left<\partial\right> \subset \trm{Gl}_R(M)$, i.e.
$$\trm{DGal}(R/k) = \{\varphi \in \trm{Gl}_{k^\partial}(M) : \partial \varphi = \varphi \partial\}.$$
\index{Symbol}{$\trm{DGal}(R/k)$}
\end{defi}
This property is also called equivariance (e.g. in algebraic geometry).
%\paragraph{Examples for the linear case}
\begin{satz}[Galois correspondence]
Let $K/k$ be a PV extension for $\partial(x) = A x$. Let $\mathfrak{G}$ be the set of all closed subgroups of $G := \trm{DGal}(K/k)$ and $\mathcal{M}$ the set of all differential subfields $k \subset M \subset K$. In analogy to algebraic field extension theory, we define
$$\bao{rrclrcl}
\trm{Fix} : &\mathfrak{G} &\longrightarrow& \mathcal{M},& H &\longmapsto& K^H := \{x \in K : \sigma(x) = x\ \forall \sigma \in H\}\\
&&&&&\\
\trm{DGal} : & \mathcal{M} &\longrightarrow& \mathfrak{G},& M &\longmapsto& \trm{DGal}(M/k),\\
\ea$$
then we have:
\bn
\item Both maps are mutually inverse.
\item $H \in \mathfrak{G}$ is normal in $G$ if and only if for $M = K^H$:
$$G(M) \subset M,$$
i.e. $M$ is $G$-invariant as a set ($g x \in M$ for all $g \in G$, $x \in M$).
\item If $H \in \mathfrak{G}$ is normal then the canonical projection $G \longrightarrow \trm{DGal}(K^H/k)$ is surjective and has $H$ as kernel. Furthermore, $K^H$ is a PV extension for some linear differential equation over $k$.
\item Let $G^0 \leq G$ be the connected component of identity, then $K^{G^0} = k$.
\en
\end{satz}
\bmk A proof is given in \cite{vdPS01}. The connected component of the identity is to be understood as follows. If $(G,m,e,\tau)$ is a topological group, with $\tau$ its topology, an open subset $U \in \tau$ is called connected if the only decomposition of $U$ into a disjoint union of open subsets $U' \cup U'' = U$ is the trivial one (i.e. $U' \in \{\emptyset, U\}$, $U'' = U\bsl U'$). In this case, $U$ is also called a connected component. If furthermore $1 \in U$ and $U$ is a subgroup, $U$ is called the connected component of the identity, denoted by $G^0$. These definitions were taken from \cite{Milne}.
\bsp Some examples:
\bn
\item $(\zz,+,0,\mathcal{P}(\zz))$ is a topological group wrt. discrete topology. Hence, all pointed subspaces are open subsets. Therefore, $\{0\} = \zz^0$.
\item $(\rz^\times,\cdot,1,\tau_{\trm{standard}})$ is a topological group with connected components: $\rz_{>0}, \rz_{<0}$. Hence, $\rz_{>0} = \left(\rz^{\times}\right)^0$.
\en
\subsection{Example}
Now, let us consider two simple examples.
\subsubsection{1-dim case}
Let $k = \currfield(z)$ with $\currfield$-derivation $\partial = \frac{d}{d z}$, as well as $a \neq 0$ and $\partial(x) = ax$. Here, we have a 1-dimensional differential equation, i.e. the solution space is a 1-dim $\currfield$-vector space. Let us compute the
\bd
\item[Fundamental matrix] Put $X = x \in \trm{Gl}_1(\currfield(z))$. Clearly, $Wr(X) = (x)$ and $\det Wr(X) = x \neq 0$. Hence, $x$ belongs to the $\currfield$-basis of the solution space.
\item[Picard-Vessiot ring] By prop. \ref{PicardVessiotRing} we know that $R \simeq \currfield(z)[x,1/x]$.
\item[Differential Galois group] Pick some $f \in \trm{Gl}_1(R)$, by definition $f \in \trm{DGal}(R/k)$ if $f$ commutes with $\partial$:
$$\partial(f x) = \partial(f) x + f a x = f a x = f \partial(x) \LRA \partial(f) = 0,$$
i.e. $f$ lies in $\currfield.id$. Thus, we have that $\trm{DGal}(R/k) \simeq \currfield^\times$.
\ed
\subsubsection{2-dim case} \label{twoD}
Let $k \subseteq \currfield$ with trivial derivation $\partial\mid_{\currfield} = 0_{\currfield}$, as well as $a \neq 0$ and $\partial^2(x) = a x$. Clearly, the solution space in question is contained in a two-dimensional space with companion matrix $A = \left(\bao{cc}
0 & 1\\
a & 0\\
\ea\right)$.
\paragraph{Fundamental matrix} Put $X = \left(\bao{cc}
x_{11} & x_{12}\\
x_{21} & x_{22}\\
\ea\right)$ as fundamental matrix with $\partial(x_{11}) = x_{21}, \partial(x_{21}) = a x_{11}$ as well as $\partial(x_{12}) = x_{22}, \partial(x_{22}) = a x_{12}$. The Wronskian matrix is
{\scriptsize
$$\left(\bao{cccc}
x_{11} & x_{21} & x_{12} & x_{22}\\
x_{21} & a x_{11} & x_{22} & a x_{12}\\
a x_{11} & a x_{21} & a x_{12} & a x_{22}\\
a x_{21} & a^2 x_{11} & a x_{22} & a^2 x_{12}\\
\ea\right)
$$}
Obviously, $\det Wr(X) = 0$, as the third and fourth rows are $a$ times the first and second rows, respectively. For instance, $x_1 = x_{11}$ and $x_2 = x_{21}$ form a $\currfield$-basis of the solution space. Since the remaining indeterminates are linearly dependent over $\currfield$, we compute for $x_{12} = \lambda_{11} x_1 + \lambda_{12} x_2$, $x_{22} = \lambda_{21} x_1 + \lambda_{22} x_2$ and $\lambda_{ij} \in \currfield$:
$$\bao{rclcl}
\partial(x_{12}) &=& \lambda_{11} \partial(x_1) + \lambda_{12} \partial(x_2) &=& \lambda_{12} a x_1 + \lambda_{11} x_2\\
&&&&\\
&\stackrel{!}{=}& x_{22} = \lambda_{21} x_1 + \lambda_{22} x_2 &\LRA& \lambda_{21} = a \lambda_{12},\ \lambda_{22} = \lambda_{11}\\
&&&&\\
\partial(x_{22}) &=& \lambda_{21} \partial(x_1) + \lambda_{22} \partial(x_2) &=& a \lambda_{11} x_1 + a \lambda_{12} x_2\\
&&&&\\
&\stackrel{!}{=}& a x_{12} = a \lambda_{11} x_1 + a\lambda_{12} x_2.\\
\ea$$
The second equation yields no further condition, so we may choose any $\lambda_{11} =: \lambda_1, \lambda_{12} =: \lambda_2 \in \currfield$ such that $\det X$ is a unit. Continuing:
$$\det X = \det \left(
\bao{cc}
x_1 & \lambda_1 x_1 + \lambda_2 x_2\\
x_2 & a \lambda_2 x_1 + \lambda_1 x_2\\
\ea\right) = a \lambda_2 x_1^2 + \lambda_1 x_1 x_2 - \lambda_1 x_1 x_2 - \lambda_2 x_2^2 = \lambda_2 (a x_1^2 - x_2^2).$$
We may set $\lambda_1$ arbitrarily; choosing $\lambda_1 = 0$ and $\lambda_2 = 1$,
%Identifying the remaining variables with $x_{12} = a^{-1} x_2$, $x_{22} = a x_1$
we get the fundamental matrix $X = \left(\bao{cc}
x_1 & x_2\\
x_2 & a x_1\\
\ea\right)$. Its inverse is
$$X^{-1} = \frac{1}{a x_1^2 - x_2^2} \left(
\bao{cc}
a x_1 & -x_2\\
-x_2 & x_1\\
\ea\right)$$
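One verifies directly that this is a two-sided inverse:
$$X X^{-1} = \frac{1}{a x_1^2 - x_2^2}\left(\bao{cc}x_1 & x_2\\x_2 & a x_1\\\ea\right)\left(\bao{cc}a x_1 & -x_2\\-x_2 & x_1\\\ea\right) = \frac{1}{a x_1^2 - x_2^2}\left(\bao{cc}a x_1^2 - x_2^2 & 0\\0 & a x_1^2 - x_2^2\\\ea\right) = id.$$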
In Heidereich 2010, the condition $\sum_{i=0}^n a_i \partial^i(X) X^{-1} \in M_2(k)$ is provided for the fundamental matrix. But clearly, if $\partial(X) = A X$ then $\partial^i(X) = A^i X$ and the condition holds.% In our case:
%$$\partial(X)X^{-1} = A X X^{-1} = \frac{1}{a x_1^2 - a^{-1} x_2^2}\left(\bao{cc}
%x_2 & a x_1\\
%a x_1 & a x_2\\
%\ea\right)\left(
%\bao{cc}
%x_1 & a^{-1} x_1\\
%x_2 & a x_2\\
%\ea\right) = A.$$
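In our second-order case $L = \partial^2 - a \cdot id$ (so $a_2 = 1$, $a_1 = 0$, $a_0 = -a$ in the notation of that condition) the check is immediate, as $A$ has constant entries:
$$\partial^2(X) X^{-1} - a\, X X^{-1} = A^2 - a \cdot id = \left(\bao{cc}a & 0\\0 & a\\\ea\right) - a \cdot id = 0 \in M_2(k).$$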
\paragraph{Differential Galois group} We have $g = (g_{ij}) \in \trm{DGal}(\currfield[x_1,x_2,1/\det X]/\currfield)$ iff $g A - A g = 0$ and $\det g \in \currfield^\times$. We get:
$$\bao{cc}
a g_{12} - g_{21} = 0 & -a g_{12} + g_{21} = 0\\
a g_{11} - a g_{22} = 0 & -a g_{11} + a g_{22} = 0.\\
\ea$$
Therefore we have $g = \left(\bao{cc}
g_{11} & g_{12}\\
a g_{12} & g_{11}\\
\ea\right)$. The determinant is $\det g = g_{11}^2 - a g_{12}^2$. If $g_{ij} \in \currfield$ we get that $\det g = 0 \LRA g_{11}^2 = a g_{12}^2$. Since $\currfield$ is algebraically closed, there is a $b \in \currfield$ such that $b^2 = a$. Hence, $g_{11} = \pm b g_{12}$. Thus we get:
$$\left\{\left(
\bao{cc}
g_{11} & g_{12}\\
a g_{12} & g_{11}\\
\ea\right) \in \trm{Gl}_2(\currfield) : g_{11} \neq \pm b g_{12}\right\}$$
as a subgroup of the differential Galois group. To conclude, the Galois group is
$$\trm{DGal}(\currfield[x_1,x_2,1/\det X]/\currfield) = \left<g\right> \supset \left<\partial\right>.$$
Now, we can express $\trm{DGal}(\currfield[x_1,x_2,1/\det X]/\currfield)$ as an open algebraic set $U$ in $\aff{2}_{\currfield}$:
$$\bao{rcl}
\trm{DGal}(\currfield[x_1,x_2,1/\det X]/\currfield) &\simeq_{\trm{claim}}& U\\
&:=& \{(g_{11},g_{12}) \in \aff{2}_{\currfield} : g_{11}^2 - a g_{12}^2 \neq 0\}\\
&=& \aff{2}_{\currfield} \bsl \left\{(g_{11},g_{12}) \in \aff{2}_{\currfield} : g_{11}^2 - a g_{12}^2 = 0\right\}\\
&&\\
&=& \aff{2}_{\currfield}\bsl Z(g_{11}^2 - a g_{12}^2)\\
\ea$$
As is shown in \cite{CohCuySte}, the differential Galois group is a subgroup of $\trm{Sl}(S(L))$. Therefore we may substitute:
$$U' = \{(g_1',g_2') \in \aff{2}_{\currfield} : g_1'^2 - a g_2'^2 = 1\}.$$
The multiplication is simply:
$$m_U := \left[\left((g_{11},g_{12}) , (g'_{11},g'_{12})\right) \longmapsto (g_{11} g'_{11} + a g_{12} g'_{12}, g_{11} g'_{12} + g'_{11} g_{12})\right].$$
Checking:
$$\bao{rcl}
(g_{11} g'_{11} + a g_{12} g'_{12}, g_{11} g'_{12} + g'_{11} g_{12}) &\stackrel{?}{\in}& U:\\
&&\\
(g_{11} g'_{11} + a g_{12} g'_{12})^2 - a (g_{11} g'_{12} + g'_{11} g_{12})^2 &=& g_{11}^2 {g'}_{11}^2 + a^2 g_{12}^2 {g'}_{12}^2 + 2 a g_{11} g_{12} g'_{11} g'_{12}\\
&&- a\left(g_{11}^2 {g'}_{12}^2 + g_{12}^2 {g'}_{11}^2 + 2 g_{11} g_{12} {g'}_{11} {g'}_{12}\right)\\
&&\\
&=& (g_{11}^2 - a g_{12}^2) {g'}_{11}^2 - (g_{11}^2 - a g_{12}^2)\, a\, {g'}_{12}^2\\
&&\\
&=& \underbrace{(g_{11}^2 - a g_{12}^2)}_{= 1} \underbrace{({g'}_{11}^2 - a {g'}_{12}^2)}_{= 1} = 1,\\
\ea$$
implying $m_U\left((g_{11},g_{12}),({g'}_{11},{g'}_{12})\right) \in U$. Moreover, the map $\iota: U \ni (g_{11},g_{12}) \longmapsto g \in \trm{DGal}(\currfield[x_1,x_2,1/\det X]/\currfield)$ is a morphism of groups (by definition), injective as $\ker \iota = \{(1,0)\}$, and surjective as $(g_{11},g_{12}) \in \iota^{-1}(g)$. This proves the claim that the above defined open subset $U$ is isomorphic to $\trm{DGal}(\currfield[x_1,x_2,1/\det X]/\currfield)$.
%This group has three classes of subgroups:
%$$H_1 := \left\{\left(\bao{cc}
%g_{11} & 0\\
%0 & g_{11}\\
%\ea\right) : g_{11} \in \currfield^\times\right\},\ H_2 := \left\{\left(\bao{cc}
%0 &g_{12}\\
%a g_{12} & 0\\
%\ea\right) : g_{12} \in \currfield^\times\right\},$$
%$$\ H_3 := \left\{\left(
%\bao{cc}
%g_{11} & g_{12}\\
%a g_{12} & g_{11}\\
%\ea\right) : (g_{11},g_{12}) \in G_{<\infty} \times \currfield^\times \cap U\right\}$$
On the other hand, $G$ can be viewed as a closed subset of $\aff{3}_{\currfield}$:
$$I := \left<(X^2 - a Y^2) Z - 1\right> \subset \ov{\qz}[X,Y,Z]\ \RA\ Z(I) = \left\{\left(g_{11}, g_{12}, 1/(g_{11}^2 - a g_{12}^2)\right) : g_{11}, g_{12} \in \currfield\right\}$$
Its multiplication is given by
$$\bao{rrcl}
m_{Z(I)} :& Z(I) \times Z(I) & \longrightarrow & Z(I)\\
&&&\\
&((g_{11}, g_{12}, (g_{11}^2 - a g_{12}^2)^{-1}), & \longmapsto &(g_{11} {g'}_{11} + a g_{12} {g'}_{12}, g_{11} {g'}_{12} + g_{12} {g'}_{11},\\
& ({g'}_{11}, {g'}_{12}, ({g'}_{11}^2 - a {g'}_{12}^2)^{-1}))&&(g_{11}^2 - a g_{12}^2)^{-1}({g'}_{11}^2 - a {g'}_{12}^2)^{-1})\\
\ea$$
The proof of the above claim follows analogously to the open case. %The main difference is that $Z(I) = U \times \currfield^\times \cup \left\{0_{\aff{3}_{\currfield}}\right\}$, i.e. Zariski-dense and $Z(I)$ is a monoid (has $0$ as non-invertible and idempotent).
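In particular, the third coordinate of the product is consistent, by the same multiplicativity identity as verified in the open case:
$$\left((g_{11} {g'}_{11} + a g_{12} {g'}_{12})^2 - a (g_{11} {g'}_{12} + g_{12} {g'}_{11})^2\right)^{-1} = (g_{11}^2 - a g_{12}^2)^{-1} ({g'}_{11}^2 - a {g'}_{12}^2)^{-1}.$$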
\paragraph{Characterization of field extension}
\bn
\item We have $\partial(\det X) = \partial(a x_1^2 - x_2^2) = 2 a x_1 x_2 - 2 x_2\, a x_1 = 0$. Since $R$ is a simple differential ring, we conclude $\det X \in k^\partial = k = \currfield$. Thus there is some $b = \det X \in \currfield^\times$. Therefore, $K = S^{-1}R \simeq \currfieldx[X]/\left<X^2 - a x_1^2 + b\right>$. In other words: the Picard-Vessiot field $K$ is an algebraic extension of (some appropriate) transcendental extension of $\currfield$. Rescaling our differential equation, we may assume that $b = 1$.
\item An immediate consequence of the above is the following: We consider $R' := \currfield[x_1,x_2] \simeq \trm{Sym}(S(L))$ wrt. the $\currfield$-basis $\{x_i : i = 1,2\}$. Now, taking $I = \left<x_2^2 - a x_1^2 + 1\right> \subset \trm{Sym}(S(L))$, we get:
$$\trm{Sym}(S(L))/I \simeq R'/\left<x_2^2 - a x_1^2 + 1\right>.$$
Since the $\currfield$-derivation does not reduce the degree of any polynomial of positive degree, we can use the degree-induced filtration on $\trm{Sym}$:
$$\filteredA^{\leq l} = \left\{x \in \trm{Sym}(S(L)) : \deg x \leq l\right\} \ \RA \partial_{\trm{Sym}}\mid_{\filteredA^{\leq l}} (\filteredA^{\leq l}) \subset \filteredA^{\leq l},\ \filteredA^{\leq 0} := \currfield,\ \filteredA^{\leq -n} := \{0\}, n, l \in \nz,$$
i.e. $\filteredA^{\leq l}$ is a $\partial_{\trm{Sym}}$-invariant $\currfield$-subspace. Here we set $\deg(0) := -\infty$ and $\deg$ is the sum degree. On the other hand:
$$\filteredA^{\leq 2} = \bigoplus_{|\alpha| \leq 2} \currfield.x_1^{\alpha_1} x_2^{\alpha_2}\ \RA\ \filteredA^{\leq2} / \left(\filteredA^{\leq2} \cap I\right) = \currfield \oplus \currfield.x_1 \oplus \currfield.x_2 \oplus \currfield.x_1^2 \oplus \currfield.x_1 x_2,$$
in particular $\lambda_{0,2} x_2^2 = a \lambda_{0,2} x_1^2 - \lambda_{0,2} \in \currfield \oplus \currfield.x_1^2$ for any $\lambda_{0,2} \in \currfield$. In general, for $\filteredA^{\leq n}$ we may substitute all terms $x_2^{2 l}$ with $(a x_1^2 - 1)^l$ and all $x_2^{2l + 1}$ with $(a x_1^2 - 1)^l x_2$, for all $l \in \nz$. Therefore the quotient spaces are
$$A_n := \filteredA^{\leq n}/ \left(\filteredA^{\leq n} \cap I\right) /\left(\filteredA^{\leq n - 1} / \left(\filteredA^{\leq n - 1} \cap I\right) \right)\simeq \currfield.x_1^n \oplus \currfield.x_1^{n-1} x_2,$$
and therefore its associated graded $\currfield$-algebra is obviously:
$$A = \bigoplus_{n \geq 0} A_n \simeq \currfield[x_1] \oplus \currfield[x_1].x_2.$$
Summarizing we get
$$\trm{Sym}(S(L))/I = \filteredA/I = \bigcup_{n \geq 0} \filteredA^{\leq n}/ \filteredA^{\leq n} \cap I \simeq A \simeq \currfield[x_1] \oplus \currfield[x_1].x_2,$$
with multiplication
$$\mu = \left[x_1^{i_1} x_2^{j_1} \otimes x_1^{i_2} x_2^{j_2} \longmapsto x_1^{i_1 + i_2} (a x_1^2 - 1)^{(j_1 + j_2 - (j_1 + j_2 \mod 2))/2} x_2^{j_1 + j_2 \mod 2}\right],\ \forall i_l \in \nz, j_l = 0, 1.$$
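For instance, with $i_1 = j_1 = 1$, $i_2 = 0$, $j_2 = 1$:
$$\mu(x_1 x_2 \otimes x_2) = x_1^{1+0} (a x_1^2 - 1)^{(2 - 0)/2} x_2^0 = a x_1^3 - x_1,$$
matching $x_1 x_2^2 = x_1 (a x_1^2 - 1)$ in the quotient.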
We note that the subalgebra $\currfield[x_1]$ is not a differential algebra as
$$\partial = \left[x_1^i x_2^j \longmapsto \begin{cases}
(1 + i) a x_1^{i+1} - i x_1^{i-1}& j = 1\\
i x_1^{i-1} x_2 & j = 0\\
\end{cases}\right].$$
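A quick check of the first case for $i = j = 1$, using $x_2^2 = a x_1^2 - 1$:
$$\partial(x_1 x_2) = x_2^2 + a x_1^2 = (a x_1^2 - 1) + a x_1^2 = 2 a x_1^2 - 1,$$
in accordance with $(1 + i) a x_1^{i+1} - i x_1^{i-1}$ for $i = 1$.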
Localizing wrt. $S := \currfield[x_1]\bsl\{0\}$, i.e. non-zero polynomials, and identifying with all corresponding symmetric tensors we have that
$$K^+ := S^{-1} \left(\currfield[x_1] \oplus \currfield[x_1].x_2\right) = S^{-1} \currfield[x_1] \oplus S^{-1} \currfield[x_1].x_2 = \currfieldx \oplus \currfieldx.x_2.$$
However, we have not yet shown whether $K^+$ is an integral domain. Now let us consider $p = Y^2 - a x_1^2 + 1 \in \currfieldx[Y]$.
$$Y^2 - \underbrace{(a x_1^2 - 1)}_{= \alpha^2} = (Y - \alpha)(Y + \alpha).$$
If $\alpha \in \currfieldx$, then $\ov{Y \pm \alpha} = \ov{Y} \pm \alpha$ are zero-divisors in the quotient ring and with $\trm{gcd}(Y - \alpha, Y + \alpha) = 1$ we get
$$\currfieldx[Y]/\left<Y^2 - a x_1^2 + 1\right> \simeq \currfieldx^2$$
as a $\currfieldx$-vector space, with multiplication:
$$\mu(a \otimes \_) := [b \longmapsto a b] \equiv \left(\bao{cc}
a_0 & (a x_1^2 - 1) a_1\\
a_1 & a_0\\
\ea\right) \in M_2(\currfieldx),\ \forall a \equiv (a_0, a_1) \in \currfieldx^2$$
and with derivation:
$$\partial = \left[
\bao{ccc}
(1_{\currfield} x_1^i, 0) &\longmapsto& (0 , i x_1^{i-1})\\
(0, 1_{\currfield} x_1^i) &\longmapsto& ((1 + i) a x_1^{i+1} - i x_1^{i-1},0)\\
\ea
\right].$$ Hence, the Picard-Vessiot field $K$ would be simply $\currfieldx$. This implies:
$$\exists \alpha \in \currfield.x_1 \bsl \{0\}:\ \partial_{\currfieldx}(\alpha) \in \currfield.x_1 \LRA \exists v \in \ker (\alpha id_{\currfield.x_1} - \partial) \bsl\{0\},$$
i.e. an eigenvector of $\partial_K$. If, on the other hand, $[\currfield(x_1,\alpha) : \currfieldx] := \deg \trm{Min}(\alpha,\currfieldx) = 2$, then
$$\RA\ (K, \partial_K) \simeq \left(\currfield\left(x_1,\sqrt{a x_1^2 - 1}\right), \partial_{K_1}\right)$$% \simeq \left(\currfield\left(x_2,\sqrt{(1 + x_2^2)/(a)}\right), \partial_{K_2}\right),$$
is a field, with $\currfieldx/\currfield$ transcendental and with $\currfield$-derivation:%s:
$$\bao{rrcl}
\partial_{K}%_1}
: &\currfield\left(x_1,\sqrt{a x_1^2 - 1}\right)&\longrightarrow&\currfield\left(x_1,\sqrt{a x_1^2 - 1}\right)\\
&&&\\
&x_1 &\longmapsto& \sqrt{a x_1^2 - 1}\\
&\sqrt{a x_1^2 - 1} &\longmapsto& a x_1\\
&&&\\
%\partial_{K_2} : &\currfield\left(x_2,\sqrt{(1 + x_2^2)/a}\right)&\longrightarrow&\currfield\left(x_2,\sqrt{(1 + x_2^2)/a}\right)\\
%&&&\\
%&x_2 &\longmapsto& \sqrt{\frac{1 + x_2^2}{a}}\\
%&\sqrt{\frac{1 + x_2^2}{a}} &\longmapsto& a x_2\\
\ea$$
However, we claim that since $x_1$ is transcendental over $\currfield$ and the polynomial $a x_1^2 - 1$ is not a square in $\currfield[x_1]$, the polynomial $X^2 - a x_1^2 + 1$ is irreducible over $\currfieldx$. First, let us assume there is a root $\alpha = \frac{\beta}{\gamma} \in \currfieldx$, where $\trm{gcd}(\beta,\gamma) = 1$ and $\gamma \neq 0$. Then we compute:
$$\alpha^2 = a x_1^2 - 1 = \frac{\beta^2}{\gamma^2} \LRA \beta^2 = \gamma^2 (a x_1^2 - 1),$$
i.e. $\beta \in \currfield[x_1]$ is a root of the polynomial $X^2 - \gamma^2(a x_1^2 - 1)$. Having $\beta = \sum_{j=0}^{n_1} \beta_j x_1^j, \gamma = \sum_{i=0}^{n_2} \gamma_i x_1^i$, with $\gamma_i, \beta_j \in \currfield$, we get that $n_1 = n_2 + 1$ due to the factor $a x_1^2$. On the other hand, both decompose into linear factors due to the algebraic closedness of $\currfield$, e.g.:
$$\beta = \wt{\beta}_0 \prod_{i=1}^{m_1} (x_1 - \wt{\beta}_i)^{s_i},\ \gamma = \wt{\gamma}_0 \prod_{j=1}^{m_2} (x_1 - \wt{\gamma}_j)^{t_j},\ \trm{and}\ \sum_{i=1}^{m_1} s_i = \sum_{j=1}^{m_2} t_j + 1.$$
Clearly, all but at most two factors on both sides of our polynomial equation cancel. So we get $n_2 \leq 0$ and $n_1 \leq 1$. In particular, $\alpha$ itself is already in $\currfield[x_1]$ for $n_2 = 0$ (i.e. $\gamma \in \currfield^\times$). This is not possible, as
$$\wt{\beta}_0^2 (x_1 - \wt{\beta}_1)^2 = \wt{\beta}_0^2(x_1^2 - 2 \wt{\beta}_1 x_1 + \wt{\beta}_1^2) = \wt{\gamma}_0^2 (a x_1^2 - 1) \LRA 2 \wt{\beta}_1 = 0 \wedge \wt{\beta}_0^2 \wt{\beta}_1^2 = -\wt{\gamma}_0^2$$
leads to the contradiction $\gamma = 0$. Hence, $\alpha$ is algebraic and $[\currfieldx(\alpha) : \currfieldx] = 2$.\\
\indent Nevertheless, there is one aspect we have not considered yet, which we will discuss now.
%However, since $\currfield$ is algebraically closed, we get the abovementioned decomposition which will get discussed right now.
\item $\currfield(x_1,x_2) = K$ is contained in a $\currfield$-algebra generated by a single transcendental element $y$ which is a unit in $R$: the ring $R = \currfield[x_1,x_2,\det X^{-1}]$ contains four non-trivial units (i.e. elements in $R^\times \bsl \currfield^\times$). Namely:
$$\pm \sqrt{a} x_1 \pm x_2,$$
to specify: four non-trivial divisors of $\det X$ in $\currfield[x_1,x_2]$. Computing their derivative wrt. $\partial$, we get:
$$\bao{rclcl}
\partial(\sqrt{a} x_1 \pm x_2) &=& \sqrt{a} x_2 \pm a x_1 &=& \pm \sqrt{a} (\sqrt{a} x_1 \pm x_2)\\
&&&&\\
\partial(-\sqrt{a} x_1 \pm x_2) &=& -\sqrt{a} x_2 \pm a x_1 &=& \mp\sqrt{a} (-\sqrt{a} x_1 \pm x_2).\\
\ea$$
Each of the four elements is an eigenvector of $\partial$ in $R$ with eigenvalue $\pm \sqrt{a}$; equivalently, each solves one of the following differential equations:
$$L_{\pm} := \partial(x) \mp \sqrt{a} x = 0,$$
where the subscript sign defines the sign of the eigenvalue. Defining $y_1 \in S(L_+), y_{-1} \in S(L_-)$ we get that $K = \currfield(x_1,x_2)$ contains two subfields depicted in the following diagram:
$$\xymatrix{
& \currfield(y_1) \ar@{^{(}->}[rd]&\\
\currfield\ar@{^{(}->}[rd]\ar@{^{(}->}[ru]&&\currfield(x_1,x_2) \simeq \currfield(y_1)\\
& \currfield(y_{-1}) \ar@{^{(}->}[ru]&\\
}$$
By a direct computation (expanded after this list), we see that:
$$L = \partial^2 - a \cdot id = (\partial - \sqrt{a}) (\partial + \sqrt{a}).$$
And our differential module $S(L)$ decomposes:
$$S(L)^* \simeq \currfield[\partial]/\currfield[\partial].L \simeq \currfield[\partial]/\currfield[\partial].L_+ \oplus \currfield[\partial]/\currfield[\partial].L_- \simeq S(L_+)^* \oplus S(L_-)^*.$$
\en
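The factorization used above is a direct expansion, valid because $\sqrt{a}$ is a constant (so that $\partial$ and multiplication by $\sqrt{a}$ commute):
$$(\partial - \sqrt{a})(\partial + \sqrt{a})(y) = \partial^2(y) + \sqrt{a}\,\partial(y) - \sqrt{a}\,\partial(y) - a\, y = (\partial^2 - a \cdot id)(y).$$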
\paragraph{The subfields}
Firstly, we would like to compute the Picard-Vessiot ring for $M_{i} = \currfield(y_i)$:
$$R_i = \currfield[y_{i}, y_i^{-1}],\ \trm{for}\ i = \pm1$$
%As the image of the ideal $\left<y_i\right> \subset \currfield[y_i]$ is the whole ring under inclusion or equivalently $1_R \in R_i.y_i$, we have indeed that $R_i$ is simple differential.
%Let $I \subset R_i$ be a proper ideal and we assume differential closedness - i.e. $\partial(I) \subset I$.
%As a noetherian $R$-submodule of a noetherian module $R$ (generated by $y_{\pm 1}$ over $\currfield$), $I$ is finitely generated. Hence, let $S:= \{s\} \subset I$ be one generating set. By differential closedness, we get for any $s \in S$:
%$$\partial(s) = \partial\left(\sum_{i=-m}^n s_i y_1^i\right) = \sum_{i=-m}^n s_i \partial(y_1^i) = \sum_{i=-m}^n i \sqrt{a} s_i y_1^i \in I$$
%$$\LRA \partial(s) - s = \sum_{i=-m}^n (i \sqrt{a} - 1) s_i y_1^i \in I$$
%But both, $s, \partial(s) - s$ are of degree $n$, or $m$ wrt. $y_{\pm 1}$ and $y_1^m (\partial(s) - s) \in \currfield[y_1]$.
% There is an ideal $I' \subset \currfield[y_1]$, such that $S_{y_1}^{-1}(I') \supset I$. By definition of $I$, we get
%$$y_1^m t \in I' \RA \partial(y_1^m t) = \underbrace{m \sqrt{a} y_1^m t}_{\in I'} + \underbrace{y_1^m \partial(t)}_{\in \partial(I')},$$
%but identifying $I' := I \cap \frac{\currfield[y_1]}{1}$ we get $\partial(I') \subset I'$. Being a PID, all ideals $I' \subset \currfield[y_1]$ are of the form $\left<s\right>$. On the other hand, $\partial$ operates on all weight spaces $\currfield.y_1^i$, $i \geq 1$, invariantly:
%$$\bao{rrcl}
%\partial_i := \partial\mid_{\currfield.y_1^i} : &\currfield.y_1^i &\longrightarrow& \currfield.y_1^i\\
%&&&\\
%&y_1^i &\longmapsto&i \sqrt{a} y_1^i\\
%\ea$$
%Thus we have:
%$$\partial s = \sum_{i=0}^n s_i \partial(y_1^i) = \sum_{i=0}^n i \sqrt{a} s_i y_1^i \in \left<s\right> \LRA \partial(s) \equiv 0 \mod s$$
%$$\LRA \sum_{i=0}^{n-1} (i - n) \sqrt{a} s_i y_1^i = 0 \LRA s_i = 0 \vee n - i = 0\ \forall 0 \leq i \leq n - 1,$$
%Hence, each derivative of the generators $s$ agree in degree but also reduce to zero modulo $\left<s\right>$ contradicting our claim $\partial(s) \in \left<s\right>$. Thus, all $D$-stable ideals in $R$ are indeed trivial.
To begin with, we get:
$$\partial(y_i^{-1}) = -\frac{\partial(y_i)}{y_i^2} = -i \sqrt{a} y_i^{-1}, i =\pm1$$
implying the multiplicative inverse solves the opposite differential equation. Hence, $R_i$ already contains all solutions of $L_\pm$ by simply putting $y_{-1} := y_1^{-1}$.\\
\indent Let $R = R_1$. Furthermore, we want to show that $R$ is indeed a simple differential ring over $\currfield$. Clearly, $\partial$ operates invariantly on $\currfield.y_{\pm 1}^i$ for all $i \neq 0$. In addition, $R$ is isomorphic to $S_{X}^{-1}\currfield[X]$, the localization of $\currfield[X]$ where
$$S_X = \currfield[X] \bsl \bigcup_{\substack{\mathfrak{p} \in \trm{Spec}(\currfield[X])\\X \notin \mathfrak{p}}} \mathfrak{p} = \{f \in \currfield[X] : f \notin \idealp\ \forall \idealp \in \trm{Spec} \currfield[X] \bsl \{\left<X\right>\}\}.$$ Furthermore, every non-zero prime ideal in $\currfield[X]$ is generated by $X - \alpha$ for some $\alpha \in \currfield$, as $\currfield$ is algebraically closed. Hence, on the one hand we get:
$$S_X = \{f \in \currfield[X]\bsl\{0\} : f \notin \left<X - \alpha\right>\ \forall \alpha \in \currfield^\times\} = \{c\, X^i : c \in \currfield^\times,\ i \geq 0\}$$
and on the other hand: if $I' \subset R$ is an ideal, there is an ideal $I \subset \currfield[X]$ such that $I' = S_{X}^{-1} I$. This implies that every ideal in $R$ is principal. In $\currfield[X]$ the derivation $\partial = \sqrt{a}\, X \cdot \frac{d}{d X}$ maps every element into $\left<X\right>$; in particular, every element $p (X - \alpha)$ of a prime ideal $\left<X - \alpha\right>$ satisfies
$$p (X - \alpha) \longmapsto \partial(p) (X - \alpha) + \sqrt{a}\, p\, X,\ \forall \alpha \in \currfield,\ p \in \currfield[X].$$
For $\alpha \neq 0$ we have $\partial(X - \alpha) = \sqrt{a}\, X \notin \left<X - \alpha\right>$, so $\left<X - \alpha\right>$ is not a differential ideal, while clearly $\partial(\left<X\right>) \subset \left<X\right>$. This shows that the differential $\currfield$-algebra $\left(\currfield[X], \partial = \sqrt{a} X \cdot \frac{d}{d X}\right)$ has exactly one differential prime ideal, $\left<X\right>$ (in our case even maximal). However, the localization cancels this, as its generator $X$ is a unit in $R$ (using the variable notation $X, y_1$ interchangeably). Therefore, our rings $R_1, R_{-1}$ are indeed simple differential rings, i.e. PV rings. In the language of $D$-module algebras, $R_1, R_{-1}$ are simple $D$-rings, for $D = \currfield[\partial]$.\\
\indent To conclude this paragraph, we want to show that $\currfield(x_1,x_2) \simeq \currfield(y_1)$ as differential field extensions over $\currfield$. To achieve this we need to show that the maps:
$$\bao{rrcl}
\Phi : & \currfield(y_1) &\longrightarrow& \currfield(x_1,x_2)\\
& y_1 &\longmapsto & \sqrt{a} x_1 + x_2\\
& y_1^{-1}&\longmapsto & \sqrt{a} x_1 - x_2\\
&&&\\
\Psi : &\currfield(x_1,x_2) &\longrightarrow& \currfield(y_1)\\
& x_1 &\longmapsto& \frac{1}{2 \sqrt{a}} (y_1 + y_1^{-1})\\
& x_2 &\longmapsto& \frac{1}{2} (y_1 - y_1^{-1})\\
\ea$$\label{PVisomorph}
are bijective and inverse to one another as differential $\currfield$-algebra homomorphisms (or $D$-module algebra homomorphisms). Clearly, the two $\currfield$-vector spaces $V_1 := \currfield.y_1 \oplus \currfield.y_1^{-1}$ and $V_2 := \currfield.x_1 \oplus \currfield.x_2$ are isomorphic by simple basis change induced by the restrictions $\mid_{V_i}$ of the above $\currfield$-vector space homomorphisms. Furthermore, $\Psi \mid_{V_2} \Phi \mid_{V_1} = id_{V_1}$ and $\Phi \mid_{V_1} \Psi \mid_{V_2} = id_{V_2}$ and both are $D$-modules. Hence, the basis change respects this property as shown above. This property is kept for the recursively defined family of $D$-modules:
$$V_{1,i} := V_{1,i-1} \oplus \currfield.y_1^i \oplus \currfield.y_1^{-i},\ V_{2,i} := V_{2,i-1} \oplus \currfield.x_1^i \oplus \currfield.x_1^{i-1} x_2, \forall i \geq 1$$
and $V_{j,0} = \currfield$ for $j = 1, 2$. We get:
$$\bao{rclclcl}
\bigcup_{i \geq 0} V_{1, i} &\simeq& \currfield[y_1,X]/\left<y_1 X - 1\right> &\simeq& \currfield[y_1,y_1^{-1}] &=:& R_1\\
&&&&\\
\bigcup_{i \geq 0} V_{2,i} &\simeq& \currfield[x_1,x_2]/\left<x_2^2 - a x_1^2 + 1\right> &\simeq& \currfield[x_1,\sqrt{a x_1^2 - 1}] &=:& R_2,\\
\ea$$
i.e. a $D$-stable filtration for each of the subalgebras $R_1, R_2$. Having already shown their simplicity, we only need surjectivity, as $\Psi\mid_{V_{2,0}} = id_{V_{2,0}}, \Phi\mid_{V_{1,0}} = id_{V_{1,0}}$ already implies injectivity. Let $f$ and $g$ equal $\sum_i \left(f_{1,i} y_1^i + f_{-1,i} y_1^{-i}\right) \in R_1$ and $\sum_i \left(g_{1,i} x_1^i + g_{2,i} x_1^i x_2\right) \in R_2$, respectively. Obviously, $\sum_i \left(f_{1,i} (\sqrt{a} x_1 + x_2)^i + f_{-1,i} (\sqrt{a} x_1 - x_2)^i\right)$ and
$\sum_i \left(\frac{y_1 + y_1^{-1}}{2 \sqrt{a}}\right)^i \left(g_{1,i} + \frac{g_{2,i}}{2} (y_1 - y_1^{-1})\right)$ are elements in their respective preimages. This extends naturally to their localizations.
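As a quick sanity check that the two maps are mutually inverse on generators:
$$\Psi(\Phi(y_1)) = \Psi(\sqrt{a}\, x_1 + x_2) = \sqrt{a} \cdot \frac{1}{2\sqrt{a}}(y_1 + y_1^{-1}) + \frac{1}{2}(y_1 - y_1^{-1}) = y_1.$$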
%Hence, any ideal stable under $\partial$ is, on the one hand, generated by a single element, on the other hand, $\deg f = \deg \partial(f)$, where $\deg = \left[f = \sum_{i=-m}^n f_i X^i \longmapsto \max(m,n)\right]$. In particular, $\left<f, \partial f\right>$ is also principal and a subideal of $I$.
\paragraph{Galois group of the subfields}
As above $\trm{DGal}(\currfield[y_1,y_{-1}]/\currfield) = \{a \in \trm{Gl}_1(\currfield) : \partial a = a \partial\}$. Obviously, the unit group of $\currfield$ is our differential Galois group.
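Concretely, for $c \in \currfield^\times$ the map $\sigma_c := [y_1 \longmapsto c\, y_1]$ commutes with $\partial$:
$$\partial(\sigma_c(y_1)) = \partial(c\, y_1) = c\, \sqrt{a}\, y_1 = \sqrt{a}\, \sigma_c(y_1) = \sigma_c(\partial(y_1)),$$
so every unit indeed defines an element of $\trm{DGal}(\currfield[y_1,y_{-1}]/\currfield)$.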
% However, both generators are eigenvectors. Hence, the diagonal matrix
%$$A_{L_++L_-} = \left(\bao{cc}
%\sqrt{a} & 0\\
%0 & -\sqrt{a}\\
%\ea\right)$$
%represents $\partial$ on $S(L_+) \oplus S(L_-)$ with canonical basis vectors identified with $y_1, y_{-1}$, respectively. Clearly, all matrices with diagonal entries are the only ones fulfilling our definition. We get
%$$\trm{DGal}(\currfield[y_1,y_{-1}]/\currfield) \simeq \currfield^\times \times \currfield^\times.$$
\paragraph{Conclusion}
Given our above example, the PV-ring and field are
$$R = \currfield[y_1,y_{-1} := y_1^{-1}],\ K = S^{-1}R = \currfield(y_1),$$
respectively, with differential Galois group
$$\trm{DGal}(\currfield[y_1,y_{-1}]/\currfield) \simeq \currfield^\times.$$
The differential ring $\currfield[x_1,x_2]/\left<x_2^2 - a x_1^2 + 1\right> = \trm{Sym}(S(L))/\left<x_2^2 - a x_1^2 + 1\right> \simeq \trm{Sym}(\currfield^2)/\left<e_2^2 - a e_1^2 + 1\right>$ is isomorphic to
$$\currfield[y_1,y_{-1}] = \currfield[y_1, y_1^{-1}].$$
\bmk %The results are valid in case $a, \sqrt{a} \in \currfield[z]\bsl\currfield$, if our matrix equation gets slightly modified:
%$$A_L = \left(\bao{cc}
%0 & 1\\
%a \pm \partial(\sqrt{a}) & 0\\
%\ea\right) \in \trm{Gl}_2(\currfield(z))\LRA L(y) = \partial^2(y) - \left(a \mp \partial(\sqrt{a})\right)y = 0,\ \forall y \in S(L).$$
%The different signs arise from two isomorphic solution spaces $S_1 = S(L_+) \oplus S(L_-)$, $S_2 = S(L_-) \oplus S(L_+)$, or equivalently, by the order of operator evaluation:
%$$S_1 \simeq D/D.(\partial - \sqrt{a})(\partial + \sqrt{a}),\ S_2 \simeq D/D.(\partial + \sqrt{a})(\partial - \sqrt{a}),\ \trm{where}\ D := \currfield(z)[\partial], \partial(z) = 1.$$
A more exhaustive approach to $\partial^2 - a \cdot id$, where $a$ is not a constant, is given in \cite{vdPS01} and \cite{CohCuySte}.\\
\indent Unfortunately, our given examples are all linear homogeneous ODEs. The theory we are going to present in the next chapter is more general. It includes theory for general ODEs (linear and non-linear) in characteristic zero, (multivariate) iterative derivations in positive characteristic, and difference equations in arbitrary characteristic.\\
\indent Nevertheless, as the general theory does not require the ring/field of constants to be algebraically closed we may examine a broader setting.
%though each factor is not differentially closed (i.e. $(1,0) \stackrel{\partial}{\longmapsto} (0,1) \stackrel{\partial}{\longmapsto} (a,0)$).
%We want to see if the following $\currfield$-linear maps are differentially invariant:
%$$\sigma_1 = [\pm y_1 \longmapsto \mp y_1,\ \pm y_1^{-1} \longmapsto \mp y_1^{-1}] \in \trm{Gl}(R_1)$$
%$$\sigma_2 = [y_1 \longmapsto y_1^{-1}] \in \trm{Gl}(R_1)$$
%Since $\pm y_1$ and $\pm y_1^{-1}$ have the same eigenvalue $\sqrt{a}$ and $-\sqrt{a}$ respectively, we have that $\sigma_1$ commutes with $\partial$. Hence, $\sigma_1 \in \trm{DGal}(M_1/\currfield)$. On the other hand, $\sigma_2(\partial(y_1)) = \sigma_2(\sqrt{a} y_1) = \sqrt{a} y_1^{-1} \neq \partial(\sigma_2(y_1)) = \partial(y_1^{-1}) = -\sqrt{a} y_1^{-1}$. %Now, we may express $v = \sum_{i \in \{1,2\}} v_i x_i \in \trm{Sol}(\partial^2 - a \cdot id)$ as linear combinations of $v = \sum_{i \in \{\pm 1\}} v'_i y_i$:
%$$\bao{rrcl}
%M_{B_x}^{B_y}(id):& v &=& v'_1 y_1 + v'_{-1} y_{-1}\\
%& &=& v'_1 (\sqrt{a} x_1 + x_2) + v'_{-1} (\sqrt{a} x_1 - x_2)\\
%&&&\\
%&&=& \sqrt{a} (v'_1 + v'_{-1}) x_1 + (v'_1 - v'_{-1}) x_2\\
%&&&\\
%M_{B_y}^{B_x}(id)&v &=& v_1 x_1 + v_{2} x_{2}\\
%&&=& \frac{v_1}{2 \sqrt{a}}(y_1 + y_{-1}) + \frac{v_{2}}{2} (y_1 - y_{-1})\\
%&&&\\
%&&=& \frac{1}{2 \sqrt{a}} \left[(v_1 + \sqrt{a} v_2) y_1 + (v_1 - \sqrt{a} v_2) y_{-1}\right]\\
%\ea$$
%Hence, we have the following basis transformation matrices:
%$$M_{B_x}^{B_y} = \left(\bao{cc}
%\sqrt{a} & \sqrt{a}\\
%1 & -1\\
%\ea\right),\ \ M_{B_y}^{B_x} = \frac{1}{2 \sqrt{a}}\left(\bao{cc}
%1 & \sqrt{a}\\
%1 & -\sqrt{a}\\
%\ea\right)$$
%The conjugate $M_{B_y}^{B_x} G M_{B_x}^{B_y}$ of $G:= \trm{DGal}(\currfield(x_1,x_2)/\currfield)$ defines a group action on $S(L_+) \oplus S(L_-)$ or equivalently, makes $S(L_+) \oplus S(L_-)$ a $G$-module:
%$$\bao{rrcl}
%\alpha : &G \times \left(S(L_+) \oplus S(L_-)\right)& \longrightarrow &S(L_+) \oplus S(L_-)\\
%&&&\\
%&\left(g := \left(\bao{cc}
%g_{11} & g_{12}\\
%a g_{12} & g_{11}\\
%\ea\right), v'_1 y_1 + v'_{-1} y_{-1}\right) &\longmapsto&M_{B_y}^{B_x} g %\left(\bao{cc}
%g_{11} & g_{12}\\
%a g_{12} & g_{11}\\
%\ea\right)
%M_{B_x}^{B_y}\left(\bao{c}
%v'_1\\
%v'_{-1}\\
%\ea\right)\\
%\ea$${\footnotesize
%$$M_{B_y}^{B_x}g %\left(\bao{cc}
%g_{11} & g_{12}\\
%a g_{12} & g_{11}\\
%\ea\right)
%M_{B_x}^{B_y}\left(\bao{c}
%v'_1\\
%v'_{-1}\\
%\ea\right) = \frac{1}{2 \sqrt{a}}\left(\bao{cc}
%2 \sqrt{a} g_{11} + (1 + a^2) g_{12}& -(1 - a^2) g_{12}\\
%(1 - a^2) g_{12} & 2 \sqrt{a} g_{11} - (1 + a^2) g_{12}\\
%\ea\right) \left(\bao{c}
%v'_1\\
%v'_{-1}\\
%\ea\right)\\
%$$}
%The fixed field $\currfield(y_1)^G$ is
%Multiplying $T$ with the generator of $\trm{DGal}(\currfield(x_1,x_2)/\currfield)$ we get:
%$$T g = \left(\bao{cc}
%\sqrt{a} & 1\\
%\sqrt{a} & -1\\
%\ea\right) \left(\bao{cc}
%g_{11} & g_{12}\\
%a g_{12} & g_{11}\\
%\ea\right) = \left(\bao{cc}
%\sqrt{a} g_{11} + a g_{12} & \sqrt{a} g_{12} + g_{11}\\
%\sqrt{a} g_{11} - a g_{12} & \sqrt{a} g_{12} - g_{11}\\
%\ea\right)$$
%Analogously to the classical Galois theory, there exists subgroups $H$ in $\trm{DGal}(\currfield(x_1,x_2)/\currfield)$ such that $\currfield(x_1,x_2)^H := \{x \in K : \sigma(x) = x \forall \sigma \in H\}$ is a Picard-Vessiot field over $\currfield$. Conversely, the $\trm{DGal}(\currfield(x_1,x_2)/M)$ is a (closed) subgroup of $\trm{DGal}(\currfield(x_1,x_2)/\currfield)$ for any intermediate Picard-Vessiot field $\currfield \subset M \subset \currfield(x_1,x_2)$. %Since we already have two Picard-Vessiot subfields $M_i = \currfield(y_i)$ of $K$, we only need to compute the Galois groups of both differential fields.
"alphanum_fraction": 0.6569545048,
"avg_line_length": 92.1064467766,
"ext": "tex",
"hexsha": "dfbfb0c7c87fbbfa37cd515df8b9ee0f25a2a097",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "1a3fab54f2e03d9ce656f9b8a5b58e26c3c93a02",
"max_forks_repo_licenses": [
"Unlicense"
],
"max_forks_repo_name": "gmuel/texlib",
"max_forks_repo_path": "Script_Diff_Gal07/diffRingNMods.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "1a3fab54f2e03d9ce656f9b8a5b58e26c3c93a02",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Unlicense"
],
"max_issues_repo_name": "gmuel/texlib",
"max_issues_repo_path": "Script_Diff_Gal07/diffRingNMods.tex",
"max_line_length": 942,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "1a3fab54f2e03d9ce656f9b8a5b58e26c3c93a02",
"max_stars_repo_licenses": [
"Unlicense"
],
"max_stars_repo_name": "gmuel/texlib",
"max_stars_repo_path": "Script_Diff_Gal07/diffRingNMods.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 23340,
"size": 61435
} |
\section{Swimming}\label{sec:swimming}
\textbf{Skill, Passive, Source(10 Gold), Repeatable}\\
You can add your relevant game mode level to checks concerning moving in water as well as other fluids.
This is also useful for diving for a long period of time, fighting against a current, or appraising how rough the currents in a body of water are.
"alphanum_fraction": 0.7877906977,
"avg_line_length": 86,
"ext": "tex",
"hexsha": "09b6e3b8899a630cca8e70b0e305a7b1dfa88f22",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "73781f7cd7035b927a35199af56f9da2ad2c2e95",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "NTrixner/RaggedLandsPenAndPaper",
"max_forks_repo_path": "perks/skills/swimming.tex",
"max_issues_count": 155,
"max_issues_repo_head_hexsha": "73781f7cd7035b927a35199af56f9da2ad2c2e95",
"max_issues_repo_issues_event_max_datetime": "2022-03-03T13:49:05.000Z",
"max_issues_repo_issues_event_min_datetime": "2018-03-18T13:19:57.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "NTrixner/RaggedLandsPenAndPaper",
"max_issues_repo_path": "perks/skills/swimming.tex",
"max_line_length": 146,
"max_stars_count": 6,
"max_stars_repo_head_hexsha": "73781f7cd7035b927a35199af56f9da2ad2c2e95",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "NTrixner/RaggedLandsPenAndPaper",
"max_stars_repo_path": "perks/skills/swimming.tex",
"max_stars_repo_stars_event_max_datetime": "2022-02-03T09:32:08.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-03-13T09:33:31.000Z",
"num_tokens": 82,
"size": 344
} |
Subsets and Splits