%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Deedy - One Page Two Column Resume
% LaTeX Template
% Version 1.1 (30/4/2014)
%
% Original author:
% Debarghya Das (http://debarghyadas.com)
%
% Author:
% Aayush Shaurya
% Forked on 20/1/2017
% Version 1.19
% Last Updated:
% 15:13, 20/1/2017
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Known Issues:
% 1. Overflows onto second page if any column's contents are more than the
% vertical limit
% 2. Hacky space on the first bullet point on the second column.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\documentclass[]{deedy-resume-openfont}
\begin{document}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LAST UPDATED : 20/1/2017
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% \lastupdated
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TITLE NAME
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\namesection{}{Aditi Sinha}{\href{mailto:[email protected]}{[email protected]} | +91~9928~086877
}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% COLUMN ONE
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{minipage}[t]{0.33\textwidth}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% EDUCATION
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Education}
\subsection{M.Sc, Economics Hons}
\descript{BITS Pilani, Rajasthan |}
\location{2018}\\
CGPA: 8.03/10.00
\sectionsep
\subsection{CBSE (Class XII)}
\descript{International School, Patna |}
\location{2013}\\
Performance: 90\%
\sectionsep
\subsection{CBSE (Class X)}
\descript{Notre Dame Academy, Patna |}
\location{2011}\\
CGPA: 10.00/10.00
\sectionsep
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% ELECTIVES
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Electives}
Derivatives and Risk Management, Securities and Portfolio Management, International Business, Marketing Research, Principles of Management
\sectionsep
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% POSITIONS OF RESPONSIBILITY
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Positions of Responsibility}
\descript{Coordinator,}
\descriptbf{TEDxBITSPilani}\\
\hfill Jan'16 -- Jan'17 \\
\textbullet Led a 40-member, 3-tier team to organise a TEDx conference with 12 speakers, having an approximate budget of Rs. 5 lacs
\sectionsep
\descript{Core Team Member,}
\descriptbf{Wall Street Club,}
\descript{BITS Pilani}\\
\hfill Sep'15 -- Mar'16 \\
\textbullet Invested in BSE and NSE using fundamental and technical analysis of stocks
% \footnotesize \textit{\textbf{(Research Asst. \& Teaching Asst) }}
\sectionsep
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% EXTRA CURRICULAR ACTIVITIES
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Extra-Curricular Activities}
\begin{itemize}
\item Volunteered with the NGO Sankalp Jyoti in skill development training and employment generation for 300 women \\ \hfill June'15
\item Volunteered with the NGO HelpAge India to secure 50 associates for the HelpAge AdvantAge scheme for the elderly \\ \hfill July'16
\item Group Leader, Department of Sponsorship and Marketing, Oasis and Apogee \\ \hfill Jan'15 -- Oct'15
\begin{itemize}
\item Secured sponsorships worth Rs. 3 lacs from various stakeholders such as Birla Century Textiles, TVS Tyres, etc.
\end{itemize}
\end{itemize}
\sectionsep
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% COLUMN TWO
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\end{minipage}
\hfill
\begin{minipage}[t]{0.66\textwidth}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% EXPERIENCE
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Experience}
\subsection{Internships}
\descript{Winter Research Internship,}
\descriptbf{Indian School of Business}
\descript{|}
\location{Mohali, India}
\hfill Dec'16\\
\vspace{\topsep}
\begin{tightemize}
\item Data Analysis on \emph{Stata} of the skill development schemes by NSDC for the Indian states
\item Secondary Research on the Trump-Admin Reform (Financial Choice Act) of the Dodd-Frank Act
\end{tightemize}
\vspace{0.2em}
\descript{Field Internship,}
\descriptbf{Vision India Foundation}
\descript{|}
\location{Pilani, India}
\hfill Sep'16 -- Oct'16\\
\begin{tightemize}
\item Used primary and secondary research to build a case study on \emph{e-Mitra}, a Rajasthan government e-initiative (largest in India)
\item Used a data-oriented approach to solve current-day challenges for documenting good governance
\end{tightemize}
\vspace{0.2em}
\descript{Winter Internship,}
\descriptbf{Zoukloans}
\descript{|}
\location{Delhi, India}
\hfill Dec'15\\
\begin{tightemize}
\item Data Analysis on Excel to predict the footfall of 500 Delhi restaurants using data from Zomato
\end{tightemize}
\vspace{0.2em}
\descript{Summer Internship,}
\descriptbf{Airtel}
\descript{|}
\location{Patna, India}
\hfill July'15\\
\begin{tightemize}
\item Assisted managers in organising Marketing Events and conducting Market Research for the launch of Airtel 4G
\item Solved problem statements in Airtel Own Retail and learnt customer management via 50 interactions, 20 calls and 20 e-mails
\end{tightemize}
\sectionsep
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% RESEARCH
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Research}
\descriptbf{Public Sector Efficiency of Indian States}
\emph{(co-authored with Dr. Varun Chotia)}
\begin{tightemize}
\item Calculated public sector performance and efficiency of the Indian states via performance indicators from 2010-2013
\end{tightemize}
\vspace{0.5em}
\descriptbf{Challenges faced by Rural and Semi-Urban Retail}
\begin{tightemize}
\item Presented at the Indian Retail Conference, New Delhi (25th March, 2016)
\item Conducted descriptive and causal research in areas near Delhi and Pilani and gave corresponding solutions
\end{tightemize}
\vspace{0.5em}
\descriptbf{Comparative Portfolio Management Strategy across Different Income Classes}\\
\begin{tightemize}
\item Won 2\textsuperscript{nd} prize in Apogee Paper Presentation for the paper in the Eco-Fin Category amongst 100 students
\item Charted recommendations for an optimal portfolio depending on the person’s investing behavior according to the modern portfolio theory and reviewed studies on asset classes
\end{tightemize}
\sectionsep
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% PROJECT
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Projects}
\descriptbf{Rural Women Empowerment}
\emph{(under Dr. Sangeeta Sharma)} \hfill Aug'16 -- Nov'16\\
\begin{tightemize}
\item Educated about 200 rural women on safe sex, abortion, STDs, sanitation and hygiene via an initiative of the Rajasthan Government to produce low-cost sanitary napkins
\item Implemented ways to generate employment and promote self-employment for victims of sexual assaults
\end{tightemize}
\vspace{0.2em}
\descriptbf{Effect of Assortment on Restaurant Sales} \emph{(team of 3)} \hfill Aug'16 -- Nov'16\\
\begin{tightemize}
\item Consulting Project with Annapurna, a North-Indian Restaurant in BITS Pilani campus
\item Increased market share from 6\% to 11\% with a marketing strategy turnaround
\item Investigated reasons for drop in patronage and built a statistical model by using primary descriptive research
\end{tightemize}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% ACHIEVEMENTS
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Achievements}
\vspace{0.5em}
\begin{tightemize}
\item \descript{Core member,} \descriptbf{Business Development, Hyperloop India,} India’s largest multi-disciplinary, multi-campus student team \hfill Ongoing\\
\begin{tightemize}
\vspace{\topsep}
\item \footnotesize {Qualified for the semi-final round (35/2600 teams in the world) of the Hyperloop One Global Challenge conducted by Hyperloop One for composing a case to bring Hyperloop to India}
\end{tightemize}
\vspace{0.5em}
\item Recipient of the \descript{Merit scholarship} from State Bank of India for outstanding academic performance \hfill 2014 -- 2016\\
\vspace{0.2em}
\item Selected as a Student Delegate (300 delegates), \descript{World Business Dialogue} in Cologne, Germany \hfill March 2016\\
\vspace{0.2em}
\item Selected as the Indian Representative (700 students), \descript{International Student Energy Summit} in Bali, Indonesia \hfill June 2015
\end{tightemize}
\sectionsep
\end{minipage}
\end{document}
\section{201503-4}
\input{problem/4/201503-4-p.tex}
\lab{Serialization}{Serialization}
\objective{Learn about JSON and XML.}
\label{lab:webtech}
In order for computers to communicate with one another, they need standardized ways of storing structured data.
For example, suppose you have a Python list that you want to send to somebody else. How would you store it outside of the interpreter?
However we choose to store our list, we need to be able to load it back into the Python interpreter and use it as a list.
What if we wanted to store more complex objects?
The process of serialization seeks to address this situation.
Serialization is the process of storing an object and its properties in a form that can be saved or transmitted and later reconstructed back into an identical copy of the original object.
\subsection*{JSON}
JSON, pronounced ``Jason'', stands for \emph{JavaScript Object Notation}.
This serialization method stores information about the objects as a specially formatted string.
It is easy for both humans and machines to read and write the format.
When JSON is deserialized, the string is parsed and the objects are recreated.
Despite its name, it is a completely language-independent format.
JSON is built on top of two types of data structures: a collection of key/value pairs and an ordered list of values.
In Python, these data structures are more familiarly called dictionaries and lists respectively.
Python's standard library has a module that can read and write JSON.
In general, the JSON libraries of various languages have a fairly standard interface.
If performance is critical, there are Python modules for JSON that are written in C such as ujson and simplejson.
Let's begin with an example.
\begin{lstlisting}
>>> import json
>>> json.dumps(range(5))
'[0, 1, 2, 3, 4]'
>>> json.dumps({'a': 34, 'b': 483, 'c':"Hello JSON"})
'{"a": 34, "c": "Hello JSON", "b": 483}'
\end{lstlisting}
As you can see, the JSON representation of a Python list and dictionary are very similar to their respective string representations.
You can also see that a JSON object is enclosed in a pair of curly braces.
We can even nest objects inside one another.
\begin{lstlisting}
>>> a = """{"car": {
"make": "Ford",
"model": "Focus",
"year": 2010,
"color": [255, 30, 30]
}
}"""
>>> t = json.loads(a)
>>> print t
{u'car': {u'color': [255, 30, 30], u'make': u'Ford', u'model': u'Focus', u'year': 2010}}
>>> print t['car']['color']
[255, 30, 30]
\end{lstlisting}
Most JSON libraries support the dump[s]/load[s] interface.
To generate a JSON message, we use \li{dump}, which accepts a Python object, generates the message, and writes it to a file.
\li{dumps} does the same, but just returns the string rather than writing it to a file.
To perform the inverse operation, we use \li{load} or \li{loads} for reading from a file or string respectively.
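For example, here is a minimal sketch of the file-based interface (the filename \li{data.json} is arbitrary), round-tripping a dictionary through a file:
\begin{lstlisting}
>>> data = {'a': 34, 'b': 483}
>>> with open('data.json', 'w') as f:
...     json.dump(data, f)         # serialize to the open file
...
>>> with open('data.json', 'r') as f:
...     restored = json.load(f)    # deserialize from the open file
...
>>> restored == data
True
\end{lstlisting}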
The built-in JSON encoder/decoder only has support for the basic Python data structures such as lists and dictionaries.
Trying to serialize a set will result in an error:
\begin{lstlisting}
>>> a = set('abcdefg')
>>> json.dumps(a)
---------------------------------------------------------------------------
TypeError: set(['a', 'c', 'b', 'e', 'd', 'g', 'f']) is not JSON serializable
\end{lstlisting}
The serialization fails, because the JSON encoder doesn't know how it should represent the set as a string.
We can extend the JSON encoder by subclassing it and adding support for sets.
Since JSON has support for sequences and maps, one easy way is to express the set as a map with one key that tells us the data structure's type and another that contains the data in a list.
Now, we can encode our set.
\begin{lstlisting}
class CustomEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return {'dtype': 'set',
'data': list(obj)}
return json.JSONEncoder.default(self, obj)
>>> s = json.dumps(a, cls=CustomEncoder)
>>> s
'{"dtype": "set", "data": ["a", "c", "b", "e", "d", "g", "f"]}'
\end{lstlisting}
However, we want a Python set back when we decode.
JSON will happily return our dictionary, but the data will be in a list.
How do we tell it to convert our list back into a set?
The answer is to build a custom decoder.
Notice that we don't need to subclass anything.
\begin{lstlisting}
accepted_dtypes = {'set': set}
def custom_decoder(dct):
dt = accepted_dtypes.get(dct['dtype'], None)
if dt is not None and 'data' in dct:
return dt(dct['data'])
return dct
>>> json.loads(s, object_hook=custom_decoder)
{u'a', u'b', u'c', u'd', u'e', u'f', u'g'}
\end{lstlisting}
Many websites and web APIs make extensive use of JSON.
Twitter, for example, returns JSON messages for all queries.
\begin{problem}
Python has a module in the standard library that allows easy manipulation of times and dates. The functionality is built around the \li{datetime} object.
However, \li{datetime} objects are not JSON serializable.
Determine how best to serialize and deserialize a datetime object, then write a custom encoder and decoder.
The datetime object you serialize should be equal to the datetime object you get after deserializing.
\label{prob:datetime_json}
\end{problem}
\subsection*{XML}
XML is another data interchange format.
It is a markup language rather than an object notation language.
Broadly speaking, XML is somewhat more robust and versatile than JSON, but less efficient.
To understand XML, we need to understand what tags are.
A tag is a special command enclosed in angled brackets ($<$ and $>$) that describes properties of the data enclosed.
For example, we can represent our car from above in the XML below.
\begin{lstlisting}[language=XML]
<car>
<make>Ford</make>
<model>Focus</model>
<year>2010</year>
<color model='rgb'>255,30,30</color>
</car>
\end{lstlisting}
We can read XML data as a tree or as a stream.
Since XML is a hierarchical storage format, it is very easy to build a tree of the data.
The advantage is random access to any part of the document at any time.
However, all of the XML must be loaded into memory to build this tree.
To alleviate the burden of loading a large XML document into memory all at once, we can read the file sequentially.
When streaming the XML data, we are only reading a small chunk of the file at a time.
There is no limit to the size of the XML document that we can process this way, as memory usage will remain constant.
However, we sacrifice the random access that the tree gives us.
\subsection*{DOM}
The DOM (Document Object Model) API allows you to work with an XML document as a tree.
Python's XML package includes two implementations of DOM: \li{xml.dom} and \li{xml.dom.minidom}.
MiniDOM is a minimal, simpler implementation of the DOM API.
The motivation behind DOM is to represent an XML document as a hierarchy of elements.
This is accomplished by building a tree of the elements as the XML tags are read from the file.
The DOM tree of the car above would have \li{<car>} as the root element.
This root element would have four children, \li{<make>}, \li{<model>}, \li{<year>}, and \li{<color>}.
We would traverse this DOM tree just like we would any other tree structure.
DOM trees can be searched by tag as well.
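For example, here is a small sketch using MiniDOM; we assume the car XML from above has been stored in a string named \li{xmlstring}:
\begin{lstlisting}
>>> from xml.dom import minidom
>>> # xmlstring holds the <car> document shown earlier
>>> doc = minidom.parseString(xmlstring)
>>> car = doc.documentElement          # the root element of the tree
>>> print car.tagName
car
>>> print doc.getElementsByTagName('make')[0].firstChild.data
Ford
>>> print doc.getElementsByTagName('color')[0].getAttribute('model')
rgb
\end{lstlisting}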
\subsection*{SAX}
SAX, the Simple API for XML, is a very fast and efficient way to read an XML file.
The main advantage of this method for reading an XML file is memory conservation.
A SAX parser reads XML sequentially instead of all at once.
As the SAX parser iterates through the file, it emits events at either the start or the end of tags.
You can provide functions to handle these events.
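For example, here is a minimal sketch of a SAX handler (the class name \li{TagCounter} is ours) that counts how many times each tag occurs in \li{contacts.xml}:
\begin{lstlisting}
>>> import xml.sax
>>> class TagCounter(xml.sax.ContentHandler):
...     def __init__(self):
...         xml.sax.ContentHandler.__init__(self)
...         self.counts = {}
...     def startElement(self, name, attrs):
...         # called once for every opening tag
...         self.counts[name] = self.counts.get(name, 0) + 1
...
>>> handler = TagCounter()
>>> xml.sax.parse('contacts.xml', handler)
>>> # handler.counts now maps each tag name to its number of occurrences
\end{lstlisting}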
\subsection*{ElementTree}
ElementTree is Python's unification of DOM and SAX into a single, high-level API for parsing and creating XML.
ElementTree provides a SAX-like interface for reading XML files via its \li{iterparse()} method.
This will have all the benefits of reading XML via SAX.
In addition to stream processing the XML, it will build the DOM tree as it iterates through each line of the XML input.
ElementTree provides a DOM-like interface for reading XML files via its \li{parse()} method.
This will create the tag tree that DOM creates.
We will demonstrate ElementTree using the following XML.
\lstinputlisting[style=FromFile,language=XML]{contacts.xml}
First, we will look at viewing an XML document as a tree similar to the DOM model described above.
\begin{lstlisting}
import xml.etree.ElementTree as et
f = et.parse('contacts.xml')
# manually traversing the tree
# we iterate through the element directly
# getchildren() is old and deprecated (not supported).
root = f.getroot()
children = list(root) # root has three children
person0 = children[0]
fields = list(person0) # the children elements of person0
# we can search the entire tree for specific elements
# searching for all tags equal to firstname
for n in root.iter('firstname'):
print n.text
# we can also filter with multiple tags
# notice we use a set lookup in the conditional inside the generator expression
fields = {'firstname', 'lastname', 'phone'}
fi = (x for x in root.iter() if x.tag in fields)
for n in fi:
print n.text
# we can even modify the document tree inplace
# let's remove Thor
# refer to the documentation of ElementTree for adding elements
for n in root.findall("person"):
if n.find("firstname").text == 'Thor':
root.remove(n)
# verify that Thor is really gone
for n in root.iter('firstname'):
print n.text
\end{lstlisting}
Next, we will look at ElementTree's \li{iterparse()} method.
This method is very similar to the SAX method for parsing XML.
There is one important difference.
ElementTree will still build the document tree in the background as it is parsing.
We can prevent this by calling each element's \li{clear()} method when we are finished processing it.
\begin{lstlisting}
f = et.iterparse('contacts.xml') # this is an iterator
for event, tag in f:
print "{}: {}".format(tag.tag, tag.text)
tag.clear()
# we can get both start and end events
# however, start events are mostly useful for looking at attributes
# or to trigger some other action on element starts.
# The element is not guaranteed to be complete until the end event.
for event, tag in et.iterparse('contacts.xml', events=('start', 'end')):
print "{} {}<{}>: {}".format(event, tag.tag, tag.attrib, tag.text)
\end{lstlisting}
\begin{problem}
Using ElementTree to parse books.xml, answer the following questions. Include the code you used with your answers.
1) Who is the author of the most expensive book in the list?
2) How many of the books were published before May 1, 2000?
3) Which books reference Microsoft in their descriptions?
\end{problem}
We will now explain how to fill a data file.
First you must specify some basic information like the dimension of your domain, its name, the problem type...
To define the problem dimension, we use the following keyword:
\begin{center}
\fbox{ \begin{minipage}[c]{0.5\textwidth}
\begin{alltt}
{\bf{Dimension}} \textit{2} or {\bf{Dimension}} \textit{3}
\end{alltt}
\end{minipage}}
\end{center}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Problems} \label{pbs}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
You have to define the problem type that you wish to solve.
\begin{center}
\fbox{ \begin{minipage}[c]{0.5\textwidth}
\begin{alltt}
{\bf{Pb\textit{\_type}}} \textit{my\_problem}
\end{alltt}
\end{minipage}}
\end{center}
Here are some of the available problem types:
\begin{itemize}
%\item \textbf{Pb\_Hydraulique$\left[\mbox{\_Turbulent}\right]$}
%\item \textbf{Pb\_Thermohydraulique$\left[\mbox{\_Turbulent}\right]$}
%\item \textbf{Pb\_Hydraulique\_Concentration$\left[\mbox{\_Turbulent}\right]$}
\item for incompressible flow: \textbf{Pb\_$\left[\mbox{\textcolor{magenta}{Thermo}}\right]$hydraulique$\left[\mbox{\textcolor{darkblue}{\_Concentration}}\right]\hspace{-0.15cm}\left[\mbox{\textcolor{Greeen}{\_Turbulent}}\right]$},
\item for quasi-compressible flow: \textbf{Pb\_Thermohydraulique$\left[\mbox{\textcolor{Greeen}{\_Turbulent}}\right]$\_QC},
%\item for quasi-compressible flow:\\
% \textbf{Pb\_Thermohydraulique$\left[\mbox{\textcolor{Greeen}{\_Turbulent}}\right]$\_QC$\left[\mbox{\textcolor{mauve}{\_fraction\_massique}}\right]$},
\item for solid: \textbf{Pb\_Conduction},
\item you can find all \href{\REFERENCEMANUAL\#Pbbase}{problem types} in the Reference Manual.
\end{itemize}
where:
\begin{itemize}
\item \textbf{hydraulique}: means that we will solve the Navier-Stokes equations without the energy equation,
\item \textbf{\textcolor{magenta}{Thermo}}: means that we will solve the Navier-Stokes equations with the energy equation,
\item \textbf{\textcolor{darkblue}{Concentration}}: means that we will solve transport equations for multiple constituents,
\item \textbf{\textcolor{Greeen}{Turbulent}}: means that we will simulate a turbulent flow and specify a turbulence model (RANS or LES). (Since version v1.8.0, \textcolor{Greeen}{Turbulence} models are in TrioCFD, not in TRUST.)
\item \textbf{Conduction}: resolution of the heat equation,
\item \textbf{QC}: the Navier-Stokes equations with the energy equation for a quasi-compressible fluid under the low Mach approach,
%\item \textbf{\textcolor{mauve}{fraction\_massique}}: hydraulic and energy equations are solved and a list of passive scalar equations may be added.
\end{itemize}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Domain definition}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
To define the domain, you must name it. This is done thanks to the following block:
\begin{center}
\fbox{ \begin{minipage}[c]{0.5\textwidth}
\begin{alltt}
{\bf{Domaine}} \textit{my\_domain}
\end{alltt}
\end{minipage}}
\end{center}
Then you must add your mesh to your simulation.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Mesh} \label{Mesh}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Notice the presence of the tags:
\begin{alltt}
\textcolor{blue}{\# BEGIN MESH \#}
...
\textcolor{blue}{\# END MESH \#}
\end{alltt}
in the data file of section \ref{data}.
This is necessary for parallel calculation (see section \ref{parallel}).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Allowed meshes}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\trust allows:
\begin{itemize}
\item quadrangular or triangular undeformed meshing for 2D cases (Figure \ref{2D_mesh}),
\begin{figure}[h!]
\begin{center}
\begin{tikzpicture}[scale=2, line width=1pt]
\coordinate (A) at (0,0) ;
\coordinate (B) at (1.5,0) ;
\coordinate (C) at (1.5,1) ;
\coordinate (D) at (0,1) ;
\draw[black] (A) -- (B) -- (C) -- (D) -- cycle ;
\coordinate (E) at (3,0) ;
\coordinate (F) at (4.5,0) ;
\coordinate (G) at (3.75,1) ;
\draw[black] (E) -- (F) -- (G) -- cycle ;
\end{tikzpicture}
\caption{2D allowed elements}
\label{2D_mesh}
\end{center}
\end{figure}
\item hexahedral or tetrahedral undeformed meshing for 3D cases (Figure \ref{3D_mesh}).
\begin{figure}[h!]
\begin{center}
\begin{tikzpicture}[scale=2, line width=1pt]
\coordinate (A) at (0 ,0 ,0) ;
\coordinate (B) at (1.5,0 ,0) ;
\coordinate (C) at (1.5,1 ,0) ;
\coordinate (D) at (0 ,1 ,0) ;
\coordinate (E) at (0 ,0 ,-0.5) ;
\coordinate (F) at (1.5,0 ,-0.5) ;
\coordinate (G) at (1.5,1 ,-0.5) ;
\coordinate (H) at (0 ,1 ,-0.5) ;
\draw (D) -- (A) -- (B) -- (C) -- (D) -- (H) -- (G) -- (F) -- (B);
\draw (C) -- (G);
\draw [dashed] (A) -- (E) -- (H);
\draw [dashed] (E) -- (F);
\coordinate (I) at (3 ,0 ,0) ;
\coordinate (J) at (4.5 ,0 ,0) ;
\coordinate (K) at (3.75,1.2,0) ;
\coordinate (L) at (4 ,0.5,-0.7) ;
\draw[black] (K) -- (I) -- (J) -- (K) -- (L) -- (J) ;
\draw [dashed] (I) -- (L);
\end{tikzpicture}
\caption{3D allowed elements}
\label{3D_mesh}
\end{center}
\end{figure}
\end{itemize}
\textbf{Be careful:} non-standard and hybrid meshes are not supported! (cf Figure \ref{hybr})
\begin{figure}[h!]
\begin{center}
\begin{tikzpicture}[scale=2, line width=1pt]
\coordinate (A1) at (0,0) ;
\coordinate (A2) at (0,1) ;
\coordinate (A3) at (0,2) ;
\coordinate (B1) at (1.5,0 ) ;
\coordinate (B2) at (1.5,0.5) ;
\coordinate (B3) at (1.5,1 ) ;
\coordinate (B4) at (1.5,1.5) ;
\coordinate (B5) at (1.5,2 ) ;
\coordinate (C1) at (3,0 ) ;
\coordinate (C2) at (3,0.5) ;
\coordinate (C3) at (3,1 ) ;
\coordinate (C4) at (3,1.5) ;
\coordinate (C5) at (3,2 ) ;
\draw (A1) -- (C1) -- (C5) -- (A3) -- (A1);
\draw (A2) -- (C3);
\draw (B1) -- (B5);
\draw (B4) -- (C4);
\draw (B2) -- (C2);
\draw [ultra thick,red] (-0.25,2.25) -- (3.25,-0.25) ;
\coordinate (D1) at (4 ,0) ;
\coordinate (D2) at (5.5 ,0) ;
\coordinate (D3) at (5.5 ,1.25) ;
\coordinate (D4) at (4 ,1.25) ;
\coordinate (D5) at (4.75,2.25) ;
\draw (D4) -- (D1) -- (D2) -- (D3) -- (D4) -- (D5) -- (D3);
\draw [ultra thick,red] (3.75,2) -- (6,-0.25) ;
\coordinate (E1) at (6.5 ,0 ,0) ;
\coordinate (E2) at (8 ,0 ,0) ;
\coordinate (E3) at (8 ,1.25,0) ;
\coordinate (E4) at (6.5 ,1.25,0) ;
\coordinate (E5) at (7.25,2.25,0) ;
\coordinate (E6) at (8 ,0 ,-0.7) ;
\coordinate (E7) at (8 ,1.25,-0.7) ;
\coordinate (E8) at (6.5 ,1.25,-0.7) ;
\draw (E4) -- (E1) -- (E2) -- (E3) -- (E4) -- (E5) -- (E3);
\draw (E2) -- (E6) -- (E7) -- (E8) -- (E4);
\draw (E8) -- (E5) -- (E7) -- (E3) -- (E8);
\draw [ultra thick,red] (6.5,2) -- (8.5,-0.25) ;
\end{tikzpicture}
\caption{Prohibited meshes}
\label{hybr}
\end{center}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Import a mesh file}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
If your mesh was generated with an external tool like \href{http://www.salome-platform.org}{Salom\'e} (open source software), \href{http://resource.ansys.com/Products/Other+Products/ANSYS+ICEM+CFD}{ICEM} (commercial software), \href{http://gmsh.info/}{Gmsh} (open source software, included in \trust package) or \href{http://www-cast3m.cea.fr/}{Cast3M} (CEA software), then you must use one of the following keywords into your data file:
\begin{itemize}
\item \href{\REFERENCEMANUAL\#readmed}{\textbf{Read\_MED}} for a MED file from \href{http://www.salome-platform.org}{Salom\'e}, \href{http://gmsh.info/}{Gmsh},... ,
\item \href{\REFERENCEMANUAL\#readfile}{\textbf{Read\_File}} for a binary mesh file from \href{http://resource.ansys.com/Products/Other+Products/ANSYS+ICEM+CFD}{ICEM},
\item for another format, see the \href{\REFERENCEMANUAL\#read}{\trustref Reference Manual}.
\end{itemize}
If you want to learn how to build a mesh with Salom\'e or Gmsh and read it with \trust, you can look at the exercises of the \trust tutorial: \href{TRUST_tutorial.pdf\#salome}{here} for Salom\'e and \href{TRUST_tutorial.pdf\#gmsh}{here} for Gmsh.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Quickly create a mesh}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Here is an example of a simple geometry (a non-complex channel type) built using the internal meshing tool of \trust:
\begin{center}
\fbox{ \begin{minipage}[c]{0.9\textwidth}
\begin{alltt}
{\bf{Mailler}} \textit{my\_domain}
\{
\hspace{1cm} \textcolor{blue}{/* Define the domain with one cavity */}
\hspace{1cm} \textcolor{blue}{/* cavity 1m*2m with 5*22 cells */}
\hspace{1cm} {\bf{Pave}} \textit{box}
\hspace{1cm} \{
\hspace{2cm} {\bf{Origine}} 0. 0.
\hspace{2cm} {\bf{Longueurs}} 1 2
\hspace{2cm} \textcolor{blue}{/* Cartesian grid */}
\hspace{2cm} {\bf{Nombre\_de\_Noeuds}} 6 23
\hspace{2cm} \textcolor{blue}{/* Uniform mesh */}
\hspace{2cm} {\bf{Facteurs}} 1. 1.
\hspace{1cm} \}
\hspace{1cm} \{
\hspace{2cm} \textcolor{blue}{/* Definition and names of boundary conditions */}
\hspace{2cm} {\bf{bord}} \textit{Inlet} \hspace{0.25cm} X = 0. 0. <= Y <= 2.
\hspace{2cm} {\bf{bord}} \textit{Outlet} \hspace{0.05cm} X = 1. 0. <= Y <= 2.
\hspace{2cm} {\bf{bord}} \textit{Upper} \hspace{0.25cm} Y = 2. 0. <= X <= 1.
\hspace{2cm} {\bf{bord}} \textit{Lower} \hspace{0.25cm} Y = 0. 0. <= X <= 1.
\hspace{1cm} \}
\}
\end{alltt}
\end{minipage}}
\end{center}
To use this mesh in your data file, you just have to add the previous block in your data file or save it in a file named for example "\textit{my\_mesh.geo}" and add the line:\\
\begin{center}
\fbox{ \begin{minipage}[c]{0.5\textwidth}
\begin{alltt}
{\bf{Read\_file}} \textit{my\_mesh.geo} \textcolor{red}{{\bf{;}}}
\end{alltt}
\end{minipage}}
\end{center}
\underline{Do not forget the semicolon at the end of the line!}\\
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Transform mesh within the data file}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
You can also make transformations on your mesh after the \textbf{"Mailler"} or \textbf{"Read\_*"} command, using the following keywords:
\begin{itemize}
\item \href{\REFERENCEMANUAL\#triangulate}{\textbf{Trianguler}} to triangulate your 2D cells and create an unstructured mesh.
\item \href{\REFERENCEMANUAL\#tetraedriser}{\textbf{Tetraedriser}} to tetrahedralise 3D cells and create an unstructured mesh.
\item \href{\REFERENCEMANUAL\#raffineranisotrope}{\textbf{Raffiner\_anisotrope}}/\href{\REFERENCEMANUAL\#raffinerisotrope}{\textbf{Raffiner\_isotrope}} to triangulate/tetrahedralise elements of an unstructured mesh.
\item \href{\REFERENCEMANUAL\#extrudebord}{\textbf{ExtrudeBord}} to generate an extruded mesh from a boundary of a tetrahedral or a hexahedral mesh.
\Note that ExtrudeBord in VEF generates 3 or 14 tetrahedra from extruded prisms.
\item \href{\REFERENCEMANUAL\#regroupebord}{\textbf{RegroupeBord}} to build a new boundary with several boundaries of the domain.
\item \href{\REFERENCEMANUAL\#transformer}{\textbf{Transformer}} to transform the coordinates of the geometry.
\item for other commands, see the section \href{\REFERENCEMANUAL\#interprete}{interprete} of the \trustref Reference Manual.
\end{itemize}
\Note that these mesh modifications work on all mesh types (i.e. also for \textbf{*.geo} or \textbf{*.bin} or \textbf{*.med} files).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Test your mesh}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
The keyword \href{\REFERENCEMANUAL\#discretiserdomaine}{\textbf{Discretiser\_domaine}} is useful to discretize the domain (faces will be created) without defining a problem.
Indeed, you can create a minimal data file, post-process your mesh in lata format (for example) and visualize it with VisIt. \\
\Note that you must name all the boundaries!\\
Here is an example of this kind of data file:
\begin{center}
\fbox{ \begin{minipage}[c]{0.8\textwidth}
\begin{center}
\textbf{my\_data\_file.data}
\end{center}
\end{minipage}}
\fbox{ \begin{minipage}[c]{0.8\textwidth}
\begin{alltt}
{\bf{dimension 3}}
{\bf{Domaine}} \textit{my\_domain}
{\bf{Mailler}} \textit{my\_domain}
\{
\hspace{1cm} {\bf{Pave}} \textit{box}
\hspace{1cm} \{
\hspace{2cm} {\bf{Origine}} 0. 0. 0.
\hspace{2cm} {\bf{Longueurs}} 1 2 1
\hspace{2cm} {\bf{Nombre\_de\_Noeuds}} 6 23 6
\hspace{2cm} {\bf{Facteurs}} 1. 1. 1.
\hspace{1cm} \}
\hspace{1cm} \{
\hspace{2cm} {\bf{bord}} \textit{Inlet} \hspace{0.25cm} X = 0. 0. <= Y <= 2. 0. <= Z <= 1.
\hspace{2cm} {\bf{bord}} \textit{Outlet} \hspace{0.05cm} X = 1. 0. <= Y <= 2. 0. <= Z <= 1.
\hspace{2cm} {\bf{bord}} \textit{Upper} \hspace{0.25cm} Y = 2. 0. <= X <= 1. 0. <= Z <= 1.
\hspace{2cm} {\bf{bord}} \textit{Lower} \hspace{0.25cm} Y = 0. 0. <= X <= 1. 0. <= Z <= 1.
\hspace{2cm} {\bf{bord}} \textit{Front} \hspace{0.25cm} Z = 0. 0. <= X <= 1. 0. <= Y <= 2.
\hspace{2cm} {\bf{bord}} \textit{Back} \hspace{0.45cm} Z = 1. 0. <= X <= 1. 0. <= Y <= 2.
\hspace{1cm} \}
\}
{\bf{discretiser\_domaine}} \textit{my\_domain}
{\bf{postraiter\_domaine}} \{ {\bf{domaine}} \textit{my\_domain} {\bf{format lata}} \}
{\bf{End}}
\end{alltt}
\end{minipage}}
\end{center}
To use it, launch in a bash terminal:
\begin{verbatim}
# if not already done
> source $my_path_to_TRUST_installation/env_TRUST.sh
# then
> trust my_data_file
> visit -o my_data_file.lata &
\end{verbatim}
To see how to use VisIt, look at the first \trust tutorial exercise: \href{TRUST_tutorial.pdf\#exo1}{Flow around an obstacle}.\\
If you want to learn how to make a mesh with Salom\'e or Gmsh and read it with \trust, you can look at the exercises of the \trust tutorial: \href{TRUST_tutorial.pdf\#salome}{here} for Salom\'e and \href{TRUST_tutorial.pdf\#gmsh}{here} for Gmsh.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Discretization}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
You have to specify the discretization type which can be \href{\REFERENCEMANUAL\#vdf}{\textbf{VDF}}, \href{\REFERENCEMANUAL\#ef}{\textbf{EF}} or \href{\REFERENCEMANUAL\#vefprep1b}{\textbf{VEFPreP1B}}.\\
In \textbf{VDF} discretization, the locations of the unknowns are drawn in the Figure \ref{fig_VDF}.\\
\begin{figure}[h!]
\begin{center}
\begin{tikzpicture}[scale=2, line width=1pt]
\coordinate (A) at (0,0) ;
\coordinate (B) at (1,0) ;
\coordinate (C) at (2,0) ;
\coordinate (D) at (2,1) ;
\coordinate (E) at (2,2) ;
\coordinate (F) at (1,2) ;
\coordinate (G) at (0,2) ;
\coordinate (H) at (0,1) ;
\draw[black] (A) -- (C) -- (E) -- (G) -- cycle ;
\draw[black] (H) -- (D) ;
\draw[black] (F) -- (B) ;
\draw[black,fill=red] (0.5,0.5) circle (0.07);
\draw[black,fill=red] (1.5,0.5) circle (0.07);
\draw[black,fill=red] (0.5,1.5) circle (0.07);
\draw[black,fill=red] (1.5,1.5) circle (0.07);
\draw[blue] [->] [>=latex] (0.5,0) -- (0.5,0.35);
\draw[blue] [->] [>=latex] (0.5,1) -- (0.5,1.35);
\draw[blue] [->] [>=latex] (0.5,2) -- (0.5,2.35);
\draw[blue] [->] [>=latex] (1.5,0) -- (1.5,0.35);
\draw[blue] [->] [>=latex] (1.5,1) -- (1.5,1.35);
\draw[blue] [->] [>=latex] (1.5,2) -- (1.5,2.35);
\draw[blue] [->] [>=latex] (0,0.5) -- (0.35,0.5);
\draw[blue] [->] [>=latex] (1,0.5) -- (1.35,0.5);
\draw[blue] [->] [>=latex] (2,0.5) -- (2.35,0.5);
\draw[blue] [->] [>=latex] (0,1.5) -- (0.35,1.5);
\draw[blue] [->] [>=latex] (1,1.5) -- (1.35,1.5);
\draw[blue] [->] [>=latex] (2,1.5) -- (2.35,1.5);
\draw[blue] (4,2) node[above]{$\vec{V} \cdot \vec{n}$} ;
\draw[blue] [<-] (1.65,2.2) -- (3.7,2.1);
\draw[blue] [<-] (2.25,1.6) -- (3.7,2.1);
\draw[red] (4,0.5) node {$P, T, k, \varepsilon, ...$} ;
\draw[red] [<-] (1.6,1.4) -- (3.45,0.55);
\end{tikzpicture}
\caption{VDF unknown locations}
\label{fig_VDF}
\end{center}
\end{figure}
For \textbf{VEFPreP1B}, the locations of the unknowns are drawn in the Figure \ref{fig_VEF}.\\
\begin{figure}[h!]
\begin{center}
\begin{tikzpicture}[scale=1, line width=1pt]
\coordinate (A) at (1.5,0) ;
\coordinate (B) at (6,0.5) ;
\coordinate (C) at (8,3.5) ;
\coordinate (D) at (2.5,4.5) ;
\coordinate (E) at (0,3) ;
\coordinate (F) at (4,2.5) ;
\draw[black] (A) -- (B) -- (C) -- (D) -- (E) -- (A) -- (F) -- (B);
\draw[black] (C) -- (F) -- (D);
\draw[black] (E) -- (F);
\draw[black,fill=black] (A) circle (0.15);
\draw[black,fill=black] (B) circle (0.15);
\draw[black,fill=black] (C) circle (0.15);
\draw[black,fill=black] (D) circle (0.15);
\draw[black,fill=black] (E) circle (0.15);
\draw[black,fill=black] (F) circle (0.15);
\draw[black,fill=red] (3.75,1) circle (0.15);
\draw[black,fill=red] (6,2) circle (0.15);
\draw[black,fill=red] (4.75,3.5) circle (0.15);
\draw[black,fill=red] (2.2,3.4) circle (0.15);
\draw[black,fill=red] (1.75,1.75) circle (0.15);
\begin{scope}[xshift=5cm, yshift=1.5cm]
\draw[blue] [->] [>=latex] (0,0) -- (0.8,0.3);
\end{scope}
\begin{scope}[xshift=7cm, yshift=2cm]
\draw[blue] [->] [>=latex] (0,0) -- (0.8,0.3);
\end{scope}
\begin{scope}[xshift=5.2cm, yshift=4cm]
\draw[blue] [->] [>=latex] (0,0) -- (0.8,0.3);
\end{scope}
\begin{scope}[xshift=1.25cm, yshift=3.75cm]
\draw[blue] [->] [>=latex] (0,0) -- (0.8,0.3);
\end{scope}
\begin{scope}[xshift=0.75cm, yshift=1.5cm]
\draw[blue] [->] [>=latex] (0,0) -- (0.8,0.3);
\end{scope}
\begin{scope}[xshift=2.75cm, yshift=1.25cm]
\draw[blue] [->] [>=latex] (0,0) -- (0.8,0.3);
\end{scope}
\begin{scope}[xshift=3.75cm, yshift=0.25cm]
\draw[blue] [->] [>=latex] (0,0) -- (0.8,0.3);
\end{scope}
\begin{scope}[xshift=6cm, yshift=3cm]
\draw[blue] [->] [>=latex] (0,0) -- (0.8,0.3);
\end{scope}
\begin{scope}[xshift=3.25cm, yshift=3.5cm]
\draw[blue] [->] [>=latex] (0,0) -- (0.8,0.3);
\end{scope}
\begin{scope}[xshift=2cm, yshift=2.75cm]
\draw[blue] [->] [>=latex] (0,0) -- (0.8,0.3);
\end{scope}
\draw[blue] (11.5,3.5) node {$\vec{V_x}, \vec{V_y}, \vec{V_z}, T, k, \varepsilon, ...$} ;
\draw[blue] [->] [>=latex] (9.5,3.5) -- (5.6,4.1);
\draw[blue] [->] [>=latex] (9.5,3.5) -- (3.5,3.5);
\draw[blue] [->] [>=latex] (9.5,3.5) -- (6.25,3);
\draw[red] (11.5,2) node {$P$ for P0 discretization} ;
\draw[red] [->] [>=latex] (9.25,2) -- (6.25,2);
\draw[black] (11.5,1) node {$P$ for P1 discretization} ;
\draw[black] [->] [>=latex] (9.25,1) -- (6.25,0.5);
\end{tikzpicture}
\caption{VEF unknown locations in 2D}
\label{fig_VEF}
\end{center}
\end{figure}
In 3D, for the pressure, we can also use the P0+P1+Pa discretization for flows with a strong source term and a low velocity field.
In this case the P0+P1 pressure gradient has trouble matching the source term, so we use the P0+P1+Pa discretization (cf Figure \ref{fig_VEF_pressure_loc}).
\begin{figure}[h!]
\begin{center}
\begin{tikzpicture}[scale=1, line width=1pt]
\coordinate (A) at (0.5,1) ;
\coordinate (B) at (3,0.5) ;
\coordinate (C) at (4,2.5) ;
\coordinate (D) at (2,4) ;
\draw[black] (B) -- (C) -- (D) -- (A) -- (B) -- (D);
\draw[black,dashed] (A) -- (C);
\draw[black,fill=black] (A) circle (0.15);
\draw[black,fill=black] (B) circle (0.15);
\draw[black,fill=black] (C) circle (0.15);
\draw[black,fill=black] (D) circle (0.15);
\draw[black,fill=Greeen] (1.75,0.75) circle (0.15);
\draw[black,fill=Greeen] (3.5,1.5) circle (0.15);
\draw[black,fill=Greeen] (3,3.25) circle (0.15);
\draw[black,fill=Greeen] (1.25,2.5) circle (0.15);
\draw[black,fill=Greeen] (2.25,1.75) circle (0.15);
\draw[black,fill=Greeen] (2.5,2.25) circle (0.15);
\draw[black,fill=red] (2.2,2) circle (0.15);
\draw[black] (10,3) node {$P$ for P1 discretization} ;
\draw[black] [->] [>=latex] (7.75,3) -- (2.2,4);
\draw[black] [->] [>=latex] (7.75,3) -- (4.2,2.5);
\draw[red] (10,2) node {$P$ for P0 discretization} ;
\draw[red] [->] [>=latex] (7.75,2) -- (2.5,2);
\draw[Greeen] (10,1) node {$P$ for Pa discretization} ;
\draw[Greeen] [->] [>=latex] (7.75,1) -- (3.7,1.5);
\draw[Greeen] [->] [>=latex] (7.75,1) -- (2,0.75);
\end{tikzpicture}
\caption{VEF pressure location in 3D}
\label{fig_VEF_pressure_loc}
\end{center}
\end{figure}
To specify the wanted discretization, you have to add the following block to your data file:
\begin{center}
\fbox{ \begin{minipage}[c]{0.5\textwidth}
\begin{alltt}
\textit{{\bf{Discretization\_type}} my\_discretization}
[{\bf{Read }} \textit{my\_discretization \{ ... \}}]
\end{alltt}
\end{minipage}}
\end{center}
You can add parameters to your discretization with the optional keyword \href{\REFERENCEMANUAL\#read}{\textbf{Read}} (see \href{\REFERENCEMANUAL\#vefprep1b}{\textbf{VEFPreP1B discretization}}).
On the \href{http://triocfd.cea.fr/}{TrioCFD website}, you can find information about:
\begin{itemize}
\item \textbf{VDF} discretization in the \href{http://triocfd.cea.fr/Documents/DOCS THESES/these_chatelain_2004.pdf}{PhD thesis of A. Chatelain},
\item \textbf{VEFPreP1B} discretization (Crouzeix-Raviart elements) in the \href{http://triocfd.cea.fr/Documents/DOCS THESES/these_fortin_2006.pdf}{PhD thesis of T. Fortin} and \href{http://triocfd.cea.fr/Documents/DOCS THESES/These_Heib_2003.pdf}{PhD thesis of S. Heib}.
\end{itemize}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Time schemes}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Now you can choose your time scheme to solve your problem. For this you must
specify the wanted time scheme type and give it a name.
Then you have to specify its parameters by filling the associated \textbf{"Read"} block.
\begin{center}
\fbox{ \begin{minipage}[c]{0.5\textwidth}
\begin{alltt}
{\bf{\textit{Scheme\_type}}} \textit{my\_time\_scheme}
{\bf{Read}} \textit{my\_time\_scheme} \{ ... \}
\end{alltt}
\end{minipage}}
\end{center}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Some available time schemes}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%Here are some \href[page=DOCLINK_TIME SCHEMES]{\REFERENCEMANUAL}{available types of explicit schemes}:
Here are some available types of explicit schemes:
\begin{itemize}
\item \href{\REFERENCEMANUAL\#eulerscheme}{\textbf{Scheme\_Euler\_explicit}},
\item \href{\REFERENCEMANUAL\#schemaadamsbashforthorder2}{\textbf{Schema\_Adams\_Bashforth\_order\_2}},
%\item \textbf{Schema\_Adams\_Bashforth\_order\_3}
%\item \textbf{Runge\_Kutta\_Rationnel\_ordre\_2}
\item \href{\REFERENCEMANUAL\#rungekuttaordre3}{\textbf{Runge\_Kutta\_ordre\_3}},
%\item \textbf{Runge\_Kutta\_ordre\_4\_D3P}
%\item \textbf{Schema\_Predictor\_Corrector}
%\item \textbf{Sch\_CN\_iteratif}
%\item \textbf{Sch\_CN\_EX\_iteratif}
%\item \textbf{Schema\_Phase\_Field}
%\item \textbf{RK3\_FT}
\end{itemize}
%And also some \href[page=DOCLINK_TIME SCHEMES]{\REFERENCEMANUAL}{available types of implicit schemes}:
And also some available types of implicit schemes:
\begin{itemize}
\item \href{\REFERENCEMANUAL\#schemaeulerimplicite}{\textbf{Scheme\_Euler\_implicit}},
%\item \textbf{Schema\_Adams\_Moulton\_order\_2}
\item \href{\REFERENCEMANUAL\#schemaadamsmoultonorder3}{\textbf{Schema\_Adams\_Moulton\_order\_3}}.
%\item \textbf{Schema\_Backward\_Differentiation\_order\_2}
%\item \textbf{Schema\_Backward\_Differentiation\_order\_3}
\end{itemize}
For other schemes, see \href{\REFERENCEMANUAL\#schematempsbase}{this section} of the Reference Manual.\\
\Note that you can use semi-implicit schemes activating the \textbf{diffusion\_implicite} keyword in your explicit time scheme.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Calculation stopping condition}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
You must specify at least one stopping condition for your simulation.
It can be:
\begin{itemize}
\item the final time: \textbf{tmax},
\item the maximal allowed CPU time: \textbf{tcpumax},
\item the maximal number of time steps: \textbf{nb\_pas\_dt\_max},
\item the convergence threshold: \textbf{seuil\_statio}.
\end{itemize}
\Note that if the time step reaches the minimal time step \textbf{dt\_min}, \trust will stop the calculation.\\
If you want to stop your running calculation properly (i.e. with all saves), you may use the \textit{my\_data\_file}.stop file (cf section \ref{stopfile}).
While the simulation is running, you can see the "\textbf{0}" value in that file.\\
To stop it, put a "\textbf{1}" instead of the "\textbf{0}", save the file, and at the next iteration the calculation will stop properly.\\
If you don't change anything in that file, at the end of the calculation you can see that "\textbf{Finished correctly}" is written in it.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Medium/Type of fluid}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
To specify the medium or fluid, you must add the following block.
\begin{center}
\fbox{ \begin{minipage}[c]{0.5\textwidth}
\begin{alltt}
{\bf{\textit{Fluid\_type}}} \textit{my\_medium}
{\bf{Read}} \textit{my\_medium} \{ ... \}
\end{alltt}
\end{minipage}}
\end{center}
{\bf{\textit{Fluid\_type}}} can be one of the following:
\begin{itemize}
\item \href{\REFERENCEMANUAL\#fluideincompressible}{\textbf{Fluide\_incompressible}}
\item \href{\REFERENCEMANUAL\#fluidequasicompressible}{\textbf{Fluide\_quasi\_compressible}}
%\item \textbf{Fluide\_Ostwald}
%\item \textbf{Constituant}
\item \href{\REFERENCEMANUAL\#solide}{\textbf{Solide}}
\item for other types and more information see \href{\REFERENCEMANUAL\#milieubase}{\trustref Reference Manual}.
\end{itemize}
If you want to use more than one medium, you can add another block for each medium or fluid.\\
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Add gravity}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
If needed, you can add a gravity term to your simulation. This is done by adding
a uniform field, no matter its name. For example in 2D:
\begin{center}
\fbox{ \begin{minipage}[c]{0.5\textwidth}
\begin{alltt}
\textcolor{blue}{\# Gravity vector definition \#}
{\bf{Uniform\_field}} \textit{my\_gravity}
{\bf{Read}} \textit{my\_gravity 2 0 -9.81}
\end{alltt}
\end{minipage}}
\end{center}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Objects association and discretization}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Association}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Until now, we have created separate objects; now we must associate them with one another.
For this, we must use the \href{\REFERENCEMANUAL\#associate}{\textbf{Associate}} interpretor:
\begin{center}
\fbox{ \begin{minipage}[c]{0.7\textwidth}
\begin{alltt}
\textcolor{blue}{\# Association between the different objects \#}
{\bf{Associate}} \textit{my\_problem my\_domain}
{\bf{Associate}} \textit{my\_problem my\_time\_scheme}
{\bf{Associate}} \textit{my\_problem my\_medium}
[{\bf{Associate}} \textit{my\_medium my\_gravity}]
\end{alltt}
\end{minipage}}
\end{center}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Discretization}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Then you must discretize your domain using the \href{\REFERENCEMANUAL\#discretize}{\textbf{Discretize}} interpretor:
\begin{center}
\fbox{ \begin{minipage}[c]{0.7\textwidth}
\begin{alltt}
{\bf{Discretize}} \textit{my\_problem my\_discretization}
\end{alltt}
\end{minipage}}
\end{center}
The problem \textit{my\_problem} is discretized according to the \textit{my\_discretization} discretization.\\
IMPORTANT: A number of objects must be already associated (a domain, time scheme, central object) prior to invoking the \textbf{Discretize} keyword. The physical properties of this central object must also have been read.\\
\Note that when the discretization step succeeds, the mesh is validated by the code.\\
At this level of your data file, you can visualize your mesh with the "\textbf{-mesh}" option of the trust script; it will directly open your mesh with VisIt.
\begin{verbatim}
# if not already done
> source $my_path_to_TRUST_installation/env_TRUST.sh
# then
> trust -mesh my_data_file
\end{verbatim}
It will only run the mesh and stop, the problem will not be solved.
\documentclass[]{article}
\usepackage{lmodern}
\usepackage{amssymb,amsmath}
\usepackage{ifxetex,ifluatex}
\usepackage{fixltx2e} % provides \textsubscript
\ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\else % if luatex or xelatex
\ifxetex
\usepackage{mathspec}
\else
\usepackage{fontspec}
\fi
\defaultfontfeatures{Ligatures=TeX,Scale=MatchLowercase}
\fi
% use upquote if available, for straight quotes in verbatim environments
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
% use microtype if available
\IfFileExists{microtype.sty}{%
\usepackage{microtype}
\UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts
}{}
\usepackage[margin=1in]{geometry}
\usepackage{hyperref}
\hypersetup{unicode=true,
pdftitle={Spring 2018 Strategic Plan},
pdfauthor={Ada},
pdfborder={0 0 0},
breaklinks=true}
\urlstyle{same} % don't use monospace font for urls
\usepackage{longtable,booktabs}
\usepackage{graphicx,grffile}
\makeatletter
\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi}
\def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi}
\makeatother
% Scale images if necessary, so that they will not overflow the page
% margins by default, and it is still possible to overwrite the defaults
% using explicit options in \includegraphics[width, height, ...]{}
\setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio}
\IfFileExists{parskip.sty}{%
\usepackage{parskip}
}{% else
\setlength{\parindent}{0pt}
\setlength{\parskip}{6pt plus 2pt minus 1pt}
}
\setlength{\emergencystretch}{3em} % prevent overfull lines
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\setcounter{secnumdepth}{0}
% Redefines (sub)paragraphs to behave more like sections
\ifx\paragraph\undefined\else
\let\oldparagraph\paragraph
\renewcommand{\paragraph}[1]{\oldparagraph{#1}\mbox{}}
\fi
\ifx\subparagraph\undefined\else
\let\oldsubparagraph\subparagraph
\renewcommand{\subparagraph}[1]{\oldsubparagraph{#1}\mbox{}}
\fi
%%% Use protect on footnotes to avoid problems with footnotes in titles
\let\rmarkdownfootnote\footnote%
\def\footnote{\protect\rmarkdownfootnote}
%%% Change title format to be more compact
\usepackage{titling}
% Create subtitle command for use in maketitle
\newcommand{\subtitle}[1]{
\posttitle{
\begin{center}\large#1\end{center}
}
}
\setlength{\droptitle}{-2em}
\title{Spring 2018 Strategic Plan}
\pretitle{\vspace{\droptitle}\centering\huge}
\posttitle{\par}
\author{Ada}
\preauthor{\centering\large\emph}
\postauthor{\par}
\predate{\centering\large\emph}
\postdate{\par}
\date{April 11, 2018}
\begin{document}
\maketitle
\textbf{Goals:} Specific, Measurable, Attractive, Realistic, Time-framed
\section{Writing/Research Goals}\label{writingresearch-goals}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
Presentation for ASM on rejected articles
\begin{enumerate}
\def\labelenumii{\roman{enumii})}
\tightlist
\item
Analyze/plot data
\item
Gather current data
\item
Choose important elements
\item
Rmarkdown memo
\item
Make presentation
\item
Practice talk
\end{enumerate}
\item
Parse/sort all ASM XML files
\begin{enumerate}
\def\labelenumii{\roman{enumii})}
\tightlist
\item
l/sapply parsing functions to multiple XML files
\item
link data from XML files for relevant manuscripts
\item
learn to get on Flux
\item
parse/compile data from 100 XML files
\item
parse/compile data from all AEM XML files
\end{enumerate}
\item
Complete 2 Coursera courses
\begin{enumerate}
\def\labelenumii{\roman{enumii})}
\tightlist
\item
Intro to Probability \& Data
\item
Inferential Statistics
\end{enumerate}
\item
Submit BA manuscript
\begin{enumerate}
\def\labelenumii{\roman{enumii})}
\tightlist
\item
Fix citation issues
\item
Get LAESI data - Ashu
\item
Generate/fix/rearrange figures
\item
Rewrite text as needed - limitations
\item
Get further co-author comments
\item
Make final changes
\item
Double check references
\item
Submit as preprint
\end{enumerate}
\end{enumerate}
\subsubsection{Looking ahead to next Strategic
Plan}\label{looking-ahead-to-next-strategic-plan}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
Complete 2 Coursera courses
\begin{enumerate}
\def\labelenumii{\roman{enumii})}
\tightlist
\item
Linear Regression \& Modeling
\item
Bayesian Statistics
\end{enumerate}
\item
Learn about genderize.io \& how to link data
\end{enumerate}
\section{Personal Goals}\label{personal-goals}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
Yoga 2 - 3x per week: Friday/Sunday/Tuesday
\item
Meal planning: Fridays
\begin{enumerate}
\def\labelenumii{\roman{enumii})}
\tightlist
\item
find good recipes
\item
make shopping lists
\item
cook/plan what nights
\end{enumerate}
\item
Clean and purge spare room
\begin{enumerate}
\def\labelenumii{\roman{enumii})}
\tightlist
\item
vacuum \& take out trash
\item
sort papers on desk
\item
sort/purge Dax's old clothes \& toys
\item
clean \& purge closet
\item
clean \& purge dresser
\item
dispose of purged items
\item
clean carpets
\end{enumerate}
\end{enumerate}
\newpage
\section{Strategic Plan}\label{strategic-plan}
\begin{longtable}[]{@{}clll@{}}
\toprule
\begin{minipage}[b]{0.04\columnwidth}\centering\strut
Date\strut
\end{minipage} & \begin{minipage}[b]{0.24\columnwidth}\raggedright\strut
Distractions\strut
\end{minipage} & \begin{minipage}[b]{0.33\columnwidth}\raggedright\strut
Writing/Research Goals\strut
\end{minipage} & \begin{minipage}[b]{0.28\columnwidth}\raggedright\strut
Personal/Other Goals\strut
\end{minipage}\tabularnewline
\midrule
\endhead
\begin{minipage}[t]{0.04\columnwidth}\centering\strut
4/2\strut
\end{minipage} & \begin{minipage}[t]{0.24\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.33\columnwidth}\raggedright\strut
Intro wk2\strut
\end{minipage} & \begin{minipage}[t]{0.28\columnwidth}\raggedright\strut
yoga x2; meal plan; taxes\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.04\columnwidth}\centering\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.24\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.33\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.28\columnwidth}\raggedright\strut
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.04\columnwidth}\centering\strut
4/9\strut
\end{minipage} & \begin{minipage}[t]{0.24\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.33\columnwidth}\raggedright\strut
Meet with Pat \& fine-tune ASM data goals; Fix citation issues; Intro
wk3\strut
\end{minipage} & \begin{minipage}[t]{0.28\columnwidth}\raggedright\strut
yoga x2; meal plan; vacuum \& sort Dax's clothes/toys\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.04\columnwidth}\centering\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.24\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.33\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.28\columnwidth}\raggedright\strut
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.04\columnwidth}\centering\strut
4/16\strut
\end{minipage} & \begin{minipage}[t]{0.24\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.33\columnwidth}\raggedright\strut
Make requested changes \& contact Melissa; Intro wk4; Bug Ashu; l/sapply
parsing functions to multiple XML files\strut
\end{minipage} & \begin{minipage}[t]{0.28\columnwidth}\raggedright\strut
yoga x2; meal plan; take clothes/toys to OUaC\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.04\columnwidth}\centering\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.24\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.33\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.28\columnwidth}\raggedright\strut
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.04\columnwidth}\centering\strut
4/23\strut
\end{minipage} & \begin{minipage}[t]{0.24\columnwidth}\raggedright\strut
Rackham Graduation (F)\strut
\end{minipage} & \begin{minipage}[t]{0.33\columnwidth}\raggedright\strut
Analyze/plot/gather ASM data; Intro wk5; generate/fix MS figures; link
data from XML pairs\strut
\end{minipage} & \begin{minipage}[t]{0.28\columnwidth}\raggedright\strut
yoga x2; meal plan; sort papers\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.04\columnwidth}\centering\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.24\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.33\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.28\columnwidth}\raggedright\strut
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.04\columnwidth}\centering\strut
4/30\strut
\end{minipage} & \begin{minipage}[t]{0.24\columnwidth}\raggedright\strut
Lab meeting presentation(R); Hayley \& Zack in town (F)\strut
\end{minipage} & \begin{minipage}[t]{0.33\columnwidth}\raggedright\strut
Analyze/gather/plot ASM data; Inferential wk1; rearrange figures \&
rewrite text as needed\strut
\end{minipage} & \begin{minipage}[t]{0.28\columnwidth}\raggedright\strut
yoga x2; meal plan\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.04\columnwidth}\centering\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.24\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.33\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.28\columnwidth}\raggedright\strut
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.04\columnwidth}\centering\strut
5/7\strut
\end{minipage} & \begin{minipage}[t]{0.24\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.33\columnwidth}\raggedright\strut
Choose important points for presentation; send MS back to co-authors;
Inferential wk2\strut
\end{minipage} & \begin{minipage}[t]{0.28\columnwidth}\raggedright\strut
yoga x2; meal plan; purge/organize closet\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.04\columnwidth}\centering\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.24\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.33\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.28\columnwidth}\raggedright\strut
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.04\columnwidth}\centering\strut
5/14\strut
\end{minipage} & \begin{minipage}[t]{0.24\columnwidth}\raggedright\strut
Anniversary (T)\strut
\end{minipage} & \begin{minipage}[t]{0.33\columnwidth}\raggedright\strut
finalize Rmarkdown; Inferential wk3; parse/compile 50 XML files\strut
\end{minipage} & \begin{minipage}[t]{0.28\columnwidth}\raggedright\strut
yoga x2; meal plan; chest of drawers?\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.04\columnwidth}\centering\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.24\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.33\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.28\columnwidth}\raggedright\strut
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.04\columnwidth}\centering\strut
5/21\strut
\end{minipage} & \begin{minipage}[t]{0.24\columnwidth}\raggedright\strut
ASM blog post due (M)\strut
\end{minipage} & \begin{minipage}[t]{0.33\columnwidth}\raggedright\strut
Slides for talk; Inferential wk4; parse/compile 100 XML files\strut
\end{minipage} & \begin{minipage}[t]{0.28\columnwidth}\raggedright\strut
yoga x2; meal plan; dispose of purged items\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.04\columnwidth}\centering\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.24\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.33\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.28\columnwidth}\raggedright\strut
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.04\columnwidth}\centering\strut
5/28\strut
\end{minipage} & \begin{minipage}[t]{0.24\columnwidth}\raggedright\strut
Memorial day (M); Lab meeting presentation (R)\strut
\end{minipage} & \begin{minipage}[t]{0.33\columnwidth}\raggedright\strut
figure out Flux/genderize.io; ASM practice talk; Inferential wk5\strut
\end{minipage} & \begin{minipage}[t]{0.28\columnwidth}\raggedright\strut
yoga x2; meal plan\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.04\columnwidth}\centering\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.24\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.33\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.28\columnwidth}\raggedright\strut
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.04\columnwidth}\centering\strut
6/4\strut
\end{minipage} & \begin{minipage}[t]{0.24\columnwidth}\raggedright\strut
ASM Microbe (T-S)\strut
\end{minipage} & \begin{minipage}[t]{0.33\columnwidth}\raggedright\strut
ASM presentation\strut
\end{minipage} & \begin{minipage}[t]{0.28\columnwidth}\raggedright\strut
yoga x2; meal plan\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.04\columnwidth}\centering\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.24\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.33\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.28\columnwidth}\raggedright\strut
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.04\columnwidth}\centering\strut
6/11\strut
\end{minipage} & \begin{minipage}[t]{0.24\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.33\columnwidth}\raggedright\strut
make final changes \& fix citations; parse/compile AEM XML files;
Modeling wk1\strut
\end{minipage} & \begin{minipage}[t]{0.28\columnwidth}\raggedright\strut
yoga x2; meal plan\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.04\columnwidth}\centering\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.24\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.33\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.28\columnwidth}\raggedright\strut
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.04\columnwidth}\centering\strut
6/18\strut
\end{minipage} & \begin{minipage}[t]{0.24\columnwidth}\raggedright\strut
parse/compile JVI XML files\strut
\end{minipage} & \begin{minipage}[t]{0.33\columnwidth}\raggedright\strut
submit preprint; Modeling wk2\strut
\end{minipage} & \begin{minipage}[t]{0.28\columnwidth}\raggedright\strut
yoga x2; meal plan\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.04\columnwidth}\centering\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.24\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.33\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.28\columnwidth}\raggedright\strut
\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.04\columnwidth}\centering\strut
6/25\strut
\end{minipage} & \begin{minipage}[t]{0.24\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.33\columnwidth}\raggedright\strut
parse/compile all XML files; Modeling wk3\strut
\end{minipage} & \begin{minipage}[t]{0.28\columnwidth}\raggedright\strut
yoga x2; meal plan\strut
\end{minipage}\tabularnewline
\begin{minipage}[t]{0.04\columnwidth}\centering\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.24\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.33\columnwidth}\raggedright\strut
\strut
\end{minipage} & \begin{minipage}[t]{0.28\columnwidth}\raggedright\strut
\strut
\end{minipage}\tabularnewline
\bottomrule
\end{longtable}
\begin{enumerate}
\def\labelenumi{\arabic{enumi})}
\item
My goals for last week were
\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_.
\item
I did (or did not) meet them.
\item
If not, I didn't meet my goals because
\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_.
\item
My goals for next week are
\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_
\item
I need help with
\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_\_
\end{enumerate}
\end{document}
| {
"alphanum_fraction": 0.7471423415,
"avg_line_length": 32.3385214008,
"ext": "tex",
"hexsha": "d04167feb0fe9c37f42356c7e4a3cefefab266f4",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "2c954d6d647b3f04b5f6700e685dfd69dea2adcf",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "akhagan/Notebook",
"max_forks_repo_path": "2018 lab notebooks/Spring_2018_Goals.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "2c954d6d647b3f04b5f6700e685dfd69dea2adcf",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "akhagan/Notebook",
"max_issues_repo_path": "2018 lab notebooks/Spring_2018_Goals.tex",
"max_line_length": 83,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "2c954d6d647b3f04b5f6700e685dfd69dea2adcf",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "akhagan/Notebook",
"max_stars_repo_path": "2018 lab notebooks/Spring_2018_Goals.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 6299,
"size": 16622
} |
%%%% IEEE DEFAULTS
\documentclass[conference]{IEEEtran}
\IEEEoverridecommandlockouts
% The preceding line is only needed to identify funding in the first footnote. If that is unneeded, please comment it out.
%\usepackage{cite} % should not be used with natbib
%\usepackage{amsmath,amssymb,amsfonts}
%\usepackage{algorithmic}
\usepackage{graphicx}
\usepackage{enumitem}
\usepackage{subcaption}
\usepackage{adjustbox}
\usepackage{relsize}
\usepackage[font=small]{caption}
%\usepackage{textcomp}
%foot note
\usepackage[bottom]{footmisc}
\usepackage{xcolor}
%\def\BibTeX{{\rm B\kern-.05em{\sc i\kern-.025em b}\kern-.08em
% T\kern-.1667em\lower.7ex\hbox{E}\kern-.125emX}}
%%%% USER SETTINGS
\PassOptionsToPackage{hyphens}{url}
\usepackage[hidelinks]{hyperref}
\hypersetup{hidelinks}% redundant with the option above, but better safe than sorry
%\usepackage[acronym]{glossaries} % https://www.overleaf.com/learn/latex/Glossaries#Acronyms
\usepackage[acronym]{glossaries}
\makeglossaries
\input{acronym}
\usepackage{hyperref}
\usepackage{csquotes}
\usepackage[numbers]{natbib}
\bibliographystyle{IEEEtranN}
\renewcommand{\bibfont}{\footnotesize} % for IEEE bibfont size
\usepackage[per-mode=symbol-or-fraction]{siunitx}
%\usepackage{flushend}
%\bstctlcite{bibliography:BSTcontrol}
\usepackage{array}
\renewcommand\arraystretch{1.25}
\usepackage{booktabs}
\renewcommand{\figurename}{Figure}
\newcommand{\cgn}[1]{\textcolor{blue}{(GN): #1}}
% todo notes:
\setlength {\marginparwidth}{2cm}
\usepackage{todonotes}
\presetkeys{todonotes}{inline}{}
\usepackage{pgf}
\usepackage{tikz}
\usetikzlibrary{shapes.geometric, arrows}
\tikzstyle{startstop} = [rectangle, rounded corners, minimum width=3cm, minimum height=1cm,text centered, draw=black, fill=red!30]
\tikzstyle{io} = [trapezium, trapezium left angle=70, trapezium right angle=110, minimum width=3cm, minimum height=1cm, text centered, draw=black, fill=blue!30]
\tikzstyle{process} = [rectangle, minimum width=3cm, minimum height=1cm, text centered, draw=black, fill=orange!30]
\tikzstyle{decision} = [diamond, minimum width=3cm, minimum height=1cm, text centered, draw=black, fill=green!30]
\usetikzlibrary{backgrounds}
\usetikzlibrary{arrows}
\usetikzlibrary{shapes,shapes.geometric,shapes.misc}
\usetikzlibrary{decorations.pathreplacing,calc,shadows.blur,shapes}
\usetikzlibrary{positioning}
\usetikzlibrary{shapes.geometric}
\tikzstyle{tikzfig}=[baseline=-0.25em,scale=0.5]
\usepackage{color}
\usetikzlibrary{positioning}
\begin{document}
\title{Need for Speed: Hardware-based Radios for virtualized Radio Access Networks}
\author{
\IEEEauthorblockN{
Javier Acevedo\IEEEauthorrefmark{1},
Marian Ulbricht\IEEEauthorrefmark{1},
Florian Grabs\IEEEauthorrefmark{1},
Andreas Igno Grohmann\IEEEauthorrefmark{1},
Giang T. Nguyen\IEEEauthorrefmark{2}\IEEEauthorrefmark{4},\\ % needed to line break because of too many authors :)
Patrick Seeling\IEEEauthorrefmark{3},
and
Frank H. P. Fitzek\IEEEauthorrefmark{1}\IEEEauthorrefmark{4}
}
\IEEEauthorblockA{
\IEEEauthorrefmark{1} Deutsche Telekom Chair of Communication Networks, TU Dresden
}
\IEEEauthorblockA{
\IEEEauthorrefmark{2} Chair of Haptic Communication Systems, TU Dresden
}
\IEEEauthorblockA{
\IEEEauthorrefmark{3} Department of Computer Science, Central Michigan University
}
\IEEEauthorblockA{
\IEEEauthorrefmark{4} Centre for Tactile Internet with Human-in-the-Loop (CeTI)
}
E-mails: \{firstname.lastname\}@tu-dresden.de\IEEEauthorrefmark{1}\IEEEauthorrefmark{2} ; \{firstname.lastname\}@cmich.edu\IEEEauthorrefmark{3}
}
\maketitle
% enable page numbers, remove for final version
\thispagestyle{plain}
\pagestyle{plain}
\begingroup\renewcommand\thefootnote{\textsection}
\endgroup
\input{sections/00_abstract}
\begin{IEEEkeywords}
Hardware Accelerator, Haptic Communication, Tactile Internet, Cryptographic Algorithms, Human-machine Interaction, Ultra-low Latency, High-performance Computing, Network Security Functions.
\end{IEEEkeywords}
\input{sections/10_introduction}
\input{sections/20_technical_background}
\input{sections/30_related_work}
\input{sections/40_methodology}
\input{sections/50_evaluation}
\input{sections/60_outlook}
\input{sections/70_conclusions}
%\input{70_conclusions}
\section*{Acknowledgement}
This work was funded by the German Research Foundation (DFG, Deutsche Forschungsgemeinschaft) as part of Germany’s Excellence Strategy – EXC 2050/1 – Project ID 390696704 – Cluster of Excellence “Centre for Tactile Internet with Human-in-the-Loop” (CeTI) of Technische Universität Dresden.
We also would like to thank Juan Cabrera and Riccardo Bassoli for their valuable insights during the implementation of the results presented in this work.
%Print bibliography with IEEE Tran style
\bibliography{bibliography}
\end{document}
| {
"alphanum_fraction": 0.7912041885,
"avg_line_length": 35.3703703704,
"ext": "tex",
"hexsha": "df1a729ce01e0bc2b76f0cc96e4fd4aee0568688",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "19a40910f0fd3d78a65f1aedc17747f6bb770f62",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "jracevedob/Post-Shannon-SDR",
"max_forks_repo_path": "Publications/PostShannon.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "19a40910f0fd3d78a65f1aedc17747f6bb770f62",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "jracevedob/Post-Shannon-SDR",
"max_issues_repo_path": "Publications/PostShannon.tex",
"max_line_length": 289,
"max_stars_count": 4,
"max_stars_repo_head_hexsha": "19a40910f0fd3d78a65f1aedc17747f6bb770f62",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "mfkiwl/Post-Shannon-SDR",
"max_stars_repo_path": "Publications/PostShannon.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-25T17:03:53.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-08-17T08:39:10.000Z",
"num_tokens": 1441,
"size": 4775
} |
We evaluate the efficacy of Roots as a performance monitoring and root cause
analysis system for PaaS applications.
To do so, we consider its ability to identify and characterize SLO violations.
For violations that are not caused by a change in workload, we evaluate Roots' ability to identify
the PaaS component that is the cause of the performance anomaly. We also
evaluate the Roots path distribution analyzer, and its ability to identify
execution paths along with changes in path distributions.
Finally, we investigate the performance and scalability of the Roots
prototype.
\subsection{Anomaly Detection: Accuracy and Speed}
\begin{table}
\begin{center}
\begin{tabular}{|c|p{1cm}|p{1cm}|p{1cm}|}
\hline
Faulty PaaS Service & $L_1$ (30ms) & $L_2$ (35ms) & $L_3$ (45ms) \\ \hline
datastore & 18 & 11 & 10 \\ \hline
user management & 19 & 15 & 10 \\ \hline
\end{tabular}
\end{center}
\caption{Number of anomalies detected in guestbook app under different SLOs
($L_1$, $L_2$ and $L_3$) when injecting faults into two different PaaS kernel services.
\label{tab:anomaly_counts}
}
\end{table}
To begin the evaluation of the Roots prototype we experiment with
the SLO-based anomaly detector, using a simple HTML-producing Java
web application called ``guestbook''.
This application allows users to login, and post comments. It uses the
AppScale datastore service to save
the posted comments, and the AppScale user management service to handle authentication. Each request processed
by guestbook results in two PaaS kernel invocations -- one to check if the user is logged in, and
another to retrieve the existing comments from the datastore. We conduct all
our experiments on a single node AppScale cloud except where specified. The node itself is an Ubuntu
14.04 VM with 4 virtual CPU cores (clocked at 2.4GHz) and 4GB of memory.
We run the SLO-based anomaly detector on guestbook with a sampling rate of 15 seconds, an analysis
rate of 60 seconds, and a window size of 1 hour. We set the minimum sample count to 100, and
run a series of experiments with different SLOs on the guestbook application. Specifically, we fix
the SLO success probability at 95\%, and set the response time upper bound to $\mu_g + n\sigma_g$.
$\mu_g$ and $\sigma_g$ represent the mean and standard deviation of the
guestbook's response time. We learn these two parameters a priori by benchmarking
the application. Then we obtain three different upper bound values for the guestbook's
response time by setting
$n$ to 2, 3 and 5 and denote the resulting three SLOs $L_1$, $L_2$ and $L_3$ respectively.
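To make this check concrete, the following sketch (our own illustration in Java,
not the actual Roots code) evaluates an SLO of the form ``response time stays
below an upper bound with success probability $p$'' over a window of benchmarked
response times, subject to the minimum sample count:
\begin{verbatim}
import java.util.List;

public class SloCheck {
    // boundMs corresponds to mu + n*sigma; p is e.g. 0.95;
    // minSamples is e.g. 100, matching the detector above.
    public static boolean isViolated(List<Double> windowMs,
            double boundMs, double p, int minSamples) {
        if (windowMs.size() < minSamples) {
            return false; // detector is still warming up
        }
        long ok = windowMs.stream()
                .filter(t -> t <= boundMs).count();
        return ((double) ok / windowMs.size()) < p;
    }

    public static void main(String[] args) {
        List<Double> w = List.of(10.0, 12.0, 48.0, 11.0, 9.0);
        // 1 of 5 samples exceeds 30ms, so the 95% SLO fails.
        System.out.println(isViolated(w, 30.0, 0.95, 5));
    }
}
\end{verbatim}
The real detector additionally controls how often this check runs (the analysis
rate) and how the window is populated (the sampling rate).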
We also inject performance faults into AppScale by modifying its code
to cause the datastore service to be slow to respond.
This fault injection logic activates once every hour, and
slows down all datastore invocations by 45ms over a period of 3 minutes.
We chose 45ms because it is equal
to $\mu_g + 5\sigma_g$ for the AppScale deployment under test.
Therefore this delay is sufficient to violate all three SLOs used in our experiments.
We run a similar set of experiments where we inject faults into the user management service of
AppScale. Each experiment is run for a period of 10 hours.
Table~\ref{tab:anomaly_counts} shows how the number of anomalies detected by
Roots in a 10 hour period varies when the SLO is changed. The number of anomalies
drops noticeably when the response time upper bound is increased. When the $L_3$
SLO (45ms) is used, the only anomalies detected are the ones
caused by our hourly fault injection mechanism. As the SLO is tightened by lowering the upper bound,
Roots detects additional anomalies. These additional anomalies
result from a combination of injected faults, and other naturally occurring faults
in the system. That is, Roots detected some naturally occurring
faults (temporary spikes in application latency), when a number of injected faults
were still in the sliding window of the anomaly detector. Together these two types of
faults caused SLO violations, usually several minutes after the fault injection period
has expired.
Next we analyze how fast Roots can detect anomalies in an application. We
first consider the performance of guestbook under the $L_1$ SLO while
injecting faults into the datastore service. Figure~\ref{fig:time_line_guestbook_2s} shows
anomalies detected by Roots as events on a time line. The horizontal axis represents
passage of time. The red arrows indicate the start of a fault injection period, where each
period lasts up to 3 minutes.
The blue arrows indicate the Roots anomaly detection events.
Note that every fault injection period is immediately followed by an anomaly
detection event, implying a near real-time reaction from Roots, except in the case of the fault
injection window at 20:00 hours. Roots detected another naturally occurring anomaly
(i.e. one
that we did not explicitly inject but nonetheless caused an SLO violation) at 19:52 hours,
which caused the anomaly detector to go into the warm up mode. Therefore Roots
did not immediately react to the faults injected at 20:00 hours. But as soon as the detector became
active again at 20:17, it detected the anomaly.
\begin{figure}
\centering
\includegraphics[scale=0.55]{time_line_guestbook_2s}
\caption{Anomaly detection in guestbook application during a period of 10 hours. Red arrows indicate fault injection
at the datastore service. Blue arrows indicate all anomalies detected by Roots during the experimental run.}
\label{fig:time_line_guestbook_2s}
\end{figure}
\begin{figure}
\centering
\includegraphics[scale=0.55]{time_line_guestbook_2s_user}
\caption{Anomaly detection in guestbook application during a period of 10 hours. Red arrows indicate fault injection
at the user management service. Blue arrows indicate all anomalies detected by Roots during the experimental run.}
\label{fig:time_line_guestbook_2s_user}
\end{figure}
Figure~\ref{fig:time_line_guestbook_2s_user} shows the anomaly detection time line for the
same application and SLO, while faults are being injected into the user management service.
Here too we see that Roots detects anomalies immediately following each fault injection window.
In all of our experiments, Roots detected the injected anomalies
in 158 seconds on average with a maximum time to detection of 289 seconds (i.e. less than 5 minutes).
This duration can be further controlled by changing the analysis rate and
window size of the detectors.
\subsection{Path Distribution Analyzer: Accuracy and Speed}
\begin{figure}
\centering
\includegraphics[scale=0.55]{time_line_crud}
\caption{Anomaly detection in key-value store application during a period of 10 hours. Steady-state traffic is read-heavy. Red arrows
indicate injection of write-heavy bursts. Blue arrows indicate all the anomalies detected by the path distribution
analyzer.}
\label{fig:time_line_crud}
\end{figure}
\begin{figure}
\centering
\includegraphics[scale=0.55]{time_line_caching}
\caption{Anomaly detection in cached key-value store application during a period of 10 hours. Steady-state traffic is mostly
served from the cache. Red arrows
indicate injection of cache-miss bursts. Blue arrows indicate all the anomalies detected by the path distribution
analyzer.}
\label{fig:time_line_caching}
\end{figure}
Next we evaluate the effectiveness and accuracy of the path distribution analyzer. For this we
employ two different applications.
\begin{LaTeXdescription}
\item[key-value store] This application provides the functionality of an online key-value store. It allows
users to store data objects in the cloud where each object is given a unique key. The objects can then be
retrieved, updated or deleted using their keys. Different operations
(create, retrieve, update and delete) are implemented as separate paths of
execution in the application.
\item[cached key-value store] This is a simple extension of the regular key-value store, which adds
caching to the read operation using the AppScale's memcache service. The application contains
separate paths of execution for cache hits and cache misses.
\end{LaTeXdescription}
We first deploy the key-value store on AppScale, and populate it with a number of data objects. Then we
run a test client against it which generates a read-heavy workload. On average this workload
consists of 90\% read requests and 10\% write requests. The test client
is also programmed to randomly send bursts of write-heavy workloads. These bursts consist
of 90\% write requests on average, and each burst lasts up to 2 minutes. Figure~\ref{fig:time_line_crud}
shows the write-heavy bursts as events on a time line (indicated by red arrows). Note that almost every burst is
immediately followed by an anomaly detection event (indicated by blue arrows).
The only time we do not see an anomaly detection event is when multiple
bursts are clustered together in time (e.g. 3 bursts between 17:04 and 17:24 hours). In this
case Roots detects the very first burst, and then goes into the warm up mode to collect more data.
%Therefore the bursts that immediately follow do not raise an alarm.
Between 20:30 and 21:00 hours we also
had two instances where the read request proportion dropped from 90\% to 80\% due to random
chance.
%This is because our test client randomizes the read request proportion around the 90\% mark.
Roots identified these two incidents also as anomalous.
%In these two instances the read proportion was deemed too far off from 90\%, and Roots correctly
%identified them as anomalies.
We conduct a similar experiment using the cached key-value store. Here, we run a test client that generates a workload
that is mostly served from the cache. This is done by repeatedly executing read requests on a small
selected set of object keys. However, the client randomly sends bursts of traffic requesting keys that
are not likely to be in the application cache, thus resulting in many cache misses. Each burst
lasts up to 2 minutes. As shown in
figure~\ref{fig:time_line_caching}, Roots path distribution analyzer correctly detects the change
in the workload (from many cache hits to many cache misses), nearly every time the test client injects a
burst of traffic that triggers the cache miss path of the application. The only exception is when
multiple bursts are clumped together, in which case only the first raises an alarm in Roots.
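As a rough illustration of the kind of test involved (the tolerance value and the
simple comparison below are ours; the statistical test used by Roots is different),
the following sketch flags a window whose per-path request proportions drift away
from a baseline distribution:
\begin{verbatim}
import java.util.Map;

public class PathDistributionCheck {
    // Flags a window whose per-path request proportions drift
    // from the baseline by more than the given tolerance.
    public static boolean hasShifted(Map<String, Double> baseline,
            Map<String, Double> current, double tolerance) {
        for (Map.Entry<String, Double> e : baseline.entrySet()) {
            double seen = current.getOrDefault(e.getKey(), 0.0);
            if (Math.abs(seen - e.getValue()) > tolerance) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        Map<String, Double> base  = Map.of("read", 0.9, "write", 0.1);
        Map<String, Double> burst = Map.of("read", 0.1, "write", 0.9);
        System.out.println(hasShifted(base, burst, 0.05)); // true
    }
}
\end{verbatim}
With the 90\% read baseline used in this experiment, a write-heavy burst shifts
the proportions well past any reasonable tolerance.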
\subsection{Workload Change Analyzer Accuracy}
\begin{figure}
\centering
\includegraphics[scale=0.5]{workload_change_trace}
\caption{Workload size over time for the key-value store application. The test client randomly sends
large bursts of traffic causing the spikes in the plot. Roots anomaly detection events are shown
in red dashed lines.}
\label{fig:workload_change}
\end{figure}
Next we evaluate the Roots workload change analyzer. In this experiment we run a varying workload
against the key-value store application for 10 hours. The load generating client is programmed
to maintain a mean workload level of 500 requests per minute. However, the client
is also programmed to randomly send large bursts of traffic at times of its choosing. During these bursts
the client may send more than 1000 requests a minute, thus impacting the performance of
the application server that hosts the key-value store. Figure~\ref{fig:workload_change} shows how
the application workload has changed over time. The workload generator has produced 6 large bursts of traffic during the
period of the experiment, which appear as tall spikes in the plot.
Note that each burst is immediately followed by a Roots anomaly detection event (shown by red dashed lines).
In each of these 6 cases, the increase in workload caused a violation of the application performance SLO.
Roots detected the corresponding anomalies, and determined them to be caused by changes in the workload size.
As a result, bottleneck identification was not triggered for any of these anomalies.
Even though the bursts of traffic appear to be momentary
spikes, each burst lasts for 4 to 5 minutes thereby causing a lasting impact on the application performance.
%The PELT change point detection method used in this experimental set up is ideally suited for detecting
%such lasting changes in the workload level.
\subsection{Bottleneck Identification Accuracy}
Next we evaluate the bottleneck identification capability of Roots. We first discuss the results obtained using
the guestbook application, and follow with
results obtained using a more complex application.
In the experimental run illustrated in
figure~\ref{fig:time_line_guestbook_2s}, Roots determined that all the detected anomalies except for one were
caused by the AppScale datastore service. This is consistent with our expectations since in this experiment we
artificially inject faults into the datastore.
The only anomaly that is not traced back to the datastore service is the one that was detected at 14:32 hours.
This is indicated by the blue arrow with a small square marker at the top. For this anomaly, Roots concluded that
the bottleneck is the local execution at the application server ($r$). We have verified
this result by manually inspecting the AppScale logs and traces of data collected by Roots. As it turns out,
between 14:19 and
14:22 the application server hosting the guestbook application experienced some problems, which caused
request latency to increase significantly.
%Therefore we can conclude that Roots has correctly identified
%the root causes of all 18 anomalies in this experimental run
%including one that we did not inject explicitly.
Similarly, in the experiment shown in figure~\ref{fig:time_line_guestbook_2s_user}, Roots determined
that all the anomalies are caused by the user management service, except in one instance. This is again
inline with our expectations since in this experiment we inject faults into the user management service. For the
anomaly detected at 04:30 hours, Roots determined that local execution time is the primary bottleneck.
Like earlier, we have manually verified this diagnosis to be accurate.
%In this case too the server hosting the guestbook application became slow
%during the 04:23 - 04:25 time window, and Roots correctly identified the bottleneck as the local
%application server.
\begin{figure}
\centering
\includegraphics[scale=0.55]{time_line_stocks_1}
\caption{Anomaly detection in stock-trader application during a period of 10 hours. Red arrows indicate fault injection
at the 1st datastore query. Blue arrows indicate all anomalies detected by Roots during the experimental run.}
\label{fig:time_line_stocks_1}
\end{figure}
\begin{figure}
\centering
\includegraphics[scale=0.55]{time_line_stocks_2}
\caption{Anomaly detection in stock-trader application during a period of 10 hours. Red arrows indicate fault injection
at the 2nd datastore query. Blue arrows indicate all anomalies detected by Roots during the experimental run.}
\label{fig:time_line_stocks_2}
\end{figure}
In order to evaluate how the bottleneck identification performs when an application makes more than 2
PaaS kernel invocations, we conduct another experiment using an application
called ``stock-trader''.
This application allows setting up organizations, and simulating trading of stocks between the
organizations. The two main operations in this application are \textit{buy} and \textit{sell}. Each of
these operations makes 8 calls to the AppScale datastore.
According to our previous work~\cite{Jayathilaka:2015:RTS:2806777.2806842}, 8 kernel invocations in the
same path of execution is very rare in web applications developed for a PaaS cloud. The probability
of finding an execution path with more than 5 kernel invocations in a sample of PaaS-hosted
applications is less than 1\%. Therefore the stock-trader application is a good extreme case
example to test the Roots bottleneck identification support.
We execute a number of experimental runs using this application,
and here we present the results from two of them. In all experiments we configure the anomaly
detector to check for the response time SLO of 177ms with 95\% success probability.
In one of our experimental runs we inject faults into the first datastore query executed by the buy operation
of stock-trader. The fault injection logic runs every two hours, and lasts for 3 minutes. The duration of
the full experiment is 10 hours.
Figure~\ref{fig:time_line_stocks_1} shows the resulting event sequence. Note that every fault injection
event is immediately followed by a Roots anomaly detection event. There are also four additional
anomalies in the time line which were SLO violations caused by a combination of injected faults, and
naturally occurring faults in the system. For all the anomalies detected
in this test, Roots correctly selected the first datastore call in the application code as the bottleneck.
The additional four anomalies occurred because a large number of injected faults were in the sliding window
of the detector. Therefore, it is accurate to attribute those anomalies also to the first datastore query
of the application.
Figure~\ref{fig:time_line_stocks_2} shows the results from a similar experiment where we inject
faults into the second datastore query executed by the operation. Here also Roots detects all the
artificially induced anomalies along with a few extras. All the anomalies, except for one,
are determined to be caused by the second
datastore query of the buy operation. The anomaly detected at 08:56 (marked with a square on top of the blue arrow)
is attributed to the fourth datastore query executed by the application. We have manually verified this
diagnosis to be accurate.
%Since 08:27 (when the previous anomaly was detected), the fourth datastore
%query has frequently taken a long time to execute (again, on
%its own), which resulted in an SLO violation at 08:56 hours.
In the experiments illustrated in figures~\ref{fig:time_line_guestbook_2s}, \ref{fig:time_line_guestbook_2s_user},
\ref{fig:time_line_stocks_1}, and \ref{fig:time_line_stocks_2} we maintain
the application request rate steady throughout the 10 hour periods. Therefore,
the workload change analyzer of Roots did not detect any significant shifts in the workload level.
Consequently, none of the anomalies detected in these 4 experiments were attributed to a workload change.
The bottleneck identification was therefore triggered for each anomaly.
To evaluate the agreement level among the four bottleneck candidate selection methods, we analyze 407
anomalies detected by Roots over a period of 3 weeks. We see that, except in 13 instances,
2 or more candidate selection methods agreed on the final bottleneck component chosen.
This implies that most of the time (96.8\%) Roots
identifies bottlenecks with high confidence.
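The agreement criterion itself can be illustrated with a toy majority-style pick
among the candidates nominated by the four methods (the actual Roots scoring is
more elaborate than this):
\begin{verbatim}
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class BottleneckVote {
    // Returns the component nominated by the most methods.
    public static String pick(List<String> candidates) {
        Map<String, Integer> counts = new HashMap<>();
        for (String c : candidates) {
            counts.merge(c, 1, Integer::sum);
        }
        return counts.entrySet().stream()
                .max(Map.Entry.comparingByValue())
                .map(Map.Entry::getKey)
                .orElse(null);
    }

    public static void main(String[] args) {
        // e.g. three of four methods point at the same query
        System.out.println(pick(List.of("ds-query-1",
                "ds-query-1", "ds-query-1", "local")));
    }
}
\end{verbatim}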
%\begin{figure}
%\centering
%\includegraphics[scale=0.5]{bottleneck_scores}
%\caption{Frequency of different bottleneck scores.}
%\label{fig:bottleneck_scores}
%\end{figure}
%Recall that the bottleneck identification algorithm in Roots
%selects up to four candidate components for each performance anomaly detected, and then ranks them
%by assigning scores to identify the most likely bottleneck. Figure~\ref{fig:bottleneck_scores} shows the breakdown of 407 anomalies
%detected over a period of 3 weeks. X-axis represents the different scores given to candidate components
%by our algorithm. Y-axis shows the number of times a particular score was the highest.
%According to this result, on 13 occasions Roots determined the bottleneck based on the highest score
%of 4 (score given to the component identified by the relative importance metric).
%This happens when the algorithm chooses four different candidates
%for the bottleneck. However, this constitutes only 3.2\% of all the anomalies. In 96.8\% of the time Roots saw
%at least two of the four candidates to be the same (score values 6 or higher). This implies that most of the time Roots is able to
%identify bottlenecks with a high level of confidence since two or more candidate detection methods
%agree on their results.
\subsection{Multiple Applications in a Clustered Setting}
\begin{figure}
\centering
\includegraphics[scale=0.55]{time_line_g1g8}
\caption{Anomaly detection in 8 applications deployed in a clustered AppScale cloud. Red arrows
indicate fault injection at the datastore service for queries generated from a specific host. Cross
marks indicate all the anomalies detected by Roots during the experiment.}
\label{fig:time_line_g1g8}
\end{figure}
%So far we have been experimenting with a simple AppScale cloud deployed on a single virtual machine.
To demonstrate how Roots can be used in a multi-node environment, we set up an AppScale cloud
on a cluster of 10 virtual machines (VMs). VMs are provisioned by a Eucalyptus (IaaS)
cloud, and each VM has 2 CPU cores and 2GB of memory. Then we proceed
to deploy 8 instances of the guestbook application on AppScale. We use the multitenant support
in AppScale to register each instance of guestbook as a different application ($G1$ through $G8$).
Each instance
is hosted on a separate application server instance, has its own private namespace on the AppScale
datastore, and can be accessed via a unique URL. We disable auto-scaling support in
the AppScale cloud, and inject faults into the datastore service of AppScale in such a way that queries
issued from a particular VM are processed with a 100ms delay. We identify the VM by its IP address
in our test environment, and shall refer to it as $V_f$ in the discussion. We trigger
the fault injection every 2 hours, and when activated it lasts for up to 5 minutes. Then we monitor
the applications using Roots for a period of 10 hours. Each anomaly detector is configured
to check for the 75ms response time SLO with 95\% success rate.
ElasticSearch, Logstash and the Roots pod are deployed on a separate VM.
Figure~\ref{fig:time_line_g1g8} shows the resulting event sequence. Note that we detect anomalies
in 3 applications ($G4$, $G6$ and $G7$) immediately after each fault injection. Inspecting the
topology of our AppScale cluster revealed that these were the only 3 applications that were
hosted on $V_f$. As a result, the bi-hourly fault injection caused their SLOs to
get violated. Other applications did not exhibit any SLO violations since we are monitoring against
a very high response time upper bound. In each case Roots detected the SLO violations 2-3 minutes into the fault injection
period. As soon as that happened, the anomaly detectors of $G4$, $G6$ and $G7$ entered the warmup mode.
But our fault injection logic kept injecting faults for at least 2 more minutes. Therefore when the anomaly detectors
reactivated after 25 minutes (time to collect the minimum sample count), they each detected another SLO
violation. As a result, we see another set of detection events approximately half an hour after the
fault injection events.
\begin{table}
\begin{center}
\begin{tabular}{|p{2cm}|p{6cm}|}
\hline
Feature & Results Observed in Roots \\ \hline
Detecting anomalies &
All the artificially induced anomalies were detected, except when multiple anomalies are
clustered together in time. In that case only the first anomaly was detected.
Roots also detected several anomalies that occurred
due to a combination of injected faults, and natural faults. \\ \hline
Characterizing anomalies as
being due to workload changes or bottlenecks &
When anomalies were induced by varying the application workload, Roots correctly determined
that the anomalies were caused by workload changes. In all other cases
we kept the workload steady, and hence the anomalies were attributed to a
system bottleneck. \\ \hline
Identifying correct bottleneck &
In all the cases where bottleneck identification was performed, Roots correctly identified
the bottleneck component. \\ \hline
Reaction time &
All the artificially induced anomalies (SLO violations) were detected as soon as enough samples of the fault
were taken by the benchmarking process (2-5 minutes from the start of the fault injection period). \\ \hline
Path distribution &
All the artificially induced changes to the path distribution were detected. \\
\hline
\end{tabular}
\end{center}
\caption{Summary of Roots efficacy results.
\label{tab:results_summary}
}
\end{table}
%\subsection{Results Summary}
We conclude our discussion of Roots efficacy with a summary of our results. Table~\ref{tab:results_summary}
provides an overview of all the results presented so far, broken down into four features that we wish to see
in an anomaly detection and bottleneck identification system.
\subsection{Roots Performance Overhead and Scalability}
Next we evaluate the performance overhead incurred by Roots on the applications deployed in the
cloud platform. We are particularly interested in understanding the overhead of recording the PaaS kernel
invocations made by each application, since this feature requires some changes to the PaaS kernel
implementation.
We deploy a number of applications on a vanilla
AppScale cloud (with no Roots), and measure their request latencies. We use
the popular Apache Bench tool to measure the request latency under a
varying number of concurrent clients. We then take the same measurements
on an AppScale cloud with Roots, and compare the results against the ones obtained
from the vanilla AppScale cloud. In both environments we disable the auto-scaling
support of AppScale, so that all client requests are served from a single application
server instance. In our prototype implementation of Roots, the kernel invocation events get buffered in
the application server before they are sent to the Roots data storage. We wish to
explore how this feature performs when the application server is under heavy load.
\begin{table}
\begin{center}
\begin{tabular}{|c|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|}
\hline &
\multicolumn{2}{c|}{Without Roots} &
\multicolumn{2}{c|}{With Roots} \\ \hline
App./Concurrency & Mean (ms) & SD & Mean (ms) & SD\\
\hline
guestbook/1 & 12 & 3.9 & 12 & 3.7 \\ \hline
guestbook/50 & 375 & 51.4 & 374 & 53 \\ \hline
stock-trader/1 & 151 & 13 & 145 & 13.7 \\ \hline
stock-trader/50 & 3631 & 690.8 & 3552 & 667.7 \\ \hline
kv store/1 & 7 & 1.5 & 8 & 2.2 \\ \hline
kv store/50 & 169 & 26.7 & 150 & 25.4 \\ \hline
cached kv store/1 & 3 & 2.8 & 2 & 3.3 \\ \hline
cached kv store/50 & 101 & 24.8 & 97 & 35.1 \\ \hline
\end{tabular}
\end{center}
\caption{Latency comparison of applications when running on
a vanilla AppScale cloud vs when running on a Roots-enabled
AppScale cloud.
\label{tab:perf_overhead}
}
\end{table}
Table~\ref{tab:perf_overhead} shows the comparison of request
latencies. We discover that Roots does not add a significant overhead
to the request latency in any of the scenarios considered. In all cases,
the mean request latency when Roots is in use is within one standard deviation
of the mean latency when Roots is not in use.
The latency increases with the number of concurrent clients
(since all requests are handled by a single
application server), but still there is no evidence of any detrimental overhead
from Roots even under load. This is due to the asynchronous nature of Roots,
which buffers monitoring events in memory, and reports them to ElasticSearch
out of the request processing flow.
%there is no measurable impact on
%the request latency from Roots.
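The pattern can be sketched as follows (class and method names are illustrative,
not the Roots implementation): events are appended to an in-memory queue on the
request path and shipped in batches by a background thread, so the request itself
never waits on the network.
\begin{verbatim}
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class AsyncEventReporter {
    private final BlockingQueue<String> buffer =
            new LinkedBlockingQueue<>();

    public AsyncEventReporter() {
        Thread shipper = new Thread(this::drainLoop, "shipper");
        shipper.setDaemon(true);
        shipper.start();
    }

    // Called on the request path: never blocks on the network.
    public void record(String event) {
        buffer.offer(event);
    }

    private void drainLoop() {
        List<String> batch = new ArrayList<>();
        while (true) {
            try {
                batch.add(buffer.take());  // wait for an event
                buffer.drainTo(batch);     // grab the rest
                send(batch);               // e.g. bulk-index
                batch.clear();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                return;
            }
        }
    }

    private void send(List<String> batch) {
        System.out.println("shipping " + batch.size() + " events");
    }

    public static void main(String[] args) throws Exception {
        AsyncEventReporter r = new AsyncEventReporter();
        r.record("kernel-invocation: datastore.query 12ms");
        Thread.sleep(100); // let the shipper run before exit
    }
}
\end{verbatim}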
\begin{figure}
\centering
\includegraphics[scale=0.45]{pod_performance}
\caption{Resource utilization of a Roots pod.}
\label{fig:pod_performance}
\end{figure}
Finally, to demonstrate how lightweight and scalable Roots is, we deploy
a Roots pod on a virtual machine with 4 CPU cores and 4GB memory.
To simulate monitoring multiple applications, we run multiple concurrent anomaly
detectors in the pod. Each detector is configured with a 1 hour sliding window.
We vary the number of concurrent
detectors between 100 and 10000, and run each configuration for
2 hours. We track the memory and CPU usage of the
pod during each of these runs using the jstat and pidstat tools.
Figure~\ref{fig:pod_performance}
illustrates the maximum resource utilization of the Roots pod for different counts of
concurrent anomaly detectors. We see that with 10000 concurrent
detectors, the maximum CPU usage is 238\%, where 400\% is the available limit
for 4 CPU cores. The maximum memory usage in this case is only 778 MB.
Since each anomaly detector operates with a fixed-sized window, and they
bring additional data into memory only when required, the memory
usage of the Roots pod generally stays low.
%In particular, the average resource usage
%is far less than the maximum usage values shown in figure~\ref{fig:pod_performance}.
%For example, in case of 10000 detectors, the average CPU usage is only 32\%, and
%the average memory usage is 507 MB.
We also experimented with larger concurrent
detector counts, and we were able to pack up to 40000 detectors into the pod before
getting constrained by the CPU capacity of our VM.
This result implies that we can monitor tens of thousands
of applications using a single pod, thereby scaling up to a very large number
of applications using only a handful of pods.
| {
"alphanum_fraction": 0.8019158399,
"avg_line_length": 60.3925619835,
"ext": "tex",
"hexsha": "9d3d0c7c3ce9d020b59d4a17b03c5b113c47d69c",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-05-25T02:59:15.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-05-25T02:59:15.000Z",
"max_forks_repo_head_hexsha": "d58fe64bb867ef58af19c1d84a5e1ec68ecddd3d",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "UCSB-CS-RACELab/eager-appscale",
"max_forks_repo_path": "Eager/paper/tsc16/results.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d58fe64bb867ef58af19c1d84a5e1ec68ecddd3d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "UCSB-CS-RACELab/eager-appscale",
"max_issues_repo_path": "Eager/paper/tsc16/results.tex",
"max_line_length": 133,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "d58fe64bb867ef58af19c1d84a5e1ec68ecddd3d",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "UCSB-CS-RACELab/eager-appscale",
"max_stars_repo_path": "Eager/paper/tsc16/results.tex",
"max_stars_repo_stars_event_max_datetime": "2018-07-16T18:20:23.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-06-12T01:18:49.000Z",
"num_tokens": 6719,
"size": 29230
} |
\subsubsection{2007: Heckmann et al.}
\label{sec:heckmann}
A different approach is implemented by~\citet{heckmann_gumogeneral_2005}.
The authors present the \acf{gumo}, an ontology model for characterizing
users' capabilities within adaptive environments, divided into four main
groups (emotional state, personality, characteristics and physiological
state). A significant user characteristic taken into account in this work
is stress. In the adaptive interfaces domain, special attention must be
paid to the consequences of each adaptation. However, stress is not
determined by the adaptation process alone; it is also derived from
several aspects of the user's experience, such as the current context
state (e.g. traffic, noise, surrounding people, and so
forth~\citep{babisch_noise_stress_2002}). Figure~\ref{fig:heckmann_model} illustrates
the model presented by~\citeauthor{heckmann_gumogeneral_2005}.
% \InsertFig{heckmann_model}{fig:heckmann_model}{Several user model property
% dimensions~\citep{heckmann_gumogeneral_2005}}{}{0.70}{}
\begin{figure}
\centering
\includegraphics[width=0.70\textwidth]{heckmann_model.png}
\caption{Several \ac{gumo} user model property
dimensions~\citep{heckmann_gumogeneral_2005}.}
\label{fig:heckmann_model}
\end{figure}
% ---------------------------------------------------------------------- | {
"alphanum_fraction": 0.7647058824,
"avg_line_length": 47.3571428571,
"ext": "tex",
"hexsha": "f19e4815a815fe5ea7067b7469f755287a5c6836",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "eec342383ef4f15968e6417020681a3eb095bf08",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "edlectrico/dissertation",
"max_forks_repo_path": "2_state_of_the_art/user/2_7_heckmann.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "eec342383ef4f15968e6417020681a3eb095bf08",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "edlectrico/dissertation",
"max_issues_repo_path": "2_state_of_the_art/user/2_7_heckmann.tex",
"max_line_length": 85,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "eec342383ef4f15968e6417020681a3eb095bf08",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "edlectrico/dissertation",
"max_stars_repo_path": "2_state_of_the_art/user/2_7_heckmann.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 334,
"size": 1326
} |
\section{Conclusion}\label{sec:conclusion} High-assurance RV has
the potential to become the avenue to assuring
otherwise unassurable IA safety-critical systems. We have presented a
number of challenges that we have identified as barriers to
actualizing high-assurance RV and surveyed how we have addressed these
challenges in the course of our research using the Copilot
framework. We hope this list will be useful to RV researchers as they
apply their own work to safety-critical systems. In addition, we
believe we have demonstrated the efficacy of applying light-weight
formal methods tools to address many of these challenges. Progress on
these issues is likely to come faster if a multidisciplinary approach
is taken with domain specialists, safety engineers and verification tool
builders collaborating with RV researchers. Much work
remains and the list of challenges is likely to grow even as researchers
solve many of the issues raised.
\paragraph{Acknowledgements:} The Copilot project has
been conducted in collaboration with Dr. Lee Pike (Galois). Jonathan
Laurent (ENS Paris) and Chris Hathhorn (University of Missouri) did
most of the coding of {\tt Copilot.Theorem}. Georges-Axel
Jaloyan (ENS Paris) recently added the monitor verification capabilities. | {
"alphanum_fraction": 0.8159937888,
"avg_line_length": 61.3333333333,
"ext": "tex",
"hexsha": "11e5749aa45a368be2893638d7919460d857bcdd",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "caccad918b23dae991095344a845827ddccd6047",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "Copilot-Language/copilot-discussion",
"max_forks_repo_path": "ISoLA16/conclusion.tex",
"max_issues_count": 30,
"max_issues_repo_head_hexsha": "caccad918b23dae991095344a845827ddccd6047",
"max_issues_repo_issues_event_max_datetime": "2021-09-07T22:34:17.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-04-01T20:24:19.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "Copilot-Language/copilot-discussion",
"max_issues_repo_path": "ISoLA16/conclusion.tex",
"max_line_length": 76,
"max_stars_count": 6,
"max_stars_repo_head_hexsha": "caccad918b23dae991095344a845827ddccd6047",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "Copilot-Language/copilot-discussion",
"max_stars_repo_path": "ISoLA16/conclusion.tex",
"max_stars_repo_stars_event_max_datetime": "2021-05-17T13:20:09.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-06-10T00:44:21.000Z",
"num_tokens": 285,
"size": 1288
} |
\documentclass{article}
\usepackage[utf8]{inputenc}
\usepackage[USenglish]{babel}
\usepackage{gensymb}
\usepackage{hyperref}
\usepackage{etoolbox}
\usepackage{graphicx}
\usepackage{geometry}
\usepackage{xifthen}
\usepackage{float}
\usepackage{ulem}
\usepackage{multirow}
\edef\restoreparindent{\parindent=\the\parindent\relax}
\usepackage[parfill]{parskip}
\restoreparindent
\usepackage{indentfirst}
\title{Report: Event app for Android\\ INF8405 \textit{Informatique mobile}\\ Winter 2017 }
\author{GRAILLE Raphaël (1815074), LOGUT Adrien (1815142) \\ \& NAMALGAMUWA Chathura (1815118)\\ Ecole Polytechnique de Montréal}
\date{March 23, 2017}
\begin{document}
\maketitle
Submitted to: \textbf{Fabien Berquez}
\newpage
\tableofcontents
\newpage
\section{Introduction}
This is the report for the second lab in the course INF8405. The objective was to create an event organizer app.
\section{Technical details}
\subsection{\href{http://data.whicdn.com/images/28718572/large.gif}{Activities + Fragments}}
The app has 3 activities with self-explanatory names: SignUpActivity, GroupsActivity and MapsActivity. SignUpActivity is called only if a user hasn't been created yet. The workflow afterwards is simply GroupsActivity -\textgreater{} MapsActivity, with the possibility to go back.
To show different details or ask for user input, we used \href{https://material.io/guidelines/components/bottom-sheets.html}{bottom sheets}, \textit{à la} Google Maps. We think they blend well with the map and still allow the user to navigate the map while looking at information.
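As a small illustration of this forward navigation (the class skeleton and the extra key below are our own sketch, not necessarily the code used in the app), GroupsActivity starts MapsActivity with a standard Intent:
\begin{verbatim}
import android.content.Intent;
import android.support.v7.app.AppCompatActivity;

public class GroupsActivity extends AppCompatActivity {
    // Called when the user picks a group in the list (sketch).
    private void openGroup(String selectedGroupName) {
        Intent intent = new Intent(this, MapsActivity.class);
        intent.putExtra("EXTRA_GROUP_NAME", selectedGroupName);
        startActivity(intent);
    }
}
\end{verbatim}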
\subsection{\href{http://bestanimations.com/Nature/Fire/simpsons-fire-gif.gif}{Firebase}}
For our backend we used Firebase. We also used Firebase for our local cache instead of SQLite. While this solution is said to be limiting for large applications, it was sufficient for us; otherwise we would have had to handle the syncing of the local and online databases ourselves, which is quite tedious.
We used a flatten structure as advised by the Firebase guidelines.
A \textit{user} has its name, avatar, position and references to groups. We also added a timestamp for the location, which allowed us to test the location update frequency easily.
A \textit{group} has a name (which is its index), an organizer (a reference to a user), a list of members (references to users), a list of locations (references) and a reference to an event.
A \textit{location} has a name, a photo, a position, and votes (indexed by user). An optimization was done at this point: to figure out whether a user has voted for all 3 places, we use the boolean in the member list of the associated group. When a user is added to the group it is initially set to false; once he/she has voted for all 3 locations, it is set to true.
An \textit{event} has a name, a position, a starting/ending date, some information and participations (indexed by user). The participation can take 3 integer values to indicate Going/Maybe Going/Not Going.
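Put together, the flattened structure roughly corresponds to the following plain Java value classes (field names and the exact participation encoding are our guesses, not the app's actual keys):
\begin{verbatim}
import java.util.List;
import java.util.Map;

class User {
    String name;
    String avatarUrl;
    double latitude, longitude;
    long locationTimestamp;          // checks update frequency
    Map<String, Boolean> groups;     // group name -> true
}

class Group {
    String organizer;                // user id
    Map<String, Boolean> members;    // user id -> voted for all 3
    List<String> locations;          // location ids
    String event;                    // event id
}

class Location {
    String name;
    String photoUrl;
    double latitude, longitude;
    Map<String, Boolean> votes;      // user id -> vote
}

class Event {
    String name;
    double latitude, longitude;
    long startDate, endDate;
    String info;
    Map<String, Integer> participations; // 0/1/2 = not/maybe/going
}
\end{verbatim}
Keeping members as a user-to-boolean map is what enables the voting optimization described above.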
\subsection{\href{https://media.giphy.com/media/J0qooSNU20Q3m/giphy.gif}{Bonus}}
We added an informational message that is shown when the user is not connected to the internet or has the GPS disabled, \textit{à la} Facebook Messenger. We wanted to go further and show whether users were \href{https://firebase.google.com/docs/database/android/offline-capabilities#section-sample}{connected}, but unfortunately didn't have enough time.
\section{Issues encountered}
\subsection{\href{https://cdn.netlify.com/ecf5f8b45c8f47745f2eff6c5938d1fb34c124f3/4848c/img/blog/instant-cache-invalidation-joy.gif}{Local cache}}
Managing the local cache was really hard initially. We started to work with SQLite, as recommended. However, we quickly saw it had a lot of drawbacks: for a school project, it seemed too complicated for no additional value to the work. Fortunately, we found that Firebase had an option for offline caching.
\subsection{\href{http://1.bp.blogspot.com/-lvbNllD8faM/U53uHuJGYgI/AAAAAAAAAiY/ZJB1OQj0L-M/s1600/homer.gif}{Working as a team of two}}
The work to be done was already overwhelming on its own, and on top of that only 2 of us were able to work on it.
\section{Additional notes and recommendations}
This second lab was far more overwhelming than the first one. We are still happy to have completed every required functionality and some more, even with one teammate down. The currently known bug is that if a location detail fragment is shown while a vote is added/removed by another user, that fragment isn't updated automatically (you need to hide it and re-click on that location). We did, however, solve that problem for the event details: if another user changes whether he/she is coming, the information is updated automatically. Therefore, with more time, we would have used the same technique to update the location details.
A few recommendations for future labs however:
\begin{itemize}
\item The gap between the first and second lab is a bit too big.
\item Don't require the use of SQLite for local caching. It introduces too many work-flows to handle.
\end{itemize}
\end{document} | {
"alphanum_fraction": 0.7883168317,
"avg_line_length": 58.0459770115,
"ext": "tex",
"hexsha": "5f14843358c891a3ef2d6162f18093e1a6c88bc9",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "aa6e916861b083d55918ab13590bcd806cd7453a",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "cnamal/INF8405",
"max_forks_repo_path": "Events/report.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "aa6e916861b083d55918ab13590bcd806cd7453a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "cnamal/INF8405",
"max_issues_repo_path": "Events/report.tex",
"max_line_length": 632,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "aa6e916861b083d55918ab13590bcd806cd7453a",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "cnamal/INF8405",
"max_stars_repo_path": "Events/report.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1254,
"size": 5050
} |
% 9.5.07
% This is a sample documentation for Compass in the tex format.
% We restrict the use of tex to the following subset of commands:
%
% \section, \subsection, \subsubsection, \paragraph
% \begin{enumerate} (no-nesting), \begin{quote}, \item
% {\tt ... }, {\bf ...}, {\it ... }
% \htmladdnormallink{}{}
% \begin{verbatim}...\end{verbatim} is reserved for code segments
% ...''
%
\section{Malloc Return Value Used In If Stmt}
\label{MallocReturnValueUsedInIfStmt::overview}
``ALE3D Coding Standards \& Style Guide'' item \#4.5 states that
\begin{quote}
When using raw {\tt malloc()} and {\tt new}, developers should check the return value for {\tt NULL}. This is especially important when allocating large blocks of memory, which may exhaust heap resources.
\end{quote}
\subsection{Parameter Requirements}
This checker takes no parameters and takes a source file as input.
\subsection{Implementation}
This pattern is checked using a simple AST traversal that seeks out function references to {\tt malloc()}. The parent nodes are then traversed upwards until a basic scope block is found, at which point a nested AST traversal seeks if-statement conditional expressions containing the memory block returned from {\tt malloc()}. If no such if-statement conditional is found in the immediately containing basic block scope, then an error is flagged.
\subsection{Non-Compliant Code Example}
The non-compliant code fails to check the return value of {\tt malloc()}.
\begin{verbatim}
#include <stdlib.h>
int main()
{
int *iptr = (int*)malloc( 256*sizeof(int) );
return 0;
} //main()
\end{verbatim}
\subsection{Compliant Solution}
The compliant solution uses an if statement to check the return value of {\tt malloc()} for {\tt NULL}.
\begin{verbatim}
#include <stdlib.h>
int main()
{
int *iptr = (int*)malloc( 256*sizeof(int) );
if( iptr == NULL )
return 1;
return 0;
} //main()
\end{verbatim}
\subsection{Mitigation Strategies}
\subsubsection{Static Analysis}
Compliance with this rule can be checked with structural static analysis checkers using the following algorithm (an illustrative sketch follows the list):
\begin{enumerate}
\item Perform AST traversal visiting function call nodes corresponding to {\tt malloc()}.
\item For each call to {\tt malloc()} traverse its parent nodes until an if statement or the end of a basic block is reached.
\item If an if statement is encountered, check that the if statement performs a comparison involving the return value from {\tt malloc()}; if this is not the case then flag a violation.
\item If a basic block is reached, then flag a violation as the return value of {\tt malloc()} may be out of scope.
\item Report any violations.
\end{enumerate}
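The following is a minimal, illustrative sketch of the algorithm above in Python. It is written against the third-party {\tt pycparser} library rather than the ROSE infrastructure that Compass actually uses, and it only recognises the simple pattern of a declaration initialised from {\tt malloc()} followed (or not) by an if statement in the same basic block.
\begin{verbatim}
# Illustrative sketch only; not the Compass/ROSE checker itself.
from pycparser import c_parser, c_ast

SOURCE = r"""
int main()
{
    int *iptr = (int*) malloc( 256 * sizeof(int) );
    return 0;
}
"""

def subtree(node):
    """Yield node and every node below it."""
    yield node
    for _, child in node.children():
        yield from subtree(child)

def mentions(node, name):
    """True if the identifier `name` occurs anywhere below `node`."""
    return any(isinstance(n, c_ast.ID) and n.name == name
               for n in subtree(node))

def malloc_decl_name(stmt):
    """Return the declared variable name if `stmt` is a declaration
    initialised from malloc(), possibly through a cast, else None."""
    if not isinstance(stmt, c_ast.Decl) or stmt.init is None:
        return None
    init = stmt.init
    if isinstance(init, c_ast.Cast):
        init = init.expr
    if (isinstance(init, c_ast.FuncCall) and isinstance(init.name, c_ast.ID)
            and init.name.name == "malloc"):
        return stmt.name
    return None

class BlockVisitor(c_ast.NodeVisitor):
    def visit_Compound(self, block):
        items = block.block_items or []
        for i, stmt in enumerate(items):
            var = malloc_decl_name(stmt)
            if var is None:
                continue
            # Is the pointer tested by an if statement later in this block?
            checked = any(isinstance(s, c_ast.If) and mentions(s.cond, var)
                          for s in items[i + 1:])
            if not checked:
                print("violation: return value of malloc() stored in "
                      "'%s' is never checked in its basic block" % var)
        self.generic_visit(block)

ast = c_parser.CParser().parse(SOURCE)
BlockVisitor().visit(ast)
\end{verbatim}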
\subsection{References}
Arrighi B., Neely R., Reus J. ``ALE3D Coding Standards \& Style Guide'', 2005.
| {
"alphanum_fraction": 0.7446499819,
"avg_line_length": 37.2567567568,
"ext": "tex",
"hexsha": "5ed95ff274bf1d1e2ccab65d438c4d2e7744eaef",
"lang": "TeX",
"max_forks_count": 146,
"max_forks_repo_forks_event_max_datetime": "2022-03-04T07:32:53.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-04-27T02:48:34.000Z",
"max_forks_repo_head_hexsha": "7435d4fa1941826c784ba97296c0ec55fa7d7c7e",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "sujankh/rose-matlab",
"max_forks_repo_path": "projects/compass/extensions/checkers/mallocReturnValueUsedInIfStmt/mallocReturnValueUsedInIfStmtDocs.tex",
"max_issues_count": 174,
"max_issues_repo_head_hexsha": "7435d4fa1941826c784ba97296c0ec55fa7d7c7e",
"max_issues_repo_issues_event_max_datetime": "2022-03-31T16:51:05.000Z",
"max_issues_repo_issues_event_min_datetime": "2015-01-28T18:41:32.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "sujankh/rose-matlab",
"max_issues_repo_path": "projects/compass/extensions/checkers/mallocReturnValueUsedInIfStmt/mallocReturnValueUsedInIfStmtDocs.tex",
"max_line_length": 420,
"max_stars_count": 488,
"max_stars_repo_head_hexsha": "7597292cf14da292bdb9a4ef573001b6c5b9b6c0",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "maurizioabba/rose",
"max_stars_repo_path": "projects/compass/extensions/checkers/mallocReturnValueUsedInIfStmt/mallocReturnValueUsedInIfStmtDocs.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-30T07:15:46.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-01-09T08:54:48.000Z",
"num_tokens": 655,
"size": 2757
} |
%% start of file `template.tex'.
%% Copyright 2006-2013 Xavier Danaux ([email protected]).
%% Copyright 2013-2016 Jorge Martinez Lopez ([email protected]).
%
% This work may be distributed and/or modified under the
% conditions of the LaTeX Project Public License version 1.3c,
% available at http://www.latex-project.org/lppl/.
\documentclass[11pt,a4paper,sans]{moderncv} % possible options include font size ('10pt', '11pt' and '12pt'), paper size ('a4paper', 'letterpaper', 'a5paper', 'legalpaper', 'executivepaper' and 'landscape') and font family ('sans' and 'roman')
% moderncv themes
\moderncvstyle{casual} % style options are 'casual' (default), 'classic', 'oldstyle' and 'banking'
\moderncvcolor{blue} % color options 'blue' (default), 'orange', 'green', 'red', 'purple', 'grey' and 'black'
%\renewcommand{\familydefault}{\sfdefault} % to set the default font; use '\sfdefault' for the default sans serif font, '\rmdefault' for the default roman one, or any tex font name
%\nopagenumbers{} % uncomment to suppress automatic page numbering for CVs longer than one page
% character encoding
\usepackage[utf8]{inputenc} % if you are not using xelatex or lualatex, replace by the encoding you are using
% adjust the page margins
\usepackage[scale=0.75]{geometry}
%\setlength{\hintscolumnwidth}{3cm} % if you want to change the width of the column with the dates
%\setlength{\makecvtitlenamewidth}{10cm} % for the 'classic' style, if you want to force the width allocated to your name and avoid line breaks. be careful though, the length is normally calculated to avoid any overlap with your personal info; use this at your own typographical risks...
% personal data
\firstname{Jorge}
\familyname{Martínez~López}
\title{Telecommunications Operations Manager}
\address{London}{United Kingdom}
\phone[mobile]{+44~7414861067}
\phone[fixed]{+44~2035989989}
\email{[email protected]}
%\homepage{about.me/jorgeml}
\social[linkedin]{jorgeml}
%\social[twitter]{jorgeml}
\social[github]{jorgeml}
%\extrainfo{additional information}
%\photo[64pt][0.4pt]{picture} % '64pt' is the height the picture must be resized to, 0.4pt is the thickness of the frame around it (put it to 0pt for no frame) and 'picture' is the name of the picture file
%\quote{Some quote}
% to show numerical labels in the bibliography (default is to show no labels); only useful if you make citations in your resume
%\makeatletter
%\renewcommand*{\bibliographyitemlabel}{\@biblabel{\arabic{enumiv}}}
%\makeatother
%\renewcommand*{\bibliographyitemlabel}{[\arabic{enumiv}]% CONSIDER REPLACING THE ABOVE BY THIS
% bibliography with multiple entries
%\usepackage{multibib}
%\newcites{book,misc}{{Books},{Others}}
%----------------------------------------------------------------------------------
% content
%----------------------------------------------------------------------------------
\begin{document}
%----- resume ---------------------------------------------------------
\makecvtitle
\section{Experience}
\subsection{Professional Experience}
\cventry{2013--Now}{High Touch Operations Manager}{Cisco Systems}{United Kingdom}{}{
\begin{itemize}
\item Delivered premium high-touch technical support services to multiple platforms for a strategic global service provider.
Being familiar with the customer network, organization and support history, acted as the main point of contact between the Cisco organization and the customer to ensure timely resolution of critical incidents.
\item Carried out reporting, case reviews, SLA calculations, trending analysis, and QBRs.
\item Led service improvement initiatives that resulted in significant cost savings for the customer and a higher quality service delivery.
\item Became a trusted advisor for customers and colleagues globally by sharing best practices.
\item Promoted to team lead after two years in the job.
\end{itemize}
}
\cventry{2009--2012}{Technical Project Manager}{Alcatel-Lucent}{the Netherlands}{}{Consultant in the Global Customer Delivery team in the Benelux.
\begin{itemize}
\item System Integrator Lead for three large projects in the IMS Solution Integration Office for a large Dutch wire-line operator:
\begin{itemize}
\item Supervised a Technical Project Manager in each project, overseeing schedules and resource planning on all three projects, and liaising with the other leads.
\item Led pre-sales support by providing requirements definition and commercial offers for the three projects. Offer definition included in-house hardware and services plus third party resources.
\item Responsible for vendor management within each project, including compliance, offer management and regular project activities.
\item Mentoring activities and on the job training for two new hires for almost six months.
\end{itemize}
\item Tool specialist consultant in a managed services project for a Belgian 2G/3G mobile operator:
\begin{itemize}
\item Responsible for the Operations Support Systems (OSS) tooling (BMC Remedy ticketing, Netcool alarms) for both incident management and new requests to the service provider.
\item Involved in ITIL-based Operations and Rollout processes design, implementation and improvement.
\item Supported integration activities providing input to the project team regarding tools and processes.
\item Responsible for the delivery and agreement with the customer of 100+ operational reports (including KPIs).
\item Contributed to the disaster recovery plan and co-authored the exit plan.
\end{itemize}
\end{itemize}}
%\cventry{2008}{Research \& Development Intern}{Smart Signs Solutions}{the Netherlands}{}{Worked part-time a few months to finance a study trip to Japan.
%\begin{itemize}
%\item Development of guest detection functionality using RFID in a smart wayfinding
%platform.
%\item Customized deployment of a Linux distribution specific for low computing power devices.
%\item Investigation and resolution of wireless networking issues.
%\end{itemize}
%}
%\cventry{2006}{Innovation Department Intern}{Bankinter}{Madrid, Spain}{}{Second-line technical support of the mobile applications developed by the bank: mobile banking and stock trading.\newline{}
%Wrote documentation for the first-line of support team.}
%\subsection{Miscellaneous}
%\cventry{year--year}{Job title}{Employer}{City}{}{Description}
\subsection{Volunteer Experience}
\cventry{2004--2007}{Various roles}{Erasmus Student Network}{}{}
{
%\begin{itemize}
%\item Joined ESN Carlos III (Madrid, Spain) and afterwards ESN Twente (Enschede, the Netherlands) during my studies.
%\item Organized local, national and international events and activities.
%\item Mentored incoming international students.
%\item Promoted the Socrates/Erasmus internship by providing information and advice to prospective students.
%\item Collaborated with university management.
%\end{itemize}
}
%\cventry{2006--2007}{WebTeam Member}{Erasmus Student Network}{}{}
%{
%\begin{itemize}
%\item Contributed to the development and deployment of a local section website template based on Drupal.
%\end{itemize}
%}
%\cventry{2006--2007}{Council of National Representatives, Spain's ViceNational Representative}{Erasmus Student Network}{}{}
%{
%\begin{itemize}
%\item Represented the Spanish sections at the international meetings.
%\end{itemize}
%}
\section{Education and Training}
%\subsection{Courses}
%\cventry{2013}{Computing for Data Analysis}{Johns Hopkins University}{Coursera}{}{}
%\cventry{2013}{An Introduction to Operations Management}{University of Pennsylvania}{Coursera}{}{}
%\cventry{2013}{Gamification}{University of Pennsylvania}{Coursera}{}{}
%\cventry{2013}{Think Again: How to Reason and Argue}{Duke University}{Coursera}{}{}
\subsection{Certifications}
\cventry{2016}{ITIL® Intermediate certificate in IT Service Transition}{BCS Professional Certification}{}{}{}
\cventry{2016}{ITIL® Intermediate certificate in IT Service Operation}{BCS Professional Certification}{}{}{}
\cventry{2015}{Emergency First Aid at Work and Automated External Defibrillation}{Cisco}{}{}{}
%\cventry{2014}{Cisco Sales Expert}{Cisco}{}{}{}
%\cventry{2013}{ITIL® Foundation Certificate in IT Service Management}{BCS Professional Certification}{}{}{}
\cventry{2009}{Foundation Certificate in Software Testing (ISTQB)}{BCS Professional Certification}{}{}{}
\subsection{Professional trainings}
\cventry{2017}{AWS Technical Essentials}{Amazon Web Services}{}{}{}
\subsection{Degrees}
\cventry{2008}{M.Sc. in Telematics}{University of Twente}{Enschede, the Netherlands}{}{Thesis in ``Dynamic Service Composition in an Innovative Communication Environment''.\newline{}
``Bonsai'' Study Tour to Japan in February 2008, topic ``Smart Surroundings''.}
\cventry{2008}{Degree in Telecommunications Engineering}{Universidad Carlos III de Madrid}{Madrid, Spain}{}{Majored in ``Planning and
Management of Telecommunications'', including management courses not specific to telecommunications, such as business administration, human resources, quality management, and innovation.
\newline{}Socrates/Erasmus student exchange program at the University of Twente from September 2003 to March 2004.}
\subsection{Master thesis}
\cvitem{title}{\emph{``Dynamic Service Composition in an Innovative Communication Environment'' (EU FP6 SPICE)}}
\cvitem{supervisors}{L.~Ferreira Pires, M.~van Sinderen, E.M.~Gon\c{c}alves da Silva}
%\cvitem{description}{Dynamic Service Composition enables the creation and delivery of new customised and attentive services to the end-user upon a service request, at run-time. These new services are made by composing already existing ones.}
\section{Languages}
\cvitemwithcomment{Spanish}{Native}{}
\cvitemwithcomment{English}{Full professional proficiency}{}
\cvitemwithcomment{Dutch}{Professional working proficiency}{}
%\section{Computer skills}
%\cvdoubleitem{category 1}{XXX, YYY, ZZZ}{category 4}{XXX, YYY, ZZZ}
%\cvdoubleitem{category 2}{XXX, YYY, ZZZ}{category 5}{XXX, YYY, ZZZ}
%\cvdoubleitem{category 3}{XXX, YYY, ZZZ}{category 6}{XXX, YYY, ZZZ}
\section{Interests}
\cvitem{Basketball}{Played at university and at a regional league in the Netherlands.}
\cvitem{Free software}{Linux hobbyist, I contribute by reporting bugs, participating in mailing lists, etc.}
\cvitem{Travelling}{Visited many countries, mostly within Europe.}
\cvitem{Photography}{Taking photos of London (and other places) whenever I have the chance.}
%\section{Awards}
%\cvlistitem{Cisco's ``Pinnacle Award'' runner-up (FY2014)}
%\cvlistitem{Cisco's ``IC Recognition'' award (Q1FY2014).}
%\cvlistitem{Alcatel-Lucent's ``A Night Out'' for the outstanding effort in the Belgian mobile operator project.}
%\cvlistitem{Erasmus Student Network Alumnus (Honorary life-time membership) for my contribution during a period of three years in two local sections. Participated and sometimes organized national and international events (workshops, meetings\ldots). Served as Vice-National Representative for Spain for a year.}
\section{Additional information}
\cvlistitem{Latest version of this résumé available at \url{https://github.com/jorgeml/resume/raw/master/resume.pdf}.}
%\renewcommand{\listitemsymbol}{-~} % change the symbol for lists
%\section{Extra 2}
%\cvlistdoubleitem{Item 1}{Item 4}
%\cvlistdoubleitem{Item 2}{Item 5\cite{book1}}
%\cvlistdoubleitem{Item 3}{}
% Publications from a BibTeX file without multibib
% for numerical labels: \renewcommand{\bibliographyitemlabel}{\@biblabel{\arabic{enumiv}}}
% to redefine the heading string ("Publications"): \renewcommand{\refname}{Articles}
%\nocite{*}
%\bibliographystyle{plain}
%\bibliography{publications} % 'publications' is the name of a BibTeX file
% Publications from a BibTeX file using the multibib package
%\section{Publications}
%\nocitebook{book1,book2}
%\bibliographystylebook{plain}
%\bibliographybook{publications} % 'publications' is the name of a BibTeX file
%\nocitemisc{misc1,misc2,misc3}
%\bibliographystylemisc{plain}
%\bibliographymisc{publications} % 'publications' is the name of a BibTeX file
\clearpage
%----- letter ---------------------------------------------------------
% recipient data
% \recipient{Company Recruitment team}{Company, Inc.\\123 somestreet\\some city}
% \date{\today}
% \opening{Dear Sir or Madam,}
% \closing{Yours faithfully,}
% \enclosure[Attached]{curriculum vit\ae{}} % use an optional argument to use a string other than "Enclosure", or redefine \enclname
% \makelettertitle
% I am applying for the XXXXXXXX opportunity at XXXXXXXX as advertised on XXXXXXXX. This position may fit very well with my education, experience and career interests and I am confident that I can perform the job effectively.
%
% I am a Telecommunications professional with more than three years of work experience. In my previous role I worked as System Integration Lead (Technical Project Manager and Presales support) for the solution integration team for a Dutch telecommunications operator, leading projects that introduced new services in a complex VoIP (IMS) telecommunications network. Previously I worked as a project consultant in managed services for a mobile operator in Belgium. My activities included participating in the design and implementation of ITIL processes, taking the responsibility of delivering operational reports to the customer (including KPIs), supporting to the integration projects and, during the last months in the project, I was solely responsible of the communication with the Operations Support tooling service provider.
%
% I have excellent communication skills and I am used to deal with a variety of people on different levels (team, peers, customers, suppliers and all levels of management). My mother tongue is Spanish, I am fluent in English and I have a good level of Dutch.
% I am certified in software testing (ISTQB Foundation level).
% I enjoy sharing my knowledge with others and I am always looking forward to learning new things.
%
% Through my work experience, I have developed some key strengths for success in this role including:
% \begin{itemize}
% \item Passion for technology.
% \item Teamwork, willingness to help and be helped.
% \item Inter-cultural skills gained living abroad and being member of an international student organization.
% \item Strong customer focus developed in a high-demanding managed services project.
% \end{itemize}
%
% My résumé provides additional information on my education and experience.
%
% I would appreciate the opportunity to discuss my background and qualifications with you in further detail.
%
% Thank you for your time and consideration. I look forward to hearing from you soon.
%
% \makeletterclosing
\end{document}
%% end of file `template.tex'.
| {
"alphanum_fraction": 0.75875748,
"avg_line_length": 61.9708333333,
"ext": "tex",
"hexsha": "542b62b904951ac6485c49ecd2f9654d8745db8d",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "c73668d56de2e01e906826c55e68ad70a9984450",
"max_forks_repo_licenses": [
"CC-BY-4.0"
],
"max_forks_repo_name": "jorgeml/resume",
"max_forks_repo_path": "resume.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "c73668d56de2e01e906826c55e68ad70a9984450",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC-BY-4.0"
],
"max_issues_repo_name": "jorgeml/resume",
"max_issues_repo_path": "resume.tex",
"max_line_length": 829,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "c73668d56de2e01e906826c55e68ad70a9984450",
"max_stars_repo_licenses": [
"CC-BY-4.0"
],
"max_stars_repo_name": "jorgeml/resume",
"max_stars_repo_path": "resume.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 3544,
"size": 14873
} |
% based on the fantastic work from http://www.stdout.org/~winston/latex/
\documentclass[10pt]{article}
\usepackage{multicol}
\usepackage{calc}
\usepackage{ifthen}
\usepackage{geometry}
% conditional page margins based on paper size
\ifthenelse{\lengthtest { \paperwidth = 11in}}
{ \geometry{top=.5in,left=.5in,right=.5in,bottom=.5in} }
{\ifthenelse{ \lengthtest{ \paperwidth = 297mm}}
{\geometry{top=1cm,left=1cm,right=1cm,bottom=1cm} }
{\geometry{top=1cm,left=1cm,right=1cm,bottom=1cm} }
}
% remove page header and footer
\pagestyle{empty}
% redefine section commands to use less space
\makeatletter
\renewcommand{\section}{\@startsection{section}{1}{0mm}%
{-1ex plus -.5ex minus -.2ex}%
{0.5ex plus .2ex}%x
{\normalfont\large\bfseries}}
\renewcommand{\subsection}{\@startsection{subsection}{2}{0mm}%
{-1ex plus -.5ex minus -.2ex}%
{0.5ex plus .2ex}%
{\normalfont\normalsize\bfseries}}
\renewcommand{\subsubsection}{\@startsection{subsubsection}{3}{0mm}%
{-1ex plus -.5ex minus -.2ex}%
{1ex plus .2ex}%
{\normalfont\small\bfseries}}
\makeatother
% disable section numbering
\setcounter{secnumdepth}{0}
\setlength{\parindent}{0pt}
\setlength{\parskip}{0pt plus 0.5ex}
\begin{document}
\raggedright
\footnotesize
\begin{multicols}{2}
% multicol parameters
\setlength{\premulticols}{1pt}
\setlength{\postmulticols}{1pt}
\setlength{\multicolsep}{1pt}
\setlength{\columnsep}{2pt}
% header
\begin{center}
\Large{\textbf{Pegged Cheat Sheet}} \\
\end{center}
\section{Rules}
\begin{tabular}{@{}ll@{}}
\verb! < ! & Creates a space consuming sequence. \\
\verb! <~! & Concatenates a sequence of matches into one string. \\
\verb! <:! & Creates a sequence to be discarded. \\
\verb! <;! & Creates a sequence stored in the parent node. \\
\end{tabular}
Every operator suffix of a \verb!<! rule will work on token literals and on child rules from the parent context.
\section{Non-Terminals}
\begin{tabular}{@{}ll@{}}
\verb!book! & Default is two-sided. \\
\end{tabular}
\section{Terminals}
\begin{tabular}{@{}ll@{}}
\verb!book! & Default is two-sided. \\
\end{tabular}
% footer
\rule{0.3\linewidth}{0.25pt}
\scriptsize
Copyright \copyright\ 2012 Pegged Developers \& Contributors
\end{multicols}
\end{document} | {
"alphanum_fraction": 0.5932517612,
"avg_line_length": 32.1071428571,
"ext": "tex",
"hexsha": "f77d07dce734be4b46bb786d410a0c8ba754a02b",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "ef04c32b8289111298b4c85770ffbe6440066eb0",
"max_forks_repo_licenses": [
"BSL-1.0"
],
"max_forks_repo_name": "chadjoan/xdc-pegged",
"max_forks_repo_path": "pegged/docs/cheatsheet/cheatsheet.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "ef04c32b8289111298b4c85770ffbe6440066eb0",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSL-1.0"
],
"max_issues_repo_name": "chadjoan/xdc-pegged",
"max_issues_repo_path": "pegged/docs/cheatsheet/cheatsheet.tex",
"max_line_length": 113,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "ef04c32b8289111298b4c85770ffbe6440066eb0",
"max_stars_repo_licenses": [
"BSL-1.0"
],
"max_stars_repo_name": "chadjoan/xdc-pegged",
"max_stars_repo_path": "pegged/docs/cheatsheet/cheatsheet.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 793,
"size": 2697
} |
% Author: Cristian Gonzales
% Created for Physical Time, 2018
\documentclass[11pt]{article}
\usepackage[margin=1in]{geometry}
\usepackage[utf8]{inputenc}
\usepackage[english]{babel}
\usepackage[document]{ragged2e}
\newcommand\tab[1][1cm]{\hspace*{#1}}
\begin{document}
\Large{\textbf{Sprint 1 Report}}\\
\Large{\textbf{Product: Physical Time iOS Application}}\\
\Large{\textbf{Team: The Physical Time Team}}\\
\Large{\textbf{Date: February 4, 2018}}\\
\vspace{-3mm}
\section{Actions to stop doing}
\vspace{-3mm}
\tab \normalsize{Based on our last sprint, we believe that we should stay committed to keeping sprint meetings on Wednesday and Friday over Skype or Google Hangouts. Either forum will suffice. We are not in close proximity to each other, so at the current time, this is most convenient.}
\section{Actions to start doing}
\vspace{-3mm}
\tab \normalsize{Actions that we should start taking are better communication on forums such as Slack and being more organized with Scrum documents. We believe that fixing this will keep our developers focused, punctual, and in the loop on the current status of our sprint (outside of sprint meetings, that is). Also, we should stay disciplined about the key things that need to be discussed during the sprint meeting. Hence, we will follow the Scrum framework more religiously, and perform full stand-up meetings.}
\section{Actions to keep doing}
\vspace{-3mm}
\tab \normalsize{Things we should continue to do that will keep the dynamic of our group healthy are contributing and having a voice at Scrum meetings. We believe that adding more functionality in the early stages will set us up for success in the long run.}
\section{Work completed \& not completed}
\vspace{-3mm}
\normalsize{The following user stories from the sprint plan were completed:}\\
\vspace{-3mm}
\begin{itemize}
\item As a developer, I want to learn and get used to the frameworks, microframeworks, and development environment needed in order to build an iOS application.
\item As a developer, I want to learn Swift and Xcode.
\end{itemize}
\vspace{-3mm}
\normalsize{The uncompleted user stories are as follows:}\\
\vspace{-3mm}
\begin{itemize}
\item As a developer, I want to be able to gather critical data points (e.g. sunrise and sunset) that will help visualize physical time relative to the user's location.
\end{itemize}
\vspace{-3mm}
\section{Work completion rate}
\vspace{-3mm}
\normalsize{Sprint 1}\\
\vspace{-3mm}
\begin{itemize}
\item Total number of user stories completed during the sprint: 1
\vspace{-3mm}
\item Total number of estimated ideal work hours completed during the sprint: 23
\vspace{-3mm}
\item Total number of days during the sprint: 14
\end{itemize}
\end{document}
| {
"alphanum_fraction": 0.7502679528,
"avg_line_length": 45.1451612903,
"ext": "tex",
"hexsha": "8e5371efa56249bc48bbfbae3e324c82efb35d97",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "3563c21d0c34503dcb4e82975e20c82621f9efef",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Physical-TIme/Physical-Time",
"max_forks_repo_path": "scrum/sprint1/PTSprint1Report.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "3563c21d0c34503dcb4e82975e20c82621f9efef",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Physical-TIme/Physical-Time",
"max_issues_repo_path": "scrum/sprint1/PTSprint1Report.tex",
"max_line_length": 517,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "3563c21d0c34503dcb4e82975e20c82621f9efef",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Physical-TIme/Physical-Time",
"max_stars_repo_path": "scrum/sprint1/PTSprint1Report.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 781,
"size": 2799
} |
\documentclass{article}
\usepackage{fancyhdr}
\usepackage{extramarks}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{amsfonts}
\usepackage{tikz}
\usepackage{physics}
\usepackage[plain]{algorithm}
\usepackage{algpseudocode}
\usepackage{hyperref}
\usetikzlibrary{automata,positioning}
%
% Basic Document Settings
%
\topmargin=-0.45in
\evensidemargin=0in
\oddsidemargin=0in
\textwidth=6.5in
\textheight=9.0in
\headsep=0.25in
\linespread{1.1}
\pagestyle{fancy}
\lhead{\hmwkAuthorName}
\chead{\hmwkClass\ : \hmwkTitle}
\rhead{\firstxmark}
\lfoot{\lastxmark}
\cfoot{\thepage}
\renewcommand\headrulewidth{0.4pt}
\renewcommand\footrulewidth{0.4pt}
\setlength\parindent{0pt}
%
% Create Problem Sections
%
\newcommand{\be}{\begin{equation}}
\newcommand{\ee}{\end{equation}}
\newcommand{\bes}{\begin{equation*}}
\newcommand{\ees}{\end{equation*}}
\newcommand{\bea}{\begin{flalign*}}
\newcommand{\eea}{\end{flalign*}}
\newcommand{\enterProblemHeader}[1]{
\nobreak\extramarks{}{Problem \arabic{#1} continued on next page\ldots}\nobreak{}
\nobreak\extramarks{Problem \arabic{#1} (continued)}{Problem \arabic{#1} continued on next page\ldots}\nobreak{}
}
\newcommand{\exitProblemHeader}[1]{
\nobreak\extramarks{Problem \arabic{#1} (continued)}{Problem \arabic{#1} continued on next page\ldots}\nobreak{}
\stepcounter{#1}
\nobreak\extramarks{Problem \arabic{#1}}{}\nobreak{}
}
\setcounter{secnumdepth}{0}
\newcounter{partCounter}
\newcounter{homeworkProblemCounter}
\setcounter{homeworkProblemCounter}{1}
\nobreak\extramarks{Problem \arabic{homeworkProblemCounter}}{}\nobreak{}
%
% Homework Problem Environment
%
% This environment takes an optional argument. When given, it will adjust the
% problem counter. This is useful for when the problems given for your
% assignment aren't sequential. See the last 3 problems of this template for an
% example.
%
\newenvironment{homeworkProblem}[1][-1]{
\ifnum#1>0
\setcounter{homeworkProblemCounter}{#1}
\fi
\section{Problem \arabic{homeworkProblemCounter}}
\setcounter{partCounter}{1}
\enterProblemHeader{homeworkProblemCounter}
}{
\exitProblemHeader{homeworkProblemCounter}
}
%
% Homework Details
% - Title
% - Due date
% - Class
% - Section/Time
% - Instructor
% - Author
%
\newcommand{\hmwkTitle}{Assignment\ \#2}
\newcommand{\hmwkDueDate}{Due on 4th February, 2019}
\newcommand{\hmwkClass}{Advanced Statistical Mechanics}
\newcommand{\hmwkClassTime}{}
\newcommand{\hmwkClassInstructor}{}
\newcommand{\hmwkAuthorName}{\textbf{Aditya Vijaykumar}}
%
% Title Page
%
\title{
%\vspace{2in}
\textmd{\textbf{\hmwkClass:\ \hmwkTitle}}\\
\normalsize\vspace{0.1in}\small{\hmwkDueDate\ }\\
% \vspace{3in}
}
\author{\hmwkAuthorName}
\date{}
\renewcommand{\part}[1]{\textbf{\large Part \Alph{partCounter}}\stepcounter{partCounter}\\}
%
% Various Helper Commands
%
% Useful for algorithms
\newcommand{\alg}[1]{\textsc{\bfseries \footnotesize #1}}
% For derivatives
\newcommand{\deriv}[1]{\frac{\mathrm{d}}{\mathrm{d}x} (#1)}
% For partial derivatives
\newcommand{\pderiv}[2]{\frac{\partial}{\partial #1} (#2)}
% Integral dx
\newcommand{\dx}{\mathrm{d}x}
% Alias for the Solution section header
\newcommand{\solution}{\textbf{\large Solution}}
% Probability commands: Expectation, Variance, Covariance, Bias
\newcommand{\E}{\mathrm{E}}
\newcommand{\Var}{\mathrm{Var}}
\newcommand{\Cov}{\mathrm{Cov}}
\newcommand{\Bias}{\mathrm{Bias}}
\begin{document}
\maketitle
(\textbf{Acknowledgements} - I would like to thank Aditya Sharma and Junaid Majeed for discussions.)
\begin{homeworkProblem}[1]
The two-particle Virial Coefficient $ b_2 $ is given by,
\begin{align*}
b_2 &= \int \dd^d \va{q}_1 \dd^d \va{q}_2 U(\va{q}_1 - \va{q}_2) \\
&= A S_{d-1}^2 \int \dd q_1 \dd{q_2} q_1^{d-1} q_2^{d-1} \dfrac{1}{\abs{q_1 - q_2}^\sigma}
\end{align*}
\end{homeworkProblem}
\begin{homeworkProblem}[2]
\textcolor{red}{Do Part (a)}\\
\textbf{Part (b)}\\
Let's denote the Vandermonde determinant by $ D_n $,
\begin{align*}
D_n = \mdet{1 & x_1 & x_1^2 & \ldots & x_1^{n-1}\\ 1 & x_2 & x_2^2 & \ldots & x_2^{n-1} \\ \vdots & \vdots & \vdots & \vdots & \vdots \\ 1 & x_n & x_n^2 & \ldots & x_n^{n-1}}
\end{align*}
We prove the required statement by using row and column operations. We first subtract the first row from every other row, $ R_i \rightarrow R_i - R_1, \ i = 2, 3, \ldots, n $. We then have,
\begin{align*}
D_n = \mdet{1 & x_1 & x_1^2 & \ldots & x_1^{n-1}\\ 0 & x_2 - x_1 & x_2^2 - x_1^2 & \ldots & x_2^{n-1} - x_1^{n-1} \\ \vdots & \vdots & \vdots & \vdots & \vdots \\ 0 & x_n - x_{1} & x_n^2 - x_{1}^2 & \ldots & x_n^{n-1} - x_{1}^{n-1}}
\end{align*}
We now proceed to make the topmost row elements $ 0 $, save for the first element.
We use $ C_j \rightarrow C_j - x_1 C_{j-1} $, applied for $ j = n, n-1, \ldots, 2 $ (working from the last column to the second, so that each subtraction uses the original previous column),
\begin{align*}
D_n &= \mdet{1 & 0 & 0& \ldots & 0\\ 0 & x_2 - x_1 & x_2(x_2 - x_1) & \ldots & x_2^{n-2}(x_2 - x_1) \\ \vdots & \vdots & \vdots & \vdots & \vdots \\ 0 & x_n - x_{1} & x_n(x_n - x_{1}) & \ldots & x_n^{n-2}(x_n - x_{1}) } \\
	&=\mdet{ x_2 - x_1 & x_2(x_2 - x_1) & \ldots & x_2^{n-2}(x_2 - x_1) \\ \vdots & \vdots & \vdots & \vdots \\ x_n - x_{1} & x_n(x_n - x_{1}) & \ldots & x_n^{n-2}(x_n - x_{1}) } \\
&=\qty(\prod_{i=2}^{n} x_i - x_{1}) \mdet{1 & x_2 & x_2^2 & \ldots & x_2^{n-2}\\ 1 & x_3 & x_3^2 & \ldots & x_3^{n-2} \\ \vdots & \vdots & \vdots & \vdots & \vdots \\ 1 & x_n & x_n^2 & \ldots & x_n^{n-2}} \\
\end{align*}
\begin{align*}
\implies D_n &= \prod_{i=2}^{n} \qty(x_i - x_{1}) \, D_{n-1} \\
D_n &= \prod_{i=2}^{n} \qty(x_i - x_{1}) \prod_{i=3}^{n} \qty(x_i - x_{2}) \, D_{n-2}\\
&= \prod_{i=2}^{n} \qty(x_i - x_{1}) \prod_{i=3}^{n} \qty(x_i - x_{2}) \ldots D_2\\
D_n &= \prod_{1\le j<i \le n} \qty(x_i - x_j)
\end{align*}
Hence proved, since the recursion terminates at $ D_2 = x_2 - x_1 $.
\end{homeworkProblem}
\begin{homeworkProblem}[4]
For $ N $ particles in a harmonic trap,
\begin{align*}
	\psi(x_1, x_2 , \ldots x_N) = \dfrac{1}{\sqrt{N!}} \det\phi_i (x_j) = \dfrac{1}{\sqrt{N!}} \det A_{ij} \\
\qq{where} \phi_i(x) = \qty(\dfrac{a^2}{\pi})^{1/4} \dfrac{1}{\sqrt{2^i i!}} H_i(a x) e^{-a^2 x^2/2} \qq{,} a^2 = \dfrac{m \omega}{\hbar}
\end{align*}
The probability density is,
\begin{align*}
P(\{ x_i\}) &= \dfrac{1}{N!} \det A^T A = \dfrac{1}{N!} \det K
\end{align*}
We define the average number density as,
\begin{align*}
\ev{\rho(x)} &= \sum_i \ev{\dfrac{1}{N} \delta (x- x_i)}\\
&= \dfrac{1}{N} \sum_i \int \prod_j \dd{x_j} \delta (x- x_i) P(\{x_k\})
\end{align*}
Let's have a closer look at the integral above. The integral over the delta function will replace the $ x_i $ in $ P(\{x_k\}) $ with $ x $. Expanding the sum over $ i $ will give us $ N $ terms, each having one argument replaced by $ x $. But we know that $ P $ is symmetric under exchange of any two arguments (e.g. $ P(x_1, x_2) = P(x_2, x_1)$), and hence we can relabel the terms in the summation, and get the following expression,
\begin{align*}
\ev{\rho(x)} &= \int \prod_{j=1}^{N-1} \dd{x_j} P(x, x_1, x_2 , \ldots, x_{N-1})\\
&= \int \prod_{j=1}^{N-1} \dd{x_j} \dfrac{1}{N!} \det K
\end{align*}
\end{homeworkProblem}
\end{document}
| {
"alphanum_fraction": 0.6497039752,
"avg_line_length": 32.5412844037,
"ext": "tex",
"hexsha": "7a49eed03af88883ebde37a82816cc08b8f2912f",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "c0aebb67332ccf0b116a3348923ab2631b586dac",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "adivijaykumar/courses",
"max_forks_repo_path": "sem2/stat/assign_2/assign_2.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "c0aebb67332ccf0b116a3348923ab2631b586dac",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "adivijaykumar/courses",
"max_issues_repo_path": "sem2/stat/assign_2/assign_2.tex",
"max_line_length": 373,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "c0aebb67332ccf0b116a3348923ab2631b586dac",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "adivijaykumar/courses",
"max_stars_repo_path": "sem2/stat/assign_2/assign_2.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2859,
"size": 7094
} |
\documentclass[main.tex]{subfiles}
\begin{document}
\section{Lattices and crystal structure} \label{sec:lattice}
\subsection{Theory} \label{sec:lattice_theory}
There are three parts to the description of a crystal structure. First is the lattice. This is the mathematical ``framework'' upon which the physical part of the crystal lies. It can be defined in a number of ways; however, the one we will use here is standard and can be found in, for example, \cite{simon} or \cite{kittel}.
\textbf{A lattice is defined as the infinite set of points produced by a linear combination of independent \emph{primitive lattice vectors}, with integer coefficients.}
Throughout this thesis, the primitive lattice vectors are labelled $ \V{a}_i $, and the coefficients $ n_i $, so for a $ d $-dimensional lattice, the lattice points $ \V{R} $ are given by
\begin{equation}\label{eq:lattice_points}
	\V{R} = \sum_{i = 1}^{d} n_i \V{a}_i.
\end{equation}
For this thesis we will mainly focus on the cases of $ d = 3 $ (visualization of lattices, families of lattice planes and scattering) and $ d = 2 $ (visualization of the band structure).
A thing to note is that the choice of primitive lattice vectors is not unique. A new set of primitive lattice vectors can be created by taking a linear combination of the original primitive lattice vectors, with integer coefficients. If the original set is ordered in a matrix $ A = \begin{pmatrix*} \V{a}_1 & \V{a}_2 & \cdots & \V{a}_n \end{pmatrix*}$, and the new set in a matrix $ B = \begin{pmatrix*} \V{b}_1 & \V{b}_2 & \cdots & \V{b}_n \end{pmatrix*} $, then $ B = MA $, where $ M $ is the matrix containing the coefficients. This matrix must have integer entries, and its inverse likewise. The integer entries of the direct coefficient matrix $ M $ make sure that any lattice point generated with the new set of primitive lattice vectors will also have integer coefficients when expressed in the old set of primitive lattice vectors. The integer entries of the inverse matrix $ M\inverse $ then make sure that this process will also happen in reverse.
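As a short aside, the criterion above can be checked numerically. The following sketch (not part of the program described in this thesis, and using made-up numbers) builds a new set of primitive lattice vectors $ B = MA $ and verifies that $ M\inverse $ also has integer entries, so that both sets generate the same lattice:
\begin{lstlisting}
# Illustrative only: check that B = M A describes the same lattice as A.
import numpy as np

A = np.array([[1.0, 0.0, 0.0],      # rows are the original a_1, a_2, a_3
              [0.0, 1.0, 0.0],
              [0.5, 0.5, 0.5]])
M = np.array([[1, 0, 0],            # integer coefficient matrix
              [0, 1, 0],
              [1, 1, 1]])

B = M @ A                           # candidate new primitive lattice vectors
M_inv = np.linalg.inv(M)
same_lattice = np.allclose(M_inv, np.round(M_inv))   # inverse also integer?
print(B)
print(same_lattice)                 # True for this choice of M
\end{lstlisting}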
The second part is the unit cell. This is the building block of the lattice. It is a region of space which, when stacked will completely tile the space. Like with the choice of primitive lattice vectors, the choice of unit cell is not unique. In particular we distinguish between two type of unit cells: the smallest possible unit cell and everything else. The smallest possible unit cell is called a \textit{primitive} unit cell, and must contain only one lattice point (it cannot contain zero lattice points, as then it would not recreate the lattice when tiled). Any unit cell containing more than one lattice point is called a \textit{conventional unit cell}. Usually a conventional unit cell is chosen for ease of calculation (as we will see in the scattering simulation), where the primitive lattice vectors constitute an orthogonal set.
The third part of the crystal structure is the basis. This is a description of the physical objects that make up the structure, and their positions in relation to the lattice. In our case the objects are atoms. The basis is specified as a list of vectors that are to be added to the lattice points, specifying the position of the atoms in the crystal.
The user can create any type of crystal they want by specifying any set of primitive lattice vector and supplying any desired basis. However a small selection of crystals will be available as presets. These include the 14 Bravais lattices with a 1 atom basis [at $ (0,0,0) $], named in table \ref{tab:bravais}. Each of these will specify the primitive lattice vectors for a corresponding primitive unit cell. Furthermore five other crystal presets will be available, named in table \ref{tab:presets}. Specifications of all of these presets are available in the appendix \ref{app:lattice}.
\begin{table}[H]
\centering
\begin{tabular}{|l|l|}
\hline
		Simple cubic & body centred cubic (bcc) \\
\hline
face centred cubic (fcc) & tetragonal \\
\hline
body centred tetragonal & orthorhombic \\
\hline
body centred orthorhombic & face centred orthorhombic \\
\hline
base centred orthorhombic & simple monoclinic \\
\hline
		base centred monoclinic & hexagonal \\
\hline
triclinic & rhombohedral \\
\hline
\end{tabular}
\caption{The 14 Bravais lattices}
\label{tab:bravais}
\end{table}
\begin{table}[H]
\centering
\begin{tabular}{|l|l|}
\hline
fcc, conventional & bcc, conventional \\
\hline
zincblende & wurtzite \\
\hline
diamond (zincblende with 1 atom) & \\
\hline
\end{tabular}
\caption{Other available crystal presets}
\label{tab:presets}
\end{table}
\subsection{Implementation}
Mathematically speaking, a lattice is infinite. A physical crystal, of course, is not. However, even though a real crystal is finite, plotting all of the atoms would be infeasible, both due to the number of atoms, and the fact that each atom would be way too small. So for the purposes of this program, only a couple of unit cells will be plotted. A good amount seems to be 8 unit cells: 2 in each of the directions specified by the primitive lattice vectors. This keeps the size of the plot relatively small, whilst still showing the important parts of the crystal structure. As such, in the following we assume that $ n_i \in \{0, 1, 2\} $ for all $ n_i $.
In creating a program that plots these structures, the thought should always be on how the end product looks. Mainly we want the plot to be as clear and instructive as possible. This constitutes plotting only full unit cells: no unit cell can have missing atoms. There is a caveat to this however. Say we define the unit cell to have atoms on the boundary, then these atoms will be in multiple unit cells. One could say no matter how many unit cells we plot, we would always get some amount of unfinished unit cells. As such we will plot the 8 unit cells along with any atoms on the border of these.
\begin{wrapfigure}{r}{2in}
\begin{center}
\includegraphics[width=\linewidth]{figures/lattice_unfinished_1.pdf}
\end{center}
\caption{One conventional unit cell for a bcc lattice. If we naively plot all atoms associated with a lattice point, we will end up with atoms outside the unit cells we want to plot.}
\label{fig:lattice_unfinished_1}
\end{wrapfigure}
Even with the above considerations we need to remember that the screen is still just a two dimensional projection of the actual three dimensional phenomena. As such without some form of depth perception, the crystal will just look like a weird two dimensional pattern. This is fixed by including grid lines. They, along with the ability to interactively rotate the crystal, give the required depth perception to allow the user to comprehend the crystal as a three dimensional structure, and not as a two dimensional sheet of dots.
For the crystals that can be expressed as a lattice with orthogonal primitive lattice vectors and a basis (cubic, tetragonal and orthorhombic), we usually want to plot orthogonal grid lines, whilst for other crystals plotting grid lines along the lattice vectors will be more useful.
In the case of plotting primitive unit cells, the ones furthest away from the origin (say with $ n_1 = n_2 = n_3 = 2 $) may not be filled. Say the basis consists of only one atom. Then this atom will just be placed on the lattice points, and there is no problem, as this is the edge of the unit cell. However, if the basis consists of two atoms (or more), where one is placed at $ (0,0,0) $ and the other has some positive coordinates, then this second atom will be in a unit cell which is not supposed to be plotted (it would necessitate plotting the lattice points corresponding to $ n_i = 3 $). An example of this is figure \ref{fig:lattice_unfinished_1}, which shows one unit cell of a simple cubic lattice, with a two atom basis (corresponding to a conventional bcc unit cell). A way to correct for this is to write each plotted atom's position as a linear combination of the primitive lattice vectors with coefficients $ n'_i $ (where the coefficients here are real numbers, as they do not necessarily align with the lattice points). If the coefficients are $ 0 \leq n'_i \leq 2 $ for all $ n'_i $, then we plot the point.
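A small sketch of this test (illustrative only; not the program's actual code) in NumPy: solve for the coefficients $ n'_i $ of each atom position in the basis of primitive lattice vectors and keep the atom only if every coefficient lies between 0 and 2.
\begin{lstlisting}
# Illustrative only: cull atoms whose lattice-vector coefficients
# fall outside [0, 2].
import numpy as np

a1, a2, a3 = np.array([1.0, 0, 0]), np.array([0, 1.0, 0]), np.array([0, 0, 1.0])
A = np.column_stack((a1, a2, a3))    # columns are the primitive lattice vectors

def keep_atom(r, lo=0.0, hi=2.0, eps=1e-9):
    n_prime = np.linalg.solve(A, r)  # coefficients of r in the lattice basis
    return bool(np.all(n_prime >= lo - eps) and np.all(n_prime <= hi + eps))

print(keep_atom(np.array([2.0, 2.0, 2.5])))  # False: outside the 8 unit cells
print(keep_atom(np.array([1.5, 0.5, 0.5])))  # True
\end{lstlisting}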
For plotting conventional unit cells when the user inputs primitive lattice vectors (for cubic, tetragonal and orthorhombic lattices), we need something similar, otherwise a situation like in figure \ref{fig:lattice_unfinished_2} may occur. To fix this we calculate the side lengths of the cuboid plot box that just contains the parallelepiped arising from plotting the lattice points with the specified coefficients ($ n_i \in \{0, 1, 2\}$), and fill this plot box completely with atoms.
To calculate this, the program creates the 8 possible vectors arising from linear combinations of the primitive lattice vectors, with coefficients from the minimum and maximum coefficients and taking the limits of the plot box as the minimum and maximum values for these 8 vectors. For the specified lattice points, these 8 vectors are:
\begin{equation}
\begin{array}{llll}
\V{v}_1 = \V{0}, & \V{v}_2 = 2\V{a}_1, & \V{v}_3 = 2\V{a}_2, & \V{v}_4 = 2\V{a}_3, \\
\V{v}_5 = 2(\V{a}_1+\V{a}_2), & \V{v}_6 = 2(\V{a}_1+\V{a}_3), & \V{v}_7 = 2(\V{a}_2+\V{a}_3), &\V{v}_8 = 2(\V{a}_1+\V{a}_2+ \V{a}_3).
\end{array}
\end{equation}
These are the vertices of the aforementioned parallelepiped. The side lengths of the plot box are then taken as the maximum coordinates for these vectors, minus the minimum values (the side length in $ x $ might be $ v_{8,x}-v_{1,x} $ for example). Then we plot atoms for a larger range of coefficients to completely fill out this plot box. If some atoms fall outside of the box we hide them.
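The corresponding computation is only a few lines of NumPy (again an illustrative sketch with made-up lattice vectors, not the program itself):
\begin{lstlisting}
# Illustrative only: plot-box limits from the 8 parallelepiped vertices.
import itertools
import numpy as np

a1 = np.array([0.5, 0.5, 0.0])
a2 = np.array([0.5, 0.0, 0.5])
a3 = np.array([0.0, 0.5, 0.5])

vertices = np.array([2 * (n1 * a1 + n2 * a2 + n3 * a3)
                     for n1, n2, n3 in itertools.product((0, 1), repeat=3)])
box_min = vertices.min(axis=0)
box_max = vertices.max(axis=0)
side_lengths = box_max - box_min
print(side_lengths)                  # [2. 2. 2.] for these fcc-like vectors
\end{lstlisting}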
\begin{wrapfigure}{r}{2in}
\begin{center}
\includegraphics[width=\linewidth]{figures/lattice_unfinished_2.pdf}
\end{center}
\caption{An attempt to plot eight conventional unit cells for an FCC lattice, by using primitive lattice vectors with coefficients in the set $ \{0, 1, 2\} $. This does not fill out all eight unit cells, so we need to plot more lattice points than this. In practise we need $ \{-2, -1, 0, 1, 2, 3, 4\} $}
\label{fig:lattice_unfinished_2}
\end{wrapfigure}
\subsection{Step-by-step}
When the program is run it will either load the chosen crystal preset or plot the user's manually input crystal. If a crystal preset is chosen, the program loads it in such a way as to make the resulting plot as informative as possible (eg. place lattice vectors along cardinal axes for an orthogonal lattice, to make plotting grid lines easier). If the user manually specifies the lattice and basis, the program attempts to classify the lattice according to the specifications in the appendix \ref{app:lattice} (except for zincblende, diamond and wurtzite). Next the program checks whether or not the crystal should be rotated to make plotting prettier.
In general the rotation algorithm tries to align one lattice vector with the $ x $-axis. $ \V{a}_1 $ is preferred, but is only chosen to lie along the $ x $-axis if it forms an orthogonal pair with at least one other primitive lattice vector. The second primitive lattice vector of the pair ($\V{a}_2$ being preferred) is then aligned along the $ y $-axis. If the three primitive lattice vectors form an orthogonal set, then the last vector of the set will now be aligned along the $ z $-axis.
The actual rotation is done by rotating the whole crystal (each primitive lattice vector and all vectors in the basis) around the cross product between the initial vector and the destination vector. Rotating the crystal such that $ \V{a}_1 $ lies along the $ x $-axis is done by rotating about $ \V{a}_1 \times \U{x} $, with an angle $ \theta $ given by $ \sin \theta = |\V{a}_1 \times \U{x}| / |\V{a}_1| $. However, this might rotate the crystal the wrong way, depending on the orientation between the two vectors. Because of this the program checks whether or not the rotated initial vector and the destination vector are parallel (that is, if the rotated $ \V{a}_1 $ is parallel to $ \U{x} $). If this is not the case, the whole crystal is rotated about the same vector, with an angle $ -2\theta $.
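A simplified NumPy sketch of this alignment step (our reading of the procedure above, using the Rodrigues rotation formula; it is not the program's actual code):
\begin{lstlisting}
# Illustrative only: align a_1 with the x-axis by rotating about a_1 x x_hat.
import numpy as np

def rotation_matrix(axis, theta):
    # Rodrigues formula: rotation by theta about the unit vector `axis`.
    axis = axis / np.linalg.norm(axis)
    K = np.array([[0.0, -axis[2], axis[1]],
                  [axis[2], 0.0, -axis[0]],
                  [-axis[1], axis[0], 0.0]])
    return np.eye(3) + np.sin(theta) * K + (1.0 - np.cos(theta)) * (K @ K)

def align_with_x(a1):
    x_hat = np.array([1.0, 0.0, 0.0])
    axis = np.cross(a1, x_hat)
    if np.allclose(axis, 0.0):            # already along the x-axis
        return np.eye(3)
    theta = np.arcsin(np.linalg.norm(axis) / np.linalg.norm(a1))
    R = rotation_matrix(axis, theta)
    # If the rotated a_1 is not parallel to x_hat, rotate by -2*theta more.
    if not np.allclose(np.cross(R @ a1, x_hat), 0.0, atol=1e-9):
        R = rotation_matrix(axis, -2.0 * theta) @ R
    return R

a1 = np.array([1.0, 1.0, 0.0])
print(align_with_x(a1) @ a1)              # approximately [sqrt(2), 0, 0]
\end{lstlisting}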
Five of the Bravais lattices have specialised rotation functions: hexagonal, base centred monoclinic and the three face centred lattices.
For the hexagonal lattice, the program detects which primitive lattice vectors constitute the triangular lattice and orients them such that one is along the $ x $-axis and the other is in the $ xy $-plane (easily done by rotating the third primitive lattice vector such that it is parallel with the $ z $-axis). The same approach is used for the base centred monoclinic, but here the program always aligns $ \V{a}_1 $ along the $ x $-axis, and $ \V{a}_2 $ in the $ xy $-plane. Here however, the easy option of rotating $ \V{a}_3 $ is not available. Instead the program uses the fact that the vector rejection of $ \V{a}_2 $ with $ \V{a}_1 $ is orthogonal to $ \V{a}_1 $ (the vector rejection of $ \V{a}_2 $ with $ \V{a}_1 $ being $ \V{a}_2 $ minus the projection of $ \V{a}_2 $ along $ \V{a}_1 $):
\begin{equation}
	\V{a}_{2,rej} = \V{a}_2 - \V{a}_{2, proj} = \V{a}_2 - \frac{\V{a}_2 \D \V{a}_1}{|\V{a}_1|^2} \V{a}_1.
\end{equation}
This vector rejection is then in the $ yz $-plane, and the crystal can be rotated around $ \V{a}_1 $ with the angle the rejection makes with $ \U{y} $: $ \cos \theta = \V{a}_{2,rej} \D \U{y} / |\V{a}_{2,rej}|$.
For the face centred lattices, the ideal, rotated lattice is created from the magnitudes of the primitive lattice vectors: if $ |\V{a}_1| = a, |\V{a}_2| = b, |\V{a}_3| = c $, then $ \V{a}'_1 = (a/2, b/2, 0), \V{a}'_2 = (a/2, 0, c/2), \V{a}'_3 = (0, b/2, c/2) $. First the crystal is rotated such that $ \V{a}_1 $ aligns with $ \V{a}'_1 $, by using their cross product. Then the crystal is rotated such that the now rotated $ \V{a}_2 $ aligns with $ \V{a}'_2 $, via the vector rejection of $ \V{a}_2 $ with $ \V{a}'_2 $. A thing to note is that these 5 rotation functions necessitate primitive lattice vectors of the form specified in the appendix. If the user specifies a lattice unlike those in the appendix, the program will not be able to identify it, and as such may rotate it ``incorrectly''. This could lead to the program not being able to impose grid lines along the cardinal axes for an fcc lattice, for example.
With the lattice and basis rotated, the program finds the limits of the plot box as written above, after which the crystal is generated. This is done by looping over the three ranges specified by the minimum and maximum coefficients, creating each lattice point $ \V{R} $ by Eq. \eqref{eq:lattice_points}. For each lattice point the program calculates the positions of the $ n $ atoms in the basis, by adding one of the $ n $ vectors in the basis to the lattice point, i.e. $ \V{r}_{atom, i} = \V{R} + \V{r}_{basis, i} $. Lists of the colours and sizes associated with each atom are also created at this point. After creating all the atoms, the program deletes any that may lie outside the limits of the plot box.
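The generation loop itself boils down to a double loop over lattice coefficients and basis vectors, sketched here in NumPy (illustrative only, with a conventional bcc basis as example):
\begin{lstlisting}
# Illustrative only: atom positions r = R + r_basis for n_i in {0, 1, 2}.
import itertools
import numpy as np

a = np.array([[1.0, 0.0, 0.0],       # primitive lattice vectors (rows)
              [0.0, 1.0, 0.0],
              [0.0, 0.0, 1.0]])
basis = np.array([[0.0, 0.0, 0.0],   # two-atom basis, e.g. conventional bcc
                  [0.5, 0.5, 0.5]])

atoms = np.array([n1 * a[0] + n2 * a[1] + n3 * a[2] + b
                  for n1, n2, n3 in itertools.product(range(3), repeat=3)
                  for b in basis])
print(atoms.shape)                   # (54, 3): 27 lattice points, 2 basis atoms
\end{lstlisting}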
The only thing missing now is to create the grid lines and plot everything. The program has two ways of creating grid lines: along the primitive lattice vectors and along the cardinal axes.
Creating grid lines along the primitive lattice vectors works by taking each lattice point $ \V{R}_n $, and finding the lattice point $ \V{R}_m $ the furthest away from it, in the (positive) direction of these lattice vectors, such that $ \V{R}_m = \V{R}_n + \alpha \V{a}_i $, where $ \alpha > 0 $, for all $ i \in \{1,2,3\}$, and creating lines between $ \V{R}_n $ and $ \V{R}_m $. This does create duplicate grid lines, but these do not show on the final plot, since they are all plotted with the same width and colour.
Creating grid lines along the cardinal axes works by finding the minimum spacing between lattice points on these axes (called $ a_x, a_y$ and $ a_z $), and using these as the spacing between grid lines. The program then finds the maximum and minimum coordinates of lattice points along the cardinal axes (called $x_{min}, x_{max}$, etc.). This is used to create ranges corresponding to each lattice points on the cardinal axes: The range for the $ x $-axis starting at $ x_{min} $ and ending at $ x_{max} $ with steps of $ a_x $.
These ranges then specify a grid of lattice points on the $ xy$, $ xz $ and $ yz $-planes. The program then creates lines orthogonal to these planes, stretching from $ z_{min} $ to $ z_{max} $ for the points in the $ xy $-plane, and similarly for the other two planes.
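A compact NumPy sketch of the cardinal-axis grid lines (illustrative only, not the program itself): find the smallest spacing between lattice-point coordinates on each axis, build the corresponding ranges, and draw a line through every point of the in-plane grid.
\begin{lstlisting}
# Illustrative only: grid lines along the cardinal axes.
import numpy as np

points = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 2],
                   [1, 1, 0], [1, 0, 2], [0, 1, 2], [1, 1, 2]], dtype=float)

def axis_ticks(coords):
    unique = np.unique(coords)                      # sorted unique coordinates
    spacing = np.min(np.diff(unique)) if unique.size > 1 else 1.0
    return np.arange(unique.min(), unique.max() + spacing / 2, spacing)

x_ticks, y_ticks, z_ticks = (axis_ticks(points[:, i]) for i in range(3))
# Vertical grid lines through every (x, y) pair, spanning z_min to z_max.
lines_z = [((x, y, z_ticks.min()), (x, y, z_ticks.max()))
           for x in x_ticks for y in y_ticks]
print(len(lines_z))                                 # 4 lines for this example
\end{lstlisting}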
With everything calculated a blank figure is created with limits as calculated above. The atoms are plotted with colours and sizes specified by the user. The grid lines are plotted with a uniform size and colour and lastly the primitive lattice vectors are plotted with corresponding labels.
\subsection{Examples}
As an example, we plot a simple cubic lattice with a two atom basis (corresponding to the basis of a bcc lattice with conventional unit cells), where the atoms on the lattice points are grey and the body centred atoms are blue (figure \ref{fig:lattice_demo_1}), we write the following:
\begin{lstlisting}
Lattice(lattice_name="conventional bcc",
colors=["xkcd:cement", "b"])
\end{lstlisting}
Or we could plot a hexagonal lattice with a one atom basis (figure \ref{fig:lattice_demo_2}):
\begin{lstlisting}
Lattice(lattice_name="hexagonal")
\end{lstlisting}
When plotted using the program, these plots are interactive and can be rotated.
\begin{figure}[h]
\centering
\begin{minipage}{.4\textwidth}
\centering
\includegraphics[width=2in]{figures/lattice_demo_1.pdf}
\captionof{figure}{A simple cubic lattice with a two atom basis: One atom on the lattice points and another in the middle of each unit cell.}
\label{fig:lattice_demo_1}
\end{minipage}%
\hfil
\begin{minipage}{.4\textwidth}
\centering
\includegraphics[width=2in]{figures/lattice_demo_2.pdf}
\captionof{figure}{A hexagonal lattice with a one atom basis. The lattice consists of a series of triangular lattices stacked on top of each other.}
\label{fig:lattice_demo_2}
\end{minipage}
\end{figure}
\end{document} | {
"alphanum_fraction": 0.747315327,
"avg_line_length": 117.4394904459,
"ext": "tex",
"hexsha": "87c5219157fe5f037eeefdd6334b2dff2814310c",
"lang": "TeX",
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2020-05-24T08:32:13.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-07-19T05:12:31.000Z",
"max_forks_repo_head_hexsha": "e26f3cee6dcfc858b606b5d3112f553836dd3990",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "NikolaiNielsen/Bachelor",
"max_forks_repo_path": "thesis/lattices.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "e26f3cee6dcfc858b606b5d3112f553836dd3990",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "NikolaiNielsen/Bachelor",
"max_issues_repo_path": "thesis/lattices.tex",
"max_line_length": 1128,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "e26f3cee6dcfc858b606b5d3112f553836dd3990",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "NikolaiNielsen/Bachelor",
"max_stars_repo_path": "thesis/lattices.tex",
"max_stars_repo_stars_event_max_datetime": "2021-06-06T09:18:38.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-05-17T02:07:14.000Z",
"num_tokens": 4905,
"size": 18438
} |
% Sample LaTeX file for creating a paper in the Morgan Kaufmannn two
% column, 8 1/2 by 11 inch proceedings format.
\documentclass[]{article}
\usepackage{proceed2e}
\usepackage[
backend=bibtex,
style=alphabetic,
citestyle=authoryear
]{biblatex}
\usepackage{aliascnt}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{caption}
\usepackage{mathtools}
\usepackage[capitalise, noabbrev]{cleveref}
\usepackage{float}
\usepackage{algorithm}
\usepackage{algorithmic}
\usepackage{bbm}
\usepackage{amsthm}
% Set the typeface to Times Roman
\usepackage{times}
%\binoppenalty=\maxdimen % to prevent breaking equations
%\relpenalty=\maxdimen % to prevent breaking equations
% Define the equation float env
\newaliascnt{eqfloat}{equation}
\newfloat{eqfloat}{h}{eqflts}
\floatname{eqfloat}{Equation}
\newtheorem{proposition}{Proposition}
\title{Differentiable Particle Filter}
\author{} % LEAVE BLANK FOR ORIGINAL SUBMISSION.
% UAI reviewing is double-blind.
% The author names and affiliations should appear only in the accepted paper.
%
%\author{ {\bf Harry Q.~Bovik\thanks{Footnote for author to give an
%alternate address.}} \\
%Computer Science Dept. \\
%Cranberry University\\
%Pittsburgh, PA 15213 \\
%\And
%{\bf Coauthor} \\
%Affiliation \\
%Address \\
%\And
%{\bf Coauthor} \\
%Affiliation \\
%Address \\
%(if needed)\\
%}
\addbibresource{DPFbibli.bib}
\begin{document}
\maketitle
\begin{abstract}
Sequential Monte Carlo (SMC, or particle filter) methods are a set of powerful techniques for continuous state space models. In addition to providing a belief for the state of an observed system, they provide an estimate of its likelihood, which is non-differentiable with respect to the model parameters. This is because standard resampling schemes consist of a non-smooth re-indexing of the Monte Carlo sample. In this article we propose two new resampling schemes that rely on regularised optimal transport techniques and are, respectively, differentiable but biased, and non-differentiable but optimal in a certain metric. Furthermore, we assess the behaviour of the methods in a linear setup by comparing with the Kalman filter, and we discuss the influence of the hyper-parameters.
\end{abstract}
\section{INTRODUCTION}
Particle filters \parencite[see][]{particlefilter} offer an efficient way of performing posterior inference in otherwise intractable non-linear state space models and provide an unbiased estimate of the likelihood of the state space model parameters given observed data. Formally, particle filters estimate hidden Markov state space models described by an unobserved state $X_t \in \mathbb{R}^{d_X}$ following $X_t|(X_{t-1}=x) \sim f_t(\cdot|x)$, $t > 0$, and $X_0 \sim \mu(\cdot) \in \mathcal{M}(\mathbb{R}^{d_X})$, together with an observed process $Y_t|(X_t=x) \sim g_t(\cdot|x) \in \mathcal{M}(\mathbb{R}^{d_Y})$. They do so by keeping track of $X_t$ in the form of a weighted sample $(w^i_t, X_t^i)$ through a method called Sequential Importance Sampling. However, this technique applied by itself leads to weight degeneracy, which needs to be mitigated \parencite[see][]{doucet2009tutorial}; a well-accepted way to fight this is the use of a resampling scheme that replaces low-weight particles with high-weight ones \parencite[see][]{hol2006resampling}.
In this paper we focus on regularised optimal transport as a resampling technique in two different ways. In \cref{sec:differentiable} we use the planning matrix as a direct map from a weighted sample $(w_i, X_i) \sim \mathbf{X}$ to an equally weighted sample $(\mathbbm{1}_N, \mathbf{Z}_i^{\epsilon}) \sim \mathbf{X}^{\epsilon}$, where $\mathbbm{1}_N$ is the vector of size $N$ filled with $\frac 1 N$, and show that $\mathbf{X}^{\epsilon} \xrightarrow[\epsilon \to 0]{\mathcal{L}} \mathbf{X}$; because the mapping we use is differentiable w.r.t.\ $(w_i, X_i)$, it provides a biased but differentiable resampling scheme. Then, in \cref{sec:optimal}, we provide a novel algorithm that learns the optimal equally weighted sample $(\mathbbm{1}_N, \mathbf{Z}_i^{\epsilon})$ in the $\epsilon$-Sinkhorn divergence sense.
Our main contributions are twofold: we introduce the planning matrix of regularised optimal transport as a differentiable resampling scheme, and we introduce a novel resampling algorithm that guarantees optimality of the resampled particles.
The rest of the paper is organised as follows. In \cref{sec:background} we give a brief recapitulation of the particle filter \parencite{particlefilter}, the biased Sinkhorn distances \parencite{cuturi2013sinkhorn} and the Sinkhorn divergence \parencite{feydy:interpolating}; in \cref{sec:differentiable} we introduce the differentiable particle filter, discuss its behaviour and provide examples; in \cref{sec:optimal} we introduce an optimal resampling scheme and provide examples for it; finally, we conclude with possible future extensions and improvements.
\section{BACKGROUND}
\label{sec:background}
\subsection{Particle Filters}
\label{subsec:PF}
Particle filters have emerged as a standard technique for non-linear data assimilation. They approximate the filtering (posterior) distribution of $X_t|(Y_t=y, X_{t-1})$ using a weighted set of $N$ samples $(w_i, X_i)$, which are updated following a predict-update approach. Any given quantity of interest $\mathbb{E}[\phi(X_t)|Y_t=y, X_{t-1}]$ can then be estimated as a weighted average $\sum_i w_i \phi(X_i)$, with variance given by a central limit theorem \parencite[see][]{chopin2004central}.
The "predict step" consists in proposing particles $X_t^i \sim p(\cdot|\mathbf{X}_{t-1}=X_{t-1}^i)$ whose weights will then be updated as per the bayes formula:
\begin{align}
w_t^i &= p(X_t^i|\mathbf{Y}_t=\mathbf{y},X_{t-1}^i) \\
&\propto p(Y_t|X_{t}^i) \cdot p(X_t^i|X_{t-1}^i) \label{eq:likelihood}\\
&= p(Y_t|X_{t}^i) \cdot w_{t-1}^i
\end{align}
These are then normalised to sum to 1. Additionally, the marginal likelihood $P(\mathbf{Y}_{u=1..t}|\mathbf{X}_{u=1..t}, \mathbf{\theta})$ up to time $t$, where $\mathbf{\theta}$ are the model parameters, can be estimated through \cref{eq:likelihood} prior to normalisation.
However, as time passes, the weights suffer from a well-known degeneracy problem: all weights but one converge to 0, so that the sample no longer provides a good distributional estimate of the posterior. This has traditionally been mitigated by ancestry resampling: instead of proposing $X_t^i \sim p(\cdot|\mathbf{X}_{t-1}=X_{t-1}^i)$, we propose $X_t^i \sim p(\cdot|\mathbf{X}_{t-1}=X_{t-1}^{a_i})$, where $a_i \in \{1,\dots,N\}$ is any index sampling such that $\mathbb{E}[\sum_i \mathbb{I}(a_i=j)|\mathbf{w}] = N w_j, \, \forall j \in \{1,\dots,N\}$ \parencite[see][]{doucet2009tutorial}. In practice this is only done when the effective sample size (ESS) $\frac{1}{\sum_i w_i^2}$ is lower than a certain threshold (usually 50\% of the sample size $N$).
This is summarised in \cref{algo:bootstrap} and \cref{algo:resampling}. Because of the reparametrization trick \parencite[see][]{kingma2013auto}, only \cref{algo:resampling} makes the particle filter non-differentiable with respect to its inputs.
\begin{algorithm}
\caption{Bootstrap Particle Filter}
\label{algo:bootstrap}
\begin{algorithmic}
\STATE{\bfseries Input:} $X_i$, $w_i$, $y$, $N$, $X$, $L$ \COMMENT{Inputs at time $t > 0$},
\IF{$\text{ESS} < N \cdot \text{threshold}$}
\STATE{Resample}
\ENDIF
\FOR{$i=1$ {\bfseries to} $N$}
\STATE{Propose:} Sample $X_i \sim p(X_{t+1}|X_t=X_i)$
\STATE{Update:} Compute $w_i \leftarrow w_i \cdot p(Y_t=y|X_t=X_i)$
\ENDFOR
\STATE{Compute log-likelihood update:} $L \leftarrow L + \log{\sum_i w_i}$
\FOR{$i=1$ {\bfseries to} $N$}
\STATE $w_i = \frac{w_i}{\sum_i w_i}$
\ENDFOR
\end{algorithmic}
\end{algorithm}
\begin{algorithm}
\caption{Generic resampling}
\label{algo:resampling}
\begin{algorithmic}
\STATE{\bfseries Input:} $X_i$, $w_i$, $N$,
\FOR{$i=1$ {\bfseries to} $N$}
\STATE{Sample:} $a_i$ \COMMENT{satisfying hypotheses, for example $a_i \sim \text{Multinomial}(\mathbf{w})$}
\STATE{Set:} $w_i = \frac{1}{N}$, $X_i = X_{a_i}$
\ENDFOR
\STATE{\bfseries Output:} $\mathbf{w}$, $\mathbf{X}$
\end{algorithmic}
\end{algorithm}
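For illustration, a minimal NumPy sketch of \cref{algo:resampling} with a multinomial choice of the ancestor indices, together with the ESS-based trigger, is given below; the variable names and the toy values are ours.
\begin{verbatim}
import numpy as np

def ess(w):
    # effective sample size: 1 / sum_i w_i^2
    return 1.0 / np.sum(w ** 2)

def multinomial_resample(rng, w, X):
    # ancestor indices a_i ~ Multinomial(w); E[#{a_i = j} | w] = N w_j
    N = len(w)
    a = rng.choice(N, size=N, p=w)
    return np.full(N, 1.0 / N), X[a]

# resample only when the ESS drops below half of the sample size
rng = np.random.default_rng(0)
w = np.array([0.7, 0.1, 0.1, 0.1])
X = np.array([[0.0], [1.0], [2.0], [3.0]])
if ess(w) < 0.5 * len(w):
    w, X = multinomial_resample(rng, w, X)
\end{verbatim}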
In this article, similarly to \parencite{reich2012nonparametric,graham2019scalable}, we consider an ensemble approach to particle filtering: instead of the resampling scheme $(w_i, X_i) \mapsto (\frac 1 N, X_{a_i})$ we provide two new mappings. First, we present a biased ensemble reweighting (\cref{sec:differentiable}) that comes from the planning matrix solution of a regularised optimal transport problem, $(w_i, X_i) \mapsto (\frac 1 N, (M_{\epsilon}^{\mathbf{w}, \mathbf{X}} X)_i)$. Second, we present an optimal recentering of the particles, learnt to minimise the Sinkhorn divergence \parencite[see][]{genevay2017learning,feydy:interpolating}, $(w_i, X_i) \mapsto (\frac 1 N, Z^{\epsilon}_i)$.
\subsection{REGULARISED OPTIMAL TRANSPORT}
\label{subsec:OT}
\subsubsection{Optimal Transport}
Optimal transport is concerned with computing a distance between measures. Formally, given two empirical probability measures $\alpha, \beta \in \mathcal{M}^+_1(\mathcal{X})$ and a symmetric positive cost function $C: \mathcal{X} \times \mathcal{X} \to \mathbb{R}$, it computes both the minimum and the minimising argument of the functional $\pi \mapsto \int_{\mathcal{X}^2} C(x,y) \, d\pi$ over the simplex
$$S_{\alpha, \beta} = \left\{ \pi \in \mathcal{M}^+_1(\mathcal{X} \times \mathcal{X} ) \;\middle|\; \int \pi(\cdot, dy) = \alpha, \int \pi(dx, \cdot) = \beta \right\}.$$
\subsubsection{Sinkhorn distances}
If the supports of $\alpha$ and $\beta$ are of size $N$, solving this problem is known to scale as $O(N^3)$. \cite{cuturi2013sinkhorn} shows that a regularised version of the problem can be considered instead:
$$\textbf{OT}_{\epsilon} := \min_{\pi \in S_{\alpha, \beta}} \int_{\mathcal{X}^2} C(x,y) d \pi + \epsilon KL(\pi||\alpha \otimes \beta)$$
Thanks to the Fenchel--Rockafellar theorem, this can be rewritten \parencite[see][]{feydy:interpolating,peyr2018computational} using dual functions $f, g \in \mathcal{C}(\mathcal{X})$:
\begin{align}
\textbf{OT}_{\epsilon} =&\max_{f, g \in \mathcal{C}(\mathcal{X})} \langle \alpha, f \rangle + \langle \beta, g \rangle \\
&- \epsilon \langle \alpha \otimes \beta, \exp \left( \frac 1 \epsilon \left( f \oplus g - C \right) \right) - 1 \rangle \nonumber\\
\text{with: } \pi_{\epsilon} = &\exp \left( \frac 1 \epsilon \left( f \oplus g - C \right) \right) \cdot \alpha \otimes \beta \label{eq:plan}
\end{align}
This has been key to the recent development of computational optimal transport \parencite{peyr2018computational}, as it translates a problem over a matrix into a problem over two related vectors. Moreover, when $\alpha = (w^X_i, X_i)_{1 \leq i \leq N}$ and $\beta = (w^Y_j, Y_j)_{1 \leq j \leq M}$, the optimality conditions on $f$ and $g$ can be written in fixed-point form \parencite{feydy:interpolating}, and one only has to iterate them successively until convergence:
\begin{align}
\forall i, j &\nonumber\\
f_i &= -\epsilon \text{LSE}_k(\log w^Y_k + \frac 1 \epsilon g_k - \frac 1 \epsilon C(X_i, Y_k)) \label{eq:fixed_1}\\
g_j &= -\epsilon \text{LSE}_k(\log w^X_k + \frac 1 \epsilon f_k - \frac 1 \epsilon C(X_k, Y_j)) \label{eq:fixed_2}
\end{align}
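For concreteness, a minimal NumPy implementation of these log-domain fixed-point iterations is sketched below; the squared Euclidean cost and the fixed number of sweeps are assumptions of the illustration, not requirements of the method.
\begin{verbatim}
import numpy as np
from scipy.special import logsumexp

def sinkhorn_potentials(wX, X, wY, Y, eps, n_iter=100):
    # Iterate the two fixed-point updates and return the duals (f, g).
    C = np.sum((X[:, None, :] - Y[None, :, :]) ** 2, axis=-1)  # cost matrix
    f, g = np.zeros(len(wX)), np.zeros(len(wY))
    log_wX, log_wY = np.log(wX), np.log(wY)
    for _ in range(n_iter):
        # f_i = -eps LSE_k(log wY_k + g_k / eps - C(X_i, Y_k) / eps)
        f = -eps * logsumexp(log_wY[None, :] + (g[None, :] - C) / eps, axis=1)
        # g_j = -eps LSE_k(log wX_k + f_k / eps - C(X_k, Y_j) / eps)
        g = -eps * logsumexp(log_wX[None, :] + (f[None, :] - C.T) / eps, axis=1)
    return f, g
\end{verbatim}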
\subsubsection{Sinkhorn divergences}
\label{subsubsec:Div}
Crucially, $\textbf{OT}_{\epsilon}(\cdot, \alpha)$ does not attain its minimum at $\alpha$, which means that the solution of $\min_{\beta} \textbf{OT}_{\epsilon}(\beta, \alpha)$ may actually lie far from $\alpha$. This was first outlined in \cite{genevay2017learning}, and a solution is to consider instead the so-called ``Sinkhorn divergence''
\begin{align}
\mathcal{W}_\epsilon(w_X, X, w_y, Y) :=
& \; \text{OT}_{\epsilon}(w_X, X, w_y, Y) \label{eq:non_sym}\\
& - 0.5 \text{OT}_{\epsilon}(w_X, X, w_X, X) \label{eq:sym_1}\\
& - 0.5 \text{OT}_{\epsilon}(w_Y, Y, w_Y, Y)\label{eq:sym_2}
\end{align}
An important point is that the symmetric optimal transport problems \cref{eq:sym_1,eq:sym_2} can be solved faster than the non-symmetric one; hence the computational burden is controlled by \cref{eq:non_sym}.
\section{DIFFERENTIABLE RESAMPLING}
\label{sec:differentiable}
Given a weighted empirical distribution $(w_i, X_i)_{1 \leq i \leq N} \in \mathcal{M}^+_1(\mathbb{R}^d)$, we are interested in finding a ``good enough'' unweighted sample, in the sense that for any $\phi : \mathbb{R}^d \to \mathbb{R}$, $\sum_{1 \leq i \leq N} w_i \phi(X_i) \approx \frac{1}{N}\sum_{1 \leq i \leq N} \phi(\tilde{X}_i)$ with small enough variance \parencite{resampling_comp}, where $(\tilde{X}_i)_i$ denotes the resampled particles. While most approaches consider a bootstrapping of the particles based on the weights, $(w_i, X_i) \mapsto (\frac 1 N, X_{a_i})$, this provides a non-differentiable mapping that prevents exact propagation of the gradient through the resampling step. Because of this, and to prevent high variance in the gradient estimation, recent works by \cite{maddison2017filtering,naesseth2017variational,le2017auto} that link SMC and variational inference simply ignore the additional impact of resampling and tweak the AutoDiff scheme to propagate the gradient only through particles that were not discarded at the resampling step. As discussed in their papers, this yields a biased estimator of the likelihood.
\subsection{OPTIMAL TRANSPORT MAP RESAMPLING}
\label{subsec:otResampling}
To the best of our knowledge, the first paper to introduce optimal transport maps as an ensemble technique for resampling is \cite{reich2012nonparametric}; the method has since been applied in \cite{graham2019scalable} and \cite{jacob2016coupling} to provide a local mapping from prior to posterior and to couple same-seed particle filter trajectories in order to compute sensitivities with respect to hyper-parameters.
The paradigm introduced in \cite{reich2012nonparametric} can be phrased as follows.
Let $(w_i, X_i)_i$ be a weighted sample before resampling and $C \in \mathbb{R}^{N \times N}$ be a symmetric positive cost matrix, and let us consider the following optimal transport problem:
\begin{align}
\mathbf{U} = \text{argmin}_{M \in S_{\mathbf{w}, \mathbf{1}_N}} \sum_{i,j} C_{i,j} M_{i,j} \label{eq:ot_res}
\end{align}
As discussed in \cite{reich2012nonparametric}, this matrix defines a Markov transition matrix $\mathbf{P} := N \mathbf{U}$, and the mapping $\tilde{\mathbf{X}} = \mathbf{P} \mathbf{X}$ provides a consistent estimate of $(w_i, X_i)_i$.
However, the mapping $(w_i, X_i)_i \to \mathbf{P}\mathbf{X}$ is expensive to compute \parencite[see][]{cuturi2013sinkhorn}.
\subsection{DIFFERENTIABLE RESAMPLING}
\label{subsec:regOTResamp}
Instead of considering the non-regularised problem in \cref{eq:ot_res}, we use the mapping resulting from the regularised version of the optimal transport problem, \cref{eq:plan} \parencite{cuturi2013sinkhorn,feydy:interpolating}: $(\mathbf{w}, \mathbf{X}) \to \mathbf{P}_\epsilon \mathbf{X}$.
\begin{proposition}
\label{prop:differentiability}
The mapping $$(\mathbf{w}, \mathbf{X}) \to \mathbf{P}_\epsilon = \exp \left( \frac 1 \epsilon \left( \mathbf{f}^T + \mathbf{g} - \mathbf{C}(\mathbf{X},\mathbf{X}) \right) \right) \cdot \mathbf{w}^T$$ where $\mathbf{f}$ and $\mathbf{g}$ are given by \cref{eq:fixed_1,eq:fixed_2} is differentiable, with:
\begin{align}
\forall i, j &\nonumber\\
\frac{\partial f_i}{\partial \cdot} &= \frac{\partial}{\partial \cdot}-\epsilon \text{LSE}_k(\log w^Y_k + \frac 1 \epsilon g_k - \frac 1 \epsilon C(X_i, Y_k)) \label{eq:fixed_1_deriv}\\
\frac{\partial g_j}{\partial \cdot} &= \frac{\partial}{\partial \cdot}-\epsilon \text{LSE}_k(\log w^X_k + \frac 1 \epsilon f_k - \frac 1 \epsilon C(X_k, Y_j)) \label{eq:fixed_2_deriv}
\end{align}
\end{proposition}
\begin{proof}
The system of equations \cref{eq:fixed_1,eq:fixed_2} defines a system of implicit functions to which we can apply the implicit function theorem. In this case its application is trivial, as the relationship is linear.
\end{proof}
In practice this means that the gradients can be propagated by automatic differentiation through the last step of the Sinkhorn iterates only, provided that the algorithm has converged.
When using automatic differentiation, this can be summarised as:
\begin{algorithm}
\caption{Biased resampling}
\label{algo:biasedResampling}
\begin{algorithmic}
\STATE{\bfseries Input:} $X_i$, $w_i$, $N$, $n_{\text{steps}}$
\STATE Stop registering gradients
\STATE{\bfseries Initialise:} $\mathbf{f}$, $\mathbf{g}$
\STATE
\FOR{$i=1$ {\bfseries to} $n_{\text{steps}}-1$}
\STATE evaluate \cref{eq:fixed_1} and \cref{eq:fixed_2} simultaneously
\ENDFOR
\STATE Register gradients
\STATE Set gradients of $\mathbf{f}$, $\mathbf{g}$ to $0$
\STATE Evaluate \cref{eq:fixed_1}, \cref{eq:fixed_2}
\STATE{\bfseries Output:} $\frac 1 N \mathbbm{1}_N$, $\mathbf{P} \mathbf{X}$ with $\mathbf{P}$ given by \cref{eq:plan}
\end{algorithmic}
\end{algorithm}
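A possible PyTorch rendering of \cref{algo:biasedResampling} is sketched below. The squared Euclidean cost, the fixed number of sweeps and the marginal convention (uniform weights on the rows and $\mathbf{w}$ on the columns, so that the rows of $\mathbf{P}$ sum to one) are assumptions of this sketch rather than prescriptions of the algorithm.
\begin{verbatim}
import torch

def biased_resample(w, X, eps, n_steps=100):
    # Return (1/N, P_eps X), where P_eps is built from the regularised plan.
    N = w.shape[0]
    uniform = torch.full_like(w, 1.0 / N)
    C = torch.cdist(X, X) ** 2
    f = torch.zeros_like(w)
    g = torch.zeros_like(w)
    with torch.no_grad():  # no gradients for the first n_steps - 1 sweeps
        for _ in range(n_steps - 1):
            f = -eps * torch.logsumexp(
                torch.log(w)[None, :] + (g[None, :] - C) / eps, dim=1)
            g = -eps * torch.logsumexp(
                torch.log(uniform)[None, :] + (f[None, :] - C) / eps, dim=1)
    # one differentiable sweep (implicit function theorem argument)
    f = -eps * torch.logsumexp(
        torch.log(w)[None, :] + (g[None, :] - C) / eps, dim=1)
    g = -eps * torch.logsumexp(
        torch.log(uniform)[None, :] + (f[None, :] - C) / eps, dim=1)
    # transport matrix P = N * plan; its rows sum to one at convergence
    P = torch.exp((f[:, None] + g[None, :] - C) / eps) * w[None, :]
    return uniform, P @ X
\end{verbatim}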
\subsubsection{Illustration}
To illustrate the behaviour of the resampling scheme, we consider a bimodal 2D distribution of 500 points constructed as follows: 500 points $X_i \in \mathbb{R}^2$ are drawn uniformly within a circle of radius 1; half of the sample, chosen at random, is then given weights proportional to the density of $\mathcal{N}\left(\left(\begin{matrix}-0.5 \\ 0.5\end{matrix}\right), \left(\begin{matrix}0.3 & 0.\\ 0. & 0.3 \end{matrix}\right)\right)$, and the other half proportional to the density of $\mathcal{N}\left(\left(\begin{matrix}0.5 \\ -0.5\end{matrix}\right), \left(\begin{matrix}0.1 & 0.\\ 0. & 0.1 \end{matrix}\right)\right)$; see \cref{fig:BiasedTransport}. This corresponds to a mixture of the two Gaussians above, truncated to the unit disc $||X||^2_2 \leq 1$.
\begin{figure}
\centering
\captionsetup{justification=centering}
\includegraphics[width=\linewidth]{BiasedTransport}
\caption{Biased transport comparison}
\label{fig:BiasedTransport}
\end{figure}
\cref{fig:BiasedTransport} illustrates a well-known problem of the regularised Sinkhorn algorithm: as the regularisation increases, the resulting transport plan converges to the one that minimises $KL(\pi||\alpha\otimes\beta)$, in this case $\mathbf{w_X} \otimes \mathbf{\mathbbm{1}_N}$; hence the resulting resampling of $\mathbf{X}$, $\mathbf{X_\epsilon} = \mathbf{P_\epsilon} \mathbf{X}$, collapses to the weighted mean of the sample $\mathbf{X}$.
\subsubsection{Application To A State Space Model}
We now consider the following noisy resonator:
\begin{align}
x_\text{true}(t) = \sin(t) + \frac 1 2 \mathcal{N}(0, 1), \quad t = 0, 0.1, \dots, 20
\end{align}
which we model with the following 2D state space model:
\begin{align}
\label{eq:ssm}
x_0(t + dt) &= x_0(t) + x_1(t) dt + \mathcal{N}(0, \sigma_0^2)\\
x_1(t + dt) &= x_1(t) - x_0(t) dt + \mathcal{N}(0, \sigma_1^2)\\
y(t) &= x_0(t) + \mathcal{N}(0, \sigma_y^2)
\end{align}
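For reproducibility, a minimal NumPy simulation of this model is given below; the noise scales and the initial state are illustrative values, not the ones used in our experiments.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
dt, T = 0.1, 20.0
sigma_0, sigma_1, sigma_y = 0.1, 0.1, 0.5   # illustrative noise scales

ts = np.arange(0.0, T + dt, dt)
x = np.zeros((len(ts), 2))
y = np.zeros(len(ts))
x[0] = [0.0, 1.0]                            # illustrative initial state
for k in range(1, len(ts)):
    x0, x1 = x[k - 1]
    x[k, 0] = x0 + x1 * dt + sigma_0 * rng.standard_normal()
    x[k, 1] = x1 - x0 * dt + sigma_1 * rng.standard_normal()
    y[k] = x[k, 0] + sigma_y * rng.standard_normal()
\end{verbatim}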
\begin{figure}
\centering
\captionsetup{justification=centering}
\includegraphics[width=\linewidth]{KF_OptimalTransportPF_comp}
\caption{Filtering applied to \cref{eq:ssm}.\\
Top: Kalman filter; second and third: biased transport with $\epsilon=0.25, 1$; bottom: systematic resampling}
\label{fig:kf_illustration}
\end{figure}
\cref{fig:kf_illustration} compares \cref{algo:biasedResampling} for different regularisations with \cref{algo:resampling} using systematic resampling \parencite{resampling_comp}; all filters use 100 particles. The shaded zone corresponds to $\pm 2$ standard deviations.
The collapsing of the sample due to the regularisation, highlighted in \cref{fig:BiasedTransport}, is visible in \cref{fig:kf_illustration}: each resampling step results in a decrease in the variance of the sample.
\subsubsection{Likelihood evaluation}
Together with the resampling in \cref{algo:resampling}, the formula in \cref{algo:bootstrap} provides an unbiased estimate of the likelihood of the associated state space model and also comes with a central limit theorem \cite{chopin2004central}. However, the resulting function $\hat{\mathcal{L}}(\mathbf{y}|\mathbf{\theta})$ is not continuous and, as a consequence, not differentiable with respect to $\mathbf{\theta}$. On the other hand, using \cref{algo:biasedResampling} provides a theoretically everywhere-differentiable scheme for ``recentering'' the particles, albeit at the cost of a bias in the resulting estimate.
\begin{figure*}
\centering
\captionsetup{justification=centering}
\includegraphics[width=\textwidth]{likelihood}
\caption{Likelihood estimate w.r.t. the observation log-error, 400 points}
\label{fig:likelihood}
\end{figure*}
\begin{figure*}
\centering
\captionsetup{justification=centering}
\includegraphics[width=\textwidth]{likelihoodGradient}
\caption{Gradient of the likelihood estimate w.r.t. the observation log-error}
\label{fig:likelihoodGradient}
\end{figure*}
\section{OPTIMAL RESAMPLING}
\label{sec:optimal}
While the scheme in \cref{sec:differentiable} has the advantage of providing a gradient for the resampling scheme, it suffers from the inconvenience of providing a collapsed estimate of the state after resampling, which in turn results in a biased estimate of the likelihood. Instead of learning a biased linear mapping, we can learn the best unweighted point cloud minimising a distance to the weighted degenerate sample. To this end we consider the Sinkhorn divergence (see \cref{subsubsec:Div}).
\subsection{LEARNT POINT CLOUD}
As in \cite{genevay2017learning}, we consider the optimisation problem given by the Sinkhorn divergence $\mathcal{W}_\epsilon(\alpha, \beta) = \text{OT}_{\epsilon}(\alpha,\beta) - \frac 1 2 \text{OT}_{\epsilon}(\alpha,\alpha) - \frac 1 2 \text{OT}_{\epsilon}(\beta,\beta)$, where in our case $\alpha$ is the weighted degenerate sample $(w_i, X_i)_i$ and $\beta$ is the target unweighted sample $(\frac 1 N, Z_i)_i$: this amounts to a gradient descent on $Z$ with respect to the loss given by the Sinkhorn divergence.
The resampling algorithm is therefore modified as in \cref{algo:optimalResampling}.
\begin{algorithm}
\caption{Optimal resampling}
\label{algo:optimalResampling}
\begin{algorithmic}
\STATE{\bfseries Input:} $X_i$, $w_i$, $N$, $n_{\text{steps}}$, tolerance, $\lambda$ \COMMENT{Learning rate}
\STATE Stop registering gradients
\STATE{\bfseries Initialise:} $\mathbf{f}$, $\mathbf{g}$
\STATE $Z \leftarrow X$
\FOR{$i=1$ {\bfseries to} $n_{\text{steps}}$}
\IF{$\mathcal{W}_\epsilon(w,X,\frac 1 N, Z)<\text{tolerance}$}
\STATE Break
\ENDIF
\STATE $Z \leftarrow Z - \lambda \nabla_Z \mathcal{W}_\epsilon$
\ENDFOR
\STATE{\bfseries Output:} $\frac 1 N \mathbbm{1}_N$, $Z$
\end{algorithmic}
\end{algorithm}
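A self-contained PyTorch sketch of \cref{algo:optimalResampling} is given below. The entropic optimal transport values are approximated by the dual objective $\langle \alpha, f \rangle + \langle \beta, g \rangle$ after a fixed number of log-domain Sinkhorn sweeps; the squared Euclidean cost, the learning rate and the iteration counts are illustrative choices, not the ones used in our experiments.
\begin{verbatim}
import torch

def ot_eps(wx, X, wy, Y, eps, n_iter=50):
    # Entropic OT value approximated by <alpha, f> + <beta, g> after a
    # fixed number of log-domain Sinkhorn sweeps.
    C = torch.cdist(X, Y) ** 2
    f, g = torch.zeros_like(wx), torch.zeros_like(wy)
    for _ in range(n_iter):
        f = -eps * torch.logsumexp(
            torch.log(wy)[None, :] + (g[None, :] - C) / eps, dim=1)
        g = -eps * torch.logsumexp(
            torch.log(wx)[None, :] + (f[None, :] - C.t()) / eps, dim=1)
    return torch.dot(wx, f) + torch.dot(wy, g)

def sinkhorn_divergence(wx, X, wy, Y, eps):
    return (ot_eps(wx, X, wy, Y, eps)
            - 0.5 * ot_eps(wx, X, wx, X, eps)
            - 0.5 * ot_eps(wy, Y, wy, Y, eps))

def optimal_resample(w, X, eps=0.1, lr=0.5, n_steps=200, tol=1e-5):
    # Gradient descent on the point cloud Z, initialised at X.
    uniform = torch.full_like(w, 1.0 / w.shape[0])
    Z = X.clone().requires_grad_(True)
    for _ in range(n_steps):
        loss = sinkhorn_divergence(w, X, uniform, Z, eps)
        if loss.item() < tol:
            break
        loss.backward()
        with torch.no_grad():
            Z -= lr * Z.grad
        Z.grad.zero_()
    return uniform, Z.detach()
\end{verbatim}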
\begin{figure*}
\centering
\captionsetup{justification=centering}
\includegraphics[width=\textwidth]{LearntReweighting}
\caption{Gradient Flow for learning the unweighted sample\\
Top left: original sample, right and bottom: evolution of the gradient descent}
\label{fig:LearntReweighting}
\end{figure*}
The behaviour of \cref{algo:optimalResampling} is shown in \cref{fig:LearntReweighting}.
\section{CONCLUSION AND FUTURE WORKS}
\label{sec:conclusion}
\subsubsection*{References}
\printbibliography[heading=none]
\end{document}
| {
"alphanum_fraction": 0.7221805138,
"avg_line_length": 68.1136363636,
"ext": "tex",
"hexsha": "9fa6872af668f23861057d303f75026b1a10148b",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-09-21T00:48:19.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-09-21T00:48:19.000Z",
"max_forks_repo_head_hexsha": "ec5f43a5e20d1280260e482ee0f9139fb9d1ca2b",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "AdrienCorenflos/PFlow",
"max_forks_repo_path": "DPF.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "ec5f43a5e20d1280260e482ee0f9139fb9d1ca2b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "AdrienCorenflos/PFlow",
"max_issues_repo_path": "DPF.tex",
"max_line_length": 1076,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "ec5f43a5e20d1280260e482ee0f9139fb9d1ca2b",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "AdrienCorenflos/PFlow",
"max_stars_repo_path": "DPF.tex",
"max_stars_repo_stars_event_max_datetime": "2022-01-02T12:38:39.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-01-02T12:38:39.000Z",
"num_tokens": 7527,
"size": 23976
} |
\documentclass[12pt]{cdblatex}
\usepackage{exercises}
\usepackage{fancyhdr}
\usepackage{footer}
\begin{document}
% --------------------------------------------------------------------------------------------
\section*{Exercise 1.4 Experiments with sorting}
\begin{cadabra}
{a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z#}::Indices(position=independent).
\partial{#}::PartialDerivative.
expr := C^{f}
w^{e}
B^{d}
v^{c}
A^{b}
u^{a}. # cdb (ex-0104.100,expr)
sort_product (expr) # cdb (ex-0104.101,expr)
expr := \Omega_{f}
\gamma_{e}
\Pi_{d}
\beta_{c}
\Gamma_{b}
\alpha_{a}. # cdb (ex-0104.200,expr)
sort_product (expr) # cdb (ex-0104.201,expr)
expr := C^{f}
w^{e}
B^{d}
v^{c}
A^{b}
u^{a}
\Omega_{f}
\gamma_{e}
\Pi_{d}
\beta_{c}
\Gamma_{b}
\alpha_{a}. # cdb (ex-0104.300,expr)
sort_product (expr) # cdb (ex-0104.301,expr)
expr := \partial_{f}{C^{f}}
w^{l}
\partial_{d}{B^{d}}
v^{k}
\partial_{b}{A^{b}}
u^{j}
\Omega_{i}
\partial^{e}{ \gamma_{e}}
\Pi_{h}
\partial^{c}{\beta_{c}}
\Gamma_{g}
\partial^{a}{\alpha_{a}}. # cdb (ex-0104.400,expr)
sort_product (expr) # cdb (ex-0104.401,expr)
expr := \partial{C}
w
\partial{B}
v
\partial{A}
u
\Omega
\partial{ \gamma}
\Pi
\partial{\beta}
\Gamma
\partial{\alpha}. # cdb (ex-0104.500,expr)
sort_product (expr) # cdb (ex-0104.501,expr)
expr := A_{b}
A_{a}
A_{c d e}
A_{f g}. # cdb (ex-0104.600,expr)
sort_product (expr) # cdb (ex-0104.601,expr)
expr := A_{a} A^{a}
+ A^{a} A_{a}. # cdb (ex-0104.700,expr)
sort_product (expr) # cdb (ex-0104.701,expr)
\end{cadabra}
% \clearpage
\begin{dgroup*}[spread=2pt]
\Dmath*{\cdb*{ex-0104.100}}\Dmath*{\cdb*{ex-0104.101}\V{10pt}}
\Dmath*{\cdb*{ex-0104.200}}\Dmath*{\cdb*{ex-0104.201}\V{10pt}}
\Dmath*{\cdb*{ex-0104.300}}\Dmath*{\cdb*{ex-0104.301}\V{10pt}}
\Dmath*{\cdb*{ex-0104.400}}\Dmath*{\cdb*{ex-0104.401}\V{10pt}}
\Dmath*{\cdb*{ex-0104.500}}\Dmath*{\cdb*{ex-0104.501}\V{10pt}}
\Dmath*{\cdb*{ex-0104.600}}\Dmath*{\cdb*{ex-0104.601}\V{10pt}}
\Dmath*{\cdb*{ex-0104.700}}\Dmath*{\cdb*{ex-0104.701}}
\end{dgroup*}
\end{document}
| {
"alphanum_fraction": 0.4236159776,
"avg_line_length": 26.9245283019,
"ext": "tex",
"hexsha": "0a89f4f9f1412a33d907fa662e13fc351eec403b",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2019-12-22T13:52:19.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-12-22T13:52:19.000Z",
"max_forks_repo_head_hexsha": "5b428ae158b5346315ab6c975dee9de933e5c3d7",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "leo-brewin/cadabra-tutorial",
"max_forks_repo_path": "source/cadabra/exercises/ex-0104.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "5b428ae158b5346315ab6c975dee9de933e5c3d7",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "leo-brewin/cadabra-tutorial",
"max_issues_repo_path": "source/cadabra/exercises/ex-0104.tex",
"max_line_length": 94,
"max_stars_count": 20,
"max_stars_repo_head_hexsha": "5b428ae158b5346315ab6c975dee9de933e5c3d7",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "leo-brewin/cadabra-tutorial",
"max_stars_repo_path": "source/cadabra/exercises/ex-0104.tex",
"max_stars_repo_stars_event_max_datetime": "2022-02-27T22:55:47.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-12-20T07:49:47.000Z",
"num_tokens": 971,
"size": 2854
} |
In this section, we develop a technique for probabilistic transient power and
temperature analysis of electronic systems using the uncertainty-unaware
approach presented in \sref{transient-solution} combined with the machinery
described in \sref{chaos-uncertainty-analysis}. Our goal is to obtain a
technique that preserves the gradual solution process that is at the heart of
transient analysis; see \sref{transient-analysis}. In this way, the designer
retains fine control over transient calculations.
\subsection{\problemtitle}
As stated in \sref{chaos-uncertainty-analysis}, the designer is supposed to
decide on the system model that produces the quantity of interest. Assuming the
general system and temperature models given in \sref{system-model} and
\sref{temperature-model}, respectively, the only component that is left to
define is the power model, since it is what introduces the actual workload into
the system. Denote the power model by
\begin{equation} \elab{chaos-power-model}
\vp = \f(i, \vq, \vu)
\end{equation}
where $\f: \natural[+] \times \real^\np \times \real^\nu \to \real^\np$ is a
function that evaluates the power consumption $\vp \in \real^\np$ of the
processing elements at time step~$i$ given their heat dissipation $\vq \in
\real^\np$ and an assignment of the uncertain parameters $\vu \in \real^\nu$.
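To make the interface concrete, a hypothetical power model written as a Python function is sketched below; the workload values, the leakage form, and the assumption of one uncertain parameter per processing element are purely illustrative and are not part of the framework.
\begin{verbatim}
import numpy as np

# Illustrative workload: dynamic power (W) of four processing elements
# at three time steps.
workload = np.array([[1.0, 0.5, 0.8, 0.2],
                     [1.2, 0.4, 0.9, 0.3],
                     [0.9, 0.6, 0.7, 0.2]])

def power_model(i, q, u):
    # Hypothetical f(i, q, u): dynamic power from the workload at step i
    # plus a leakage term that grows with the temperature q (in K) and is
    # perturbed by the uncertain parameters u (one per processing element,
    # which is an assumption of this sketch).
    leakage = 0.1 * np.exp(0.005 * (q - 300.0)) * (1.0 + u)
    return workload[i] + leakage
\end{verbatim}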
\begin{remark}
It should be understood that \vp, \vq, and \vu are random vectors in general,
and that \f consumes $\vq(\omega)$ and $\vu(\omega)$ and yields $\vp(\omega)$
for some particular outcome $\omega \in \Omega$. The function \f \perse is
purely deterministic.
\end{remark}
The designer can choose any \f. For instance, it can be a closed-form formula or
a piece of code. The only assumption we make is that \f is smooth in \vz and,
when viewed as a random variable, belongs to $\L{2}{\Omega, \F, \probability}$
(see \xref{probability-theory}), which is generally applicable to most physical
systems \cite{xiu2010}. The definition of \f is flexible enough to account for
such phenomena as the interdependence between power and temperature discussed in
\sref{power-model}.
Our solution to transient analysis under process variation is based on the one
presented in \sref{transient-solution}. The major difference is that
\eref{temperature-model} implicitly operates on stochastic quantities in the
context of this chapter. Consequently, the recurrent solution in
\eref{transient-recurrence}, that is,
\begin{equation} \elab{chaos-recurrence-original}
\vs_i = \m{E} \vs_{i - 1} + \m{F} \vp_i
\end{equation}
for $i = \range{1}{\ns}$, is stochastic as well. In the deterministic case, it
can be readily employed in order to perform transient power and temperature
analysis. In the probabilistic case, however, the situation is substantially
different, since $\vp_i$ and hence $\vs_i$ and $\vq_i$ are stochastic
quantities. Moreover, at each step, $\vp_i$ is an arbitrary transformation of
the uncertain parameters \vu and stochastic temperature $\vq_i$, which results
in a random vector with a generally unknown probability distribution.
Furthermore, $\vp_i$, $\vq_i$, $\vs_i$, and \vu are dependent random vectors,
since the first three are functions of the last one. Therefore, the operations
in \eref{chaos-recurrence-original} are to be performed on dependent random
vectors with arbitrary distributions, which, in general, have no closed-form
solutions.
Let us now summarize Stage~1 in \fref{chaos-overview}. In this section, the
quantity of interest \g is the transient power and temperature
profiles---denoted by \mp and \mq, respectively---that correspond to the
workload specified by \f as shown in \eref{chaos-power-model}. Regarding the
uncertain parameters \vu, they are left unspecified, since the construction
below is general with respect to \vu; however, a concrete scenario will be
considered in the next section, \sref{chaos-transient-application}. We now
proceed directly to Stage~3, as Stage~2 requires no additional attention in this
case.
\subsection{Surrogate Construction}
The goal now is to transform the recurrence in \eref{chaos-recurrence-original}
in such a way that the distributions of power and temperature can be efficiently
estimated. Based on the methodology presented in
\sref{chaos-uncertainty-analysis}, we construct a \ac{PC} expansion of \f so
that it can then be propagated through the recurrence in
\eref{chaos-recurrence-original} in order to obtain a \ac{PC} expansion of power
and a \ac{PC} expansion of temperature.
The expansion of $\vp_i$, which is required in \eref{chaos-recurrence-original}
and computed via \f shown in \eref{chaos-power-model}, is as follows:
\[
\chaos{\nz}{\lc}{\vp_i} = \sum_{\vj \in \sparseindex{\nz}{\lc}} \hat{\vp}_{i \vj} \psi_{\vj}
\]
where $\set{\psi_{\vj}}{\real^\nz \to \real}$ are the basis polynomials,
$\set{\hat{\vp}_{i \vj}} \subset \real^\np$ are the corresponding coefficients,
and the index set $\sparseindex{\nz}{\lc}$ is the one given in
\eref{index-total-order-anisotropic}. In addition,
\rref{chaos-multidimensional-output} is worth recalling. It can be seen in
\eref{chaos-recurrence-original} that, due to the linearity of the operations
involved in the recurrence, $\vs_i$ attains such a \ac{PC} expansion that has
the same structure as the expansion of $\vp_i$. The recurrence in
\eref{chaos-recurrence-original} can then be rewritten as follows:
\[
\chaos{\nz}{\lc}{\vs_i} = \m{E} \, \chaos{\nz}{\lc}{\vs_{i - 1}} + \m{F} \, \chaos{\nz}{\lc}{\vp_i}
\]
for $i = \range{1}{\ns}$. Consequently, there are two interwoven \ac{PC}
expansions: one is for power, and the other for temperature. The two expansions
have the same polynomial basis but different coefficients. In order to
understand the structure of the above formula better, let us spell it out as
\[
\sum_{\vj \in \sparseindex{\nz}{\lc}} \hat{\vs}_{i \vj} \psi_{\vj} =
\sum_{\vj \in \sparseindex{\nz}{\lc}} \left(\m{E} \hat{\vs}_{i - 1, \vj} + \m{F} \hat{\vp}_{i \vj}\right) \psi_{\vj}.
\]
Making use of the orthogonality property, which can be seen in
\eref{chaos-orthogonality}, we obtain the following recurrence:
\begin{equation} \elab{chaos-recurrence}
\hat{\vs}_{i \vj} = \m{E} \hat{\vs}_{i - 1, \vj} + \m{F} \hat{\vp}_{i \vj}
\end{equation}
for $i = \range{1}{\ns}$ and $\vj \in \sparseindex{\nz}{\lc}$. The coefficients
$\set{\hat{\vp}_{i \vj}}$ needed in this recurrence are found using a suitable
quadrature $\quadrature{\nz}{\lq}$ (see \xref{numerical-integration}) as
follows:
\[
\hat{\vp}_{i \vj} = \quadrature{\nz}{\lq}{\vp_i \psi_{\vj}}
\]
for $i = \range{1}{\ns}$ and $\vj \in \sparseindex{\nz}{\lc}$ where the
operation is elementwise, and it can be efficiently performed by virtue of the
projection matrix in \eref{chaos-projection-matrix}.
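For illustration, the sketch below carries out one step of this construction in a single stochastic dimension, using probabilists' Hermite polynomials and a Gauss--Hermite rule; the matrices \texttt{E} and \texttt{F} and the function \texttt{power\_model} stand for the corresponding quantities of this section, the coefficient arrays hold one coefficient vector per row, and all sizes are illustrative.
\begin{verbatim}
import numpy as np
from math import factorial
from numpy.polynomial import hermite_e as H

n_c, n_q = 4, 8                    # polynomial terms, quadrature points
z, wq = H.hermegauss(n_q)          # Gauss-Hermite nodes and weights
wq = wq / np.sqrt(2.0 * np.pi)     # normalise w.r.t. the Gaussian measure
psi = np.array([H.hermeval(z, np.eye(n_c)[j])
                for j in range(n_c)])                    # psi_j(z_k)
norm = np.array([factorial(j) for j in range(n_c)])      # E[psi_j^2]

def pc_step(s_hat, q_hat, i, E, F, power_model):
    # One step of the deterministic recurrence for the PC coefficients:
    # project the power model onto the basis, then update the state
    # coefficients.
    p_nodes = np.stack([power_model(i, q_hat.T @ psi[:, k], z[k])
                        for k in range(n_q)])        # power at each node
    p_hat = (psi * wq) @ p_nodes / norm[:, None]     # quadrature projection
    s_hat = s_hat @ E.T + p_hat @ F.T                # coefficient recurrence
    return s_hat, p_hat
\end{verbatim}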
The final step of the construction process is to combine \eref{chaos-recurrence}
with \eref{temperature-algebraic} in order to obtain the coefficients of the
\ac{PC} expansion of the temperature vector $\vq_i$. Note that, since power
depends on temperature, which is discussed in \sref{power-model}, at each step
of the recurrence in \eref{chaos-recurrence}, the computation of $\hat{\vp}_{i
\vj}$ via \f should be done with respect to the expansion of $\vq_{i - 1}$.
The construction process of the stochastic power and temperature profiles is
estimated to have the following time complexity per time step:
\[
\bigo{\nn^2 \nc + \nn \np \nq \nc + \nq \f(\np)}
\]
where \nn, \np, \nc, and \nq are the number of thermal nodes, processing
elements, polynomial terms, and quadrature points, respectively; and $\f(\np)$
denotes the contribution of the power model shown in \eref{chaos-power-model}.
The expression can be detailed further by expanding \nc and \nq. Assuming the
isotropic total-order index set in \eref{index-total-order-isotropic}, \nc can
be calculated as shown in \eref{index-total-order-isotropic-length}. This
formula behaves as $\nz^\lc / \lc!$ in the limit with respect to \nz. Regarding
\nq, for quadratures based on the full tensor product given in
\eref{quadrature-tensor}, $\log(\nq) \propto \nz$, which means that the
dependency of \nq on \nz is exponential.
It can be seen that the theory of \ac{PC} expansions suffers from the curse of
dimensionality \cite{eldred2008, xiu2010}: when \nz increases, the number of
polynomial terms as well as the complexity of the corresponding coefficients
exhibit growth, which is exponential without special treatments. The problem
does not have a general solution and is one of the central topics of many
ongoing studies.
We mitigate the curse of dimensionality by \one~keeping the number of stochastic
dimensions low via model order reduction, which is a part of $\transform$ shown
in \eref{chaos-transformation} and is based on the \ac{KL} decomposition
described in \xref{probability-transformation}, and \two~utilizing efficient
construction techniques, which is discussed in \sref{chaos-construction} and
\xref{numerical-integration}. For instance, in the case of isotropic integration
grids based on the Smolyak algorithm and Gaussian quadratures, $\log(\nq)
\propto \log(\nz)$, which means that the dependency of \nq on \nz is only
polynomial \cite{heiss2008}. Anisotropic constructions allow for further
reduction.
Let us summarize Stage~3 in \fref{chaos-overview}. Recall the stochastic
recurrence in \eref{chaos-recurrence-original} where, in the presence of
dependencies, an arbitrary function $\vp_i$ (since it is based on \f as shown
\eref{chaos-power-model}) of the stochastic temperature $\vq_i$ and the
uncertain parameters \vu needs to be evaluated and combined with the random
vector $\vs_i$. This recurrence has been replaced with a purely deterministic
one in \eref{chaos-recurrence}. More generally, the whole system, including the
temperature model in \eref{temperature-model-original}, has been substituted
with a lightweight surrogate defined by a set of polynomials $\set{\psi_{\vj}}$,
a set of coefficients $\set{\hat{\vp}_{i \vj}}$ for power, and a set of
coefficients $\set{\hat{\vq}_{i \vj}}$ for temperature. These quantities
constitute the desired stochastic power and temperature profiles, and these
profiles are ready to be analyzed at each step of the process as described in
\sref{chaos-processing}.
Before we proceed, let us draw attention to the ease and generality of taking
process variation into consideration using the proposed approach. The
description given above is free from any explicit formula of any particular
process parameter. In contrast, the solutions from the literature related to
process variation are typically based on ad~hoc expressions and should be
individually tailored by the designer to each new parameter; see, for instance,
\cite{ghanta2006, bhardwaj2008, huang2009a}. The proposed framework provides
great flexibility in this regard.
| {
"alphanum_fraction": 0.7609958506,
"avg_line_length": 58.3064516129,
"ext": "tex",
"hexsha": "2b491e852a474087f59c22f8c9dd1f2dbe4e7727",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "95a7e2ee7664b94156906322610555e36e53cfe0",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "IvanUkhov/thesis",
"max_forks_repo_path": "include/uncertainty/process/development/transient-analysis.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "95a7e2ee7664b94156906322610555e36e53cfe0",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "IvanUkhov/thesis",
"max_issues_repo_path": "include/uncertainty/process/development/transient-analysis.tex",
"max_line_length": 119,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "95a7e2ee7664b94156906322610555e36e53cfe0",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "IvanUkhov/thesis",
"max_stars_repo_path": "include/uncertainty/process/development/transient-analysis.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2891,
"size": 10845
} |
% Default to the notebook output style
% Inherit from the specified cell style.
\documentclass[11pt]{article}
\usepackage[T1]{fontenc}
% Nicer default font (+ math font) than Computer Modern for most use cases
\usepackage{mathpazo}
% Basic figure setup, for now with no caption control since it's done
% automatically by Pandoc (which extracts  syntax from Markdown).
\usepackage{graphicx}
% We will generate all images so they have a width \maxwidth. This means
% that they will get their normal width if they fit onto the page, but
% are scaled down if they would overflow the margins.
\makeatletter
\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth
\else\Gin@nat@width\fi}
\makeatother
\let\Oldincludegraphics\includegraphics
% Set max figure width to be 80% of text width, for now hardcoded.
\renewcommand{\includegraphics}[1]{\Oldincludegraphics[width=.8\maxwidth]{#1}}
% Ensure that by default, figures have no caption (until we provide a
% proper Figure object with a Caption API and a way to capture that
% in the conversion process - todo).
\usepackage{caption}
\DeclareCaptionLabelFormat{nolabel}{}
\captionsetup{labelformat=nolabel}
\usepackage{adjustbox} % Used to constrain images to a maximum size
\usepackage{xcolor} % Allow colors to be defined
\usepackage{enumerate} % Needed for markdown enumerations to work
\usepackage{geometry} % Used to adjust the document margins
\usepackage{amsmath} % Equations
\usepackage{amssymb} % Equations
\usepackage{textcomp} % defines textquotesingle
% Hack from http://tex.stackexchange.com/a/47451/13684:
\AtBeginDocument{%
\def\PYZsq{\textquotesingle}% Upright quotes in Pygmentized code
}
\usepackage{upquote} % Upright quotes for verbatim code
\usepackage{eurosym} % defines \euro
\usepackage[mathletters]{ucs} % Extended unicode (utf-8) support
\usepackage[utf8x]{inputenc} % Allow utf-8 characters in the tex document
\usepackage{fancyvrb} % verbatim replacement that allows latex
\usepackage{grffile} % extends the file name processing of package graphics
% to support a larger range
% The hyperref package gives us a pdf with properly built
% internal navigation ('pdf bookmarks' for the table of contents,
% internal cross-reference links, web links for URLs, etc.)
\usepackage{hyperref}
\usepackage{longtable} % longtable support required by pandoc >1.10
\usepackage{booktabs} % table support for pandoc > 1.12.2
\usepackage[inline]{enumitem} % IRkernel/repr support (it uses the enumerate* environment)
\usepackage[normalem]{ulem} % ulem is needed to support strikethroughs (\sout)
% normalem makes italics be italics, not underlines
% Colors for the hyperref package
\definecolor{urlcolor}{rgb}{0,.145,.698}
\definecolor{linkcolor}{rgb}{.71,0.21,0.01}
\definecolor{citecolor}{rgb}{.12,.54,.11}
% ANSI colors
\definecolor{ansi-black}{HTML}{3E424D}
\definecolor{ansi-black-intense}{HTML}{282C36}
\definecolor{ansi-red}{HTML}{E75C58}
\definecolor{ansi-red-intense}{HTML}{B22B31}
\definecolor{ansi-green}{HTML}{00A250}
\definecolor{ansi-green-intense}{HTML}{007427}
\definecolor{ansi-yellow}{HTML}{DDB62B}
\definecolor{ansi-yellow-intense}{HTML}{B27D12}
\definecolor{ansi-blue}{HTML}{208FFB}
\definecolor{ansi-blue-intense}{HTML}{0065CA}
\definecolor{ansi-magenta}{HTML}{D160C4}
\definecolor{ansi-magenta-intense}{HTML}{A03196}
\definecolor{ansi-cyan}{HTML}{60C6C8}
\definecolor{ansi-cyan-intense}{HTML}{258F8F}
\definecolor{ansi-white}{HTML}{C5C1B4}
\definecolor{ansi-white-intense}{HTML}{A1A6B2}
% commands and environments needed by pandoc snippets
% extracted from the output of `pandoc -s`
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}}
% Add ',fontsize=\small' for more characters per line
\newenvironment{Shaded}{}{}
\newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{\textbf{{#1}}}}
\newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.56,0.13,0.00}{{#1}}}
\newcommand{\DecValTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}}
\newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}}
\newcommand{\FloatTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}}
\newcommand{\CharTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
\newcommand{\StringTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
\newcommand{\CommentTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textit{{#1}}}}
\newcommand{\OtherTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{{#1}}}
\newcommand{\AlertTok}[1]{\textcolor[rgb]{1.00,0.00,0.00}{\textbf{{#1}}}}
\newcommand{\FunctionTok}[1]{\textcolor[rgb]{0.02,0.16,0.49}{{#1}}}
\newcommand{\RegionMarkerTok}[1]{{#1}}
\newcommand{\ErrorTok}[1]{\textcolor[rgb]{1.00,0.00,0.00}{\textbf{{#1}}}}
\newcommand{\NormalTok}[1]{{#1}}
% Additional commands for more recent versions of Pandoc
\newcommand{\ConstantTok}[1]{\textcolor[rgb]{0.53,0.00,0.00}{{#1}}}
\newcommand{\SpecialCharTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
\newcommand{\VerbatimStringTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
\newcommand{\SpecialStringTok}[1]{\textcolor[rgb]{0.73,0.40,0.53}{{#1}}}
\newcommand{\ImportTok}[1]{{#1}}
\newcommand{\DocumentationTok}[1]{\textcolor[rgb]{0.73,0.13,0.13}{\textit{{#1}}}}
\newcommand{\AnnotationTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
\newcommand{\CommentVarTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
\newcommand{\VariableTok}[1]{\textcolor[rgb]{0.10,0.09,0.49}{{#1}}}
\newcommand{\ControlFlowTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{\textbf{{#1}}}}
\newcommand{\OperatorTok}[1]{\textcolor[rgb]{0.40,0.40,0.40}{{#1}}}
\newcommand{\BuiltInTok}[1]{{#1}}
\newcommand{\ExtensionTok}[1]{{#1}}
\newcommand{\PreprocessorTok}[1]{\textcolor[rgb]{0.74,0.48,0.00}{{#1}}}
\newcommand{\AttributeTok}[1]{\textcolor[rgb]{0.49,0.56,0.16}{{#1}}}
\newcommand{\InformationTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
\newcommand{\WarningTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
% Define a nice break command that doesn't care if a line doesn't already
% exist.
\def\br{\hspace*{\fill} \\* }
% Math Jax compatability definitions
\def\gt{>}
\def\lt{<}
% Document parameters
\title{a\_data\_processing\_a}
% Pygments definitions
\makeatletter
\def\PY@reset{\let\PY@it=\relax \let\PY@bf=\relax%
\let\PY@ul=\relax \let\PY@tc=\relax%
\let\PY@bc=\relax \let\PY@ff=\relax}
\def\PY@tok#1{\csname PY@tok@#1\endcsname}
\def\PY@toks#1+{\ifx\relax#1\empty\else%
\PY@tok{#1}\expandafter\PY@toks\fi}
\def\PY@do#1{\PY@bc{\PY@tc{\PY@ul{%
\PY@it{\PY@bf{\PY@ff{#1}}}}}}}
\def\PY#1#2{\PY@reset\PY@toks#1+\relax+\PY@do{#2}}
\expandafter\def\csname PY@tok@w\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.73,0.73}{##1}}}
\expandafter\def\csname PY@tok@c\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@cp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.74,0.48,0.00}{##1}}}
\expandafter\def\csname PY@tok@k\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@kp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@kt\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.69,0.00,0.25}{##1}}}
\expandafter\def\csname PY@tok@o\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@ow\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.67,0.13,1.00}{##1}}}
\expandafter\def\csname PY@tok@nb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@nf\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\expandafter\def\csname PY@tok@nc\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\expandafter\def\csname PY@tok@nn\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\expandafter\def\csname PY@tok@ne\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.82,0.25,0.23}{##1}}}
\expandafter\def\csname PY@tok@nv\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@no\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.53,0.00,0.00}{##1}}}
\expandafter\def\csname PY@tok@nl\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.63,0.63,0.00}{##1}}}
\expandafter\def\csname PY@tok@ni\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.60,0.60,0.60}{##1}}}
\expandafter\def\csname PY@tok@na\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.49,0.56,0.16}{##1}}}
\expandafter\def\csname PY@tok@nt\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@nd\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.67,0.13,1.00}{##1}}}
\expandafter\def\csname PY@tok@s\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@sd\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@si\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.53}{##1}}}
\expandafter\def\csname PY@tok@se\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.13}{##1}}}
\expandafter\def\csname PY@tok@sr\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.53}{##1}}}
\expandafter\def\csname PY@tok@ss\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@sx\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@m\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@gh\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}}
\expandafter\def\csname PY@tok@gu\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.50,0.00,0.50}{##1}}}
\expandafter\def\csname PY@tok@gd\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.63,0.00,0.00}{##1}}}
\expandafter\def\csname PY@tok@gi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.63,0.00}{##1}}}
\expandafter\def\csname PY@tok@gr\endcsname{\def\PY@tc##1{\textcolor[rgb]{1.00,0.00,0.00}{##1}}}
\expandafter\def\csname PY@tok@ge\endcsname{\let\PY@it=\textit}
\expandafter\def\csname PY@tok@gs\endcsname{\let\PY@bf=\textbf}
\expandafter\def\csname PY@tok@gp\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}}
\expandafter\def\csname PY@tok@go\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.53,0.53,0.53}{##1}}}
\expandafter\def\csname PY@tok@gt\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.27,0.87}{##1}}}
\expandafter\def\csname PY@tok@err\endcsname{\def\PY@bc##1{\setlength{\fboxsep}{0pt}\fcolorbox[rgb]{1.00,0.00,0.00}{1,1,1}{\strut ##1}}}
\expandafter\def\csname PY@tok@kc\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@kd\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@kn\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@kr\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@bp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@fm\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\expandafter\def\csname PY@tok@vc\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@vg\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@vi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@vm\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@sa\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@sb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@sc\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@dl\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@s2\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@sh\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@s1\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@mb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@mf\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@mh\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@mi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@il\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@mo\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@ch\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@cm\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@cpf\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@c1\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@cs\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\def\PYZbs{\char`\\}
\def\PYZus{\char`\_}
\def\PYZob{\char`\{}
\def\PYZcb{\char`\}}
\def\PYZca{\char`\^}
\def\PYZam{\char`\&}
\def\PYZlt{\char`\<}
\def\PYZgt{\char`\>}
\def\PYZsh{\char`\#}
\def\PYZpc{\char`\%}
\def\PYZdl{\char`\$}
\def\PYZhy{\char`\-}
\def\PYZsq{\char`\'}
\def\PYZdq{\char`\"}
\def\PYZti{\char`\~}
% for compatibility with earlier versions
\def\PYZat{@}
\def\PYZlb{[}
\def\PYZrb{]}
\makeatother
% Exact colors from NB
\definecolor{incolor}{rgb}{0.0, 0.0, 0.5}
\definecolor{outcolor}{rgb}{0.545, 0.0, 0.0}
% Prevent overflowing lines due to hard-to-break entities
\sloppy
% Setup hyperref package
\hypersetup{
breaklinks=true, % so long urls are correctly broken across lines
colorlinks=true,
urlcolor=urlcolor,
linkcolor=linkcolor,
citecolor=citecolor,
}
% Slightly bigger margins than the latex defaults
\geometry{verbose,tmargin=1in,bmargin=1in,lmargin=1in,rmargin=1in}
\begin{document}
\maketitle
\subsubsection{Imports}\label{imports}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}1}]:} \PY{c+c1}{\PYZsh{} Imports}
\PY{k+kn}{import} \PY{n+nn}{pickle}
\PY{k+kn}{import} \PY{n+nn}{numpy} \PY{k}{as} \PY{n+nn}{np}
\PY{k+kn}{import} \PY{n+nn}{pandas} \PY{k}{as} \PY{n+nn}{pd}
\PY{k+kn}{import} \PY{n+nn}{urllib}
\PY{k+kn}{from} \PY{n+nn}{bs4} \PY{k}{import} \PY{n}{BeautifulSoup}
\PY{k+kn}{from} \PY{n+nn}{pandas\PYZus{}datareader} \PY{k}{import} \PY{n}{data} \PY{k}{as} \PY{n}{web}
\end{Verbatim}
\section{Part One - Data Acquisition and
Cleaning}\label{part-one---data-acquisition-and-cleaning}
\subsection{Web Scraping}\label{web-scraping}
Web scraping the list of S\&P 500 companies from
https://en.wikipedia.org/wiki/List\_of\_S\%26P\_500\_companies as of
January 20, 2018. NOTE: a local copy was saved as 'data/201801201706 - List of
S\&P 500 companies - Wikipedia.html'.
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{c+c1}{\PYZsh{} https://stackoverflow.com/questions/42225204/use\PYZhy{}pandas\PYZhy{}to\PYZhy{}get\PYZhy{}multiple\PYZhy{}tables\PYZhy{}from\PYZhy{}webpage}
\PY{n}{url} \PY{o}{=} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{https://en.wikipedia.org/wiki/List\PYZus{}of\PYZus{}S}\PY{l+s+s1}{\PYZpc{}}\PY{l+s+s1}{26P\PYZus{}500\PYZus{}companies}\PY{l+s+s1}{\PYZsq{}}
\PY{n}{html\PYZus{}table} \PY{o}{=} \PY{n}{urllib}\PY{o}{.}\PY{n}{request}\PY{o}{.}\PY{n}{urlopen}\PY{p}{(}\PY{n}{url}\PY{p}{)}\PY{o}{.}\PY{n}{read}\PY{p}{(}\PY{p}{)}
\PY{c+c1}{\PYZsh{} fix HTML}
\PY{n}{soup} \PY{o}{=} \PY{n}{BeautifulSoup}\PY{p}{(}\PY{n}{html\PYZus{}table}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{html.parser}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{c+c1}{\PYZsh{} warn! id ratings\PYZhy{}table is your page specific}
\PY{k}{for} \PY{n}{table} \PY{o+ow}{in} \PY{n}{soup}\PY{o}{.}\PY{n}{findChildren}\PY{p}{(}\PY{n}{attrs}\PY{o}{=}\PY{p}{\PYZob{}}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{id}\PY{l+s+s1}{\PYZsq{}}\PY{p}{:} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{ratings\PYZhy{}table}\PY{l+s+s1}{\PYZsq{}}\PY{p}{\PYZcb{}}\PY{p}{)}\PY{p}{:}
\PY{k}{for} \PY{n}{c} \PY{o+ow}{in} \PY{n}{table}\PY{o}{.}\PY{n}{children}\PY{p}{:}
\PY{k}{if} \PY{n}{c}\PY{o}{.}\PY{n}{name} \PY{o+ow}{in} \PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{tbody}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{thead}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{p}{:}
\PY{n}{c}\PY{o}{.}\PY{n}{unwrap}\PY{p}{(}\PY{p}{)}
\PY{n}{list\PYZus{}df} \PY{o}{=} \PY{n}{pd}\PY{o}{.}\PY{n}{read\PYZus{}html}\PY{p}{(}\PY{n+nb}{str}\PY{p}{(}\PY{n}{soup}\PY{p}{)}\PY{p}{,} \PY{n}{flavor}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{bs4}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
\PY{c+c1}{\PYZsh{}len(list\PYZus{}df[0])}
\end{Verbatim}
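For reference, recent versions of pandas can usually parse the Wikipedia
tables directly with \texttt{pd.read\_html}, without the BeautifulSoup
workaround above. A minimal sketch, assuming the constituents table is still
the first table on the page (this may change if Wikipedia reorganises the
article):
\begin{Verbatim}
# Minimal sketch: let pandas fetch and parse all tables on the page.
# Assumes the S&P 500 constituents table is the first table returned,
# which may change if Wikipedia reorganises the page.
import pandas as pd

url = 'https://en.wikipedia.org/wiki/List_of_S%26P_500_companies'
tables = pd.read_html(url)        # list of DataFrames, one per HTML table
df_constituents = tables[0]       # the constituents table (by assumption)
print(df_constituents.head())
\end{Verbatim}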
\subsubsection{Debug}\label{debug}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{c+c1}{\PYZsh{} List of dataframes.}
\PY{c+c1}{\PYZsh{} The first Dataframe in the list is the conversion of the HTML table}
\PY{c+c1}{\PYZsh{} to a Pandas Dataframe. The first one is the one we care about. It is the }
\PY{c+c1}{\PYZsh{} S\PYZam{}P 500 Component Stocks.}
\PY{n+nb}{type}\PY{p}{(}\PY{n}{list\PYZus{}df}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{c+c1}{\PYZsh{} Randomly sample axis, in this case None looks like the rows.}
\PY{n}{list\PYZus{}df}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{o}{.}\PY{n}{sample}\PY{p}{(}\PY{l+m+mi}{10}\PY{p}{,}
\PY{n}{random\PYZus{}state} \PY{o}{=} \PY{k+kc}{None}\PY{p}{)}
\end{Verbatim}
\subsubsection{Assign the Pandas DataFrame of interest to a more intuitive
variable name.}\label{pass-pandas-dataframe-we-care-about-to-a-more-intuitive-variable-name.}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{n}{df\PYZus{}sp500\PYZus{}component\PYZus{}stocks\PYZus{}raw\PYZus{}data} \PY{o}{=} \PY{n}{list\PYZus{}df}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}
\end{Verbatim}
\subsubsection{Save raw data to .csv}\label{save-raw-data-to-.csv}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{n}{file\PYZus{}name\PYZus{}web\PYZus{}scrap\PYZus{}raw\PYZus{}data} \PY{o}{=} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{data/sp500\PYZus{}component\PYZus{}stocks\PYZus{}raw\PYZus{}data\PYZus{}201801201730.csv}\PY{l+s+s1}{\PYZsq{}}
\PY{n}{df\PYZus{}sp500\PYZus{}component\PYZus{}stocks\PYZus{}raw\PYZus{}data}\PY{o}{.}\PY{n}{to\PYZus{}csv}\PY{p}{(}\PY{n}{file\PYZus{}name\PYZus{}web\PYZus{}scrap\PYZus{}raw\PYZus{}data}\PY{p}{)}
\end{Verbatim}
\subsubsection{Work from loaded raw data instead of web scraping each
time.}\label{work-from-loaded-raw-data-instead-of-web-scrapping-each-time.}
The website can change over time, whereas from this point on we lock the
data and use the saved/cached version. If we need to scrape the web again,
re-run the first section. NOTE: the web address and/or the page format may
change.
\subsubsection{Load raw data from file}\label{load-raw-data-from-file}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{c+c1}{\PYZsh{} Using same variable name.}
\PY{n}{df\PYZus{}sp500\PYZus{}component\PYZus{}stocks\PYZus{}raw\PYZus{}data} \PY{o}{=} \PY{n}{pd}\PY{o}{.}\PY{n}{read\PYZus{}csv}\PY{p}{(} \PY{n}{file\PYZus{}name\PYZus{}web\PYZus{}scrap\PYZus{}raw\PYZus{}data} \PY{p}{)}
\end{Verbatim}
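To make the caching explicit, one could check for the local file and only
re-scrape when it is missing. A minimal sketch, reusing the
\texttt{file\_name\_web\_scrap\_raw\_data} path defined above (the scraping
step itself is only indicated in a comment):
\begin{Verbatim}
# Minimal sketch: use the cached .csv when it exists, otherwise re-scrape.
# file_name_web_scrap_raw_data is the same path defined above.
import os
import pandas as pd

if os.path.exists(file_name_web_scrap_raw_data):
    # Data locking: work from the saved/cached copy.
    df_sp500_component_stocks_raw_data = pd.read_csv(file_name_web_scrap_raw_data)
else:
    # Re-run the web-scraping section above, then save with:
    # df_sp500_component_stocks_raw_data.to_csv(file_name_web_scrap_raw_data)
    raise FileNotFoundError('Cached raw data not found; re-run the scraping section.')
\end{Verbatim}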
\subsubsection{Debug}\label{debug}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{n}{df\PYZus{}sp500\PYZus{}component\PYZus{}stocks\PYZus{}raw\PYZus{}data}\PY{o}{.}\PY{n}{sample}\PY{p}{(}\PY{l+m+mi}{10}\PY{p}{,} \PY{n}{random\PYZus{}state} \PY{o}{=} \PY{k+kc}{None}\PY{p}{)}
\end{Verbatim}
\subsubsection{Clean the data. Make the first row the header for the
columns.}\label{clean-the-data.-make-the-first-row-the-head-for-the-columns.}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{c+c1}{\PYZsh{} Make the first row the header column}
\PY{c+c1}{\PYZsh{} NOTE: This does not get rid of the row.}
\PY{n}{df\PYZus{}sp500\PYZus{}component\PYZus{}stocks\PYZus{}cleaned} \PY{o}{=} \PY{n}{df\PYZus{}sp500\PYZus{}component\PYZus{}stocks\PYZus{}raw\PYZus{}data}\PY{o}{.}\PY{n}{copy}\PY{p}{(}\PY{p}{)}
\PY{n}{df\PYZus{}sp500\PYZus{}component\PYZus{}stocks\PYZus{}cleaned}\PY{o}{.}\PY{n}{columns} \PY{o}{=} \PY{n}{df\PYZus{}sp500\PYZus{}component\PYZus{}stocks\PYZus{}cleaned}\PY{o}{.}\PY{n}{iloc}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}
\PY{c+c1}{\PYZsh{} Re\PYZhy{}index and drop the first row.}
\PY{n}{df\PYZus{}sp500\PYZus{}component\PYZus{}stocks\PYZus{}cleaned} \PY{o}{=} \PY{n}{df\PYZus{}sp500\PYZus{}component\PYZus{}stocks\PYZus{}cleaned}\PY{o}{.}\PY{n}{reindex}\PY{p}{(}\PY{n}{df\PYZus{}sp500\PYZus{}component\PYZus{}stocks\PYZus{}cleaned}\PY{o}{.}\PY{n}{index}\PY{o}{.}\PY{n}{drop}\PY{p}{(}\PY{l+m+mi}{0}\PY{p}{)}\PY{p}{)}
\PY{c+c1}{\PYZsh{} \PYZsh{} Keep columns of interest.}
\PY{c+c1}{\PYZsh{} \PYZsh{} https://stackoverflow.com/questions/14940743/selecting\PYZhy{}excluding\PYZhy{}sets\PYZhy{}of\PYZhy{}columns\PYZhy{}in\PYZhy{}pandas}
\PY{c+c1}{\PYZsh{} columns\PYZus{}to\PYZus{}keep = [\PYZsq{}GICS Sector\PYZsq{}, \PYZsq{}GICS Sub Industry\PYZsq{}]}
\PY{c+c1}{\PYZsh{} df\PYZus{}sp500\PYZus{}component\PYZus{}stocks\PYZus{}cleaned = df\PYZus{}sp500\PYZus{}component\PYZus{}stocks\PYZus{}cleaned[columns\PYZus{}to\PYZus{}keep]}
\PY{c+c1}{\PYZsh{} This is the main industry, select only the rows for ticker symbols for the main industry.}
\PY{c+c1}{\PYZsh{} NOTE: The main industry is System Software (i.e. operating systems companies like Red Hat or Microsoft.)}
\PY{c+c1}{\PYZsh{} Not just broad }
\PY{c+c1}{\PYZsh{}}
\PY{c+c1}{\PYZsh{} https://stackoverflow.com/questions/17071871/select\PYZhy{}rows\PYZhy{}from\PYZhy{}a\PYZhy{}dataframe\PYZhy{}based\PYZhy{}on\PYZhy{}values\PYZhy{}in\PYZhy{}a\PYZhy{}column\PYZhy{}in\PYZhy{}pandas}
\PY{c+c1}{\PYZsh{}df\PYZus{}tickers\PYZus{}for\PYZus{}information\PYZus{}technology = df\PYZus{}sp500\PYZus{}component\PYZus{}stocks\PYZus{}cleaned.loc[df\PYZus{}sp500\PYZus{}component\PYZus{}stocks\PYZus{}cleaned[\PYZsq{}GICS Sector\PYZsq{}] == \PYZsq{}Information Technology\PYZsq{}]}
\PY{c+c1}{\PYZsh{}}
\PY{c+c1}{\PYZsh{} not plural and ignore case:}
\PY{c+c1}{\PYZsh{} https://stackoverflow.com/questions/32616261/filtering\PYZhy{}pandas\PYZhy{}dataframe\PYZhy{}rows\PYZhy{}by\PYZhy{}contains\PYZhy{}str}
\PY{n}{df\PYZus{}tickers\PYZus{}for\PYZus{}software} \PY{o}{=} \PY{n}{df\PYZus{}sp500\PYZus{}component\PYZus{}stocks\PYZus{}cleaned}\PY{o}{.}\PY{n}{loc}\PY{p}{[}\PY{n}{df\PYZus{}sp500\PYZus{}component\PYZus{}stocks\PYZus{}cleaned}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{GICS Sub Industry}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{o}{.}\PY{n}{str}\PY{o}{.}\PY{n}{contains}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{software}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{n}{case}\PY{o}{=}\PY{k+kc}{False}\PY{p}{)}\PY{p}{]}
\PY{c+c1}{\PYZsh{} Related industry, semiconductors}
\PY{c+c1}{\PYZsh{} not plural and ignore case:}
\PY{c+c1}{\PYZsh{} https://stackoverflow.com/questions/32616261/filtering\PYZhy{}pandas\PYZhy{}dataframe\PYZhy{}rows\PYZhy{}by\PYZhy{}contains\PYZhy{}str}
\PY{n}{df\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors} \PY{o}{=} \PY{n}{df\PYZus{}sp500\PYZus{}component\PYZus{}stocks\PYZus{}cleaned}\PY{o}{.}\PY{n}{loc}\PY{p}{[}\PY{n}{df\PYZus{}sp500\PYZus{}component\PYZus{}stocks\PYZus{}cleaned}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{GICS Sub Industry}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{o}{.}\PY{n}{str}\PY{o}{.}\PY{n}{contains}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{semiconductor}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{n}{case}\PY{o}{=}\PY{k+kc}{False}\PY{p}{)}\PY{p}{]}
\PY{c+c1}{\PYZsh{} Reset index.}
\PY{c+c1}{\PYZsh{} NOTE: drop = True means do not make a new index and keep old.}
\PY{c+c1}{\PYZsh{} inplace = True means update this variable and not return a copy}
\PY{c+c1}{\PYZsh{} leaving original intact.}
\PY{n}{df\PYZus{}tickers\PYZus{}for\PYZus{}software}\PY{o}{.}\PY{n}{reset\PYZus{}index}\PY{p}{(}\PY{n}{drop} \PY{o}{=} \PY{k+kc}{True}\PY{p}{,}
\PY{n}{inplace} \PY{o}{=} \PY{k+kc}{True}\PY{p}{)}
\PY{n}{df\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors}\PY{o}{.}\PY{n}{reset\PYZus{}index}\PY{p}{(}\PY{n}{drop} \PY{o}{=} \PY{k+kc}{True}\PY{p}{,}
\PY{n}{inplace} \PY{o}{=} \PY{k+kc}{True}\PY{p}{)}
\PY{c+c1}{\PYZsh{} This is the related industry.}
\PY{c+c1}{\PYZsh{} NOTE: The related industry is semiconductors.}
\end{Verbatim}
\subsubsection{Debug}\label{debug}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{c+c1}{\PYZsh{}df\PYZus{}sp500\PYZus{}component\PYZus{}stocks\PYZus{}cleaned.sample(10, random\PYZus{}state = None)}
\PY{c+c1}{\PYZsh{}df\PYZus{}sp500\PYZus{}component\PYZus{}stocks\PYZus{}cleaned.head(10)}
\PY{c+c1}{\PYZsh{}df\PYZus{}sp500\PYZus{}component\PYZus{}stocks\PYZus{}cleaned.tail(10)}
\PY{c+c1}{\PYZsh{}df\PYZus{}tickers\PYZus{}for\PYZus{}software}
\PY{n}{df\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors}
\end{Verbatim}
\subsubsection{Save clean data to .csv}\label{save-clean-data-to-.csv}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{n}{df\PYZus{}sp500\PYZus{}component\PYZus{}stocks\PYZus{}cleaned}\PY{o}{.}\PY{n}{to\PYZus{}csv}\PY{p}{(} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{data/sp500\PYZus{}component\PYZus{}stocks\PYZus{}cleaned\PYZus{}data\PYZus{}201801201826.csv}\PY{l+s+s1}{\PYZsq{}} \PY{p}{)}
\PY{n}{df\PYZus{}tickers\PYZus{}for\PYZus{}software}\PY{o}{.}\PY{n}{to\PYZus{}csv}\PY{p}{(} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{data/tickers\PYZus{}main\PYZus{}industry\PYZus{}software\PYZus{}201801201826.csv}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
\PY{n}{df\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors}\PY{o}{.}\PY{n}{to\PYZus{}csv}\PY{p}{(} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{data/tickers\PYZus{}related\PYZus{}industry\PYZus{}semiconductor\PYZus{}201801201826.csv}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
\end{Verbatim}
\subsubsection{Load data derived from web data
scraping.}\label{load-data-derived-from-web-data-scraping.}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{n}{df\PYZus{}tickers\PYZus{}for\PYZus{}software} \PY{o}{=} \PY{n}{pd}\PY{o}{.}\PY{n}{read\PYZus{}csv}\PY{p}{(} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{data/tickers\PYZus{}main\PYZus{}industry\PYZus{}software\PYZus{}201801201826.csv}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,}
\PY{c+c1}{\PYZsh{} Use the first column as the index}
\PY{n}{index\PYZus{}col} \PY{o}{=} \PY{l+m+mi}{0}\PY{p}{)}
\PY{n}{df\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors} \PY{o}{=} \PY{n}{pd}\PY{o}{.}\PY{n}{read\PYZus{}csv}\PY{p}{(} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{data/tickers\PYZus{}related\PYZus{}industry\PYZus{}semiconductor\PYZus{}201801201826.csv}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,}
\PY{c+c1}{\PYZsh{} Use the first column as the index}
\PY{n}{index\PYZus{}col} \PY{o}{=} \PY{l+m+mi}{0}\PY{p}{)}
\end{Verbatim}
\subsubsection{Debug}\label{debug}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{n}{df\PYZus{}tickers\PYZus{}for\PYZus{}software}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{n}{df\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors}
\end{Verbatim}
\section{Part Two - Data Processing}\label{part-two---data-processing}
\subsubsection{Get ticker symbols for the main industry and related
industry}\label{get-ticker-symbols-for-the-main-industry-and-related-industry}
\subsubsection{From Rubric}\label{from-rubric}
\subsubsection{Step 1: List down all stocks in the
industry}\label{step-1-list-down-all-stocks-in-the-industry}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{c+c1}{\PYZsh{} Get the value from the column, which is a Pandas Series, and convert to a Python List.}
\PY{n}{list\PYZus{}tickers\PYZus{}for\PYZus{}software} \PY{o}{=} \PY{n}{df\PYZus{}tickers\PYZus{}for\PYZus{}software}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Ticker symbol}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{o}{.}\PY{n}{tolist}\PY{p}{(}\PY{p}{)}
\PY{n}{list\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors} \PY{o}{=} \PY{n}{df\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Ticker symbol}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{o}{.}\PY{n}{tolist}\PY{p}{(}\PY{p}{)}
\end{Verbatim}
\subsubsection{Debug}\label{debug}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}345}]:} \PY{n}{list\PYZus{}tickers\PYZus{}for\PYZus{}software}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}345}]:} ['ADBE',
'ADP',
'ADSK',
'AKAM',
'ANSS',
'ATVI',
'CA',
'CDNS',
'CRM',
'CTXS',
'EA',
'EBAY',
'FB',
'FIS',
'FISV',
'GOOG',
'GOOGL',
'INTU',
'MA',
'MSFT',
'NFLX',
'NTAP',
'ORCL',
'PAYX',
'RHT',
'SNPS',
'SYMC',
'TSS',
'V',
'VRSN',
'WU']
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}346}]:} \PY{n}{list\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}346}]:} ['ADI',
'AMAT',
'AMD',
'AVGO',
'INTC',
'KLAC',
'LRCX',
'MCHP',
'MU',
'NVDA',
'QCOM',
'QRVO',
'SWKS',
'TXN',
'XLNX']
\end{Verbatim}
\subsubsection{From Rubric}\label{from-rubric}
\subsubsection{Step 2: Collect last one year stock price data for these
stocks.}\label{step-2-collect-last-one-year-stock-price-data-for-these-stocks.}
\paragraph{Main Industry}\label{main-industry}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{n}{dict\PYZus{}tickers\PYZus{}for\PYZus{}software} \PY{o}{=} \PY{p}{\PYZob{}}\PY{p}{\PYZcb{}}
\PY{k}{for} \PY{n}{ticker} \PY{o+ow}{in} \PY{n}{list\PYZus{}tickers\PYZus{}for\PYZus{}software}\PY{p}{:}
\PY{n}{dict\PYZus{}tickers\PYZus{}for\PYZus{}software}\PY{p}{[}\PY{n}{ticker}\PY{p}{]} \PY{o}{=} \PY{n}{web}\PY{o}{.}\PY{n}{get\PYZus{}data\PYZus{}quandl}\PY{p}{(}\PY{n}{ticker}\PY{p}{,}
\PY{n}{start} \PY{o}{=} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{1/19/2017}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,}
\PY{n}{end} \PY{o}{=} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{1/19/2018}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
\PY{c+c1}{\PYZsh{} df\PYZus{}temp\PYZus{}a = []}
\PY{c+c1}{\PYZsh{} for ticker in list\PYZus{}tickers\PYZus{}for\PYZus{}software:}
\PY{c+c1}{\PYZsh{} \PYZsh{} We get all data available}
\PY{c+c1}{\PYZsh{} df\PYZus{}temp\PYZus{}a = web.get\PYZus{}data\PYZus{}quandl(ticker)}
\PY{c+c1}{\PYZsh{} \PYZsh{} The data is dated latest first.}
\PY{c+c1}{\PYZsh{} \PYZsh{} We only want a year, so keep only 252 days.}
\PY{c+c1}{\PYZsh{} dict\PYZus{}tickers\PYZus{}for\PYZus{}software[ticker] = df\PYZus{}temp\PYZus{}a[:252]}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{n}{dict\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors} \PY{o}{=} \PY{p}{\PYZob{}}\PY{p}{\PYZcb{}}
\PY{k}{for} \PY{n}{ticker} \PY{o+ow}{in} \PY{n}{list\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors}\PY{p}{:}
\PY{n}{dict\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors}\PY{p}{[}\PY{n}{ticker}\PY{p}{]} \PY{o}{=} \PY{n}{web}\PY{o}{.}\PY{n}{get\PYZus{}data\PYZus{}quandl}\PY{p}{(}\PY{n}{ticker}\PY{p}{,}
\PY{n}{start} \PY{o}{=} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{1/19/2017}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,}
\PY{n}{end} \PY{o}{=} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{1/19/2018}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
\PY{c+c1}{\PYZsh{} for ticker in list\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors:}
\PY{c+c1}{\PYZsh{} \PYZsh{} We get all data available}
\PY{c+c1}{\PYZsh{} df\PYZus{}temp\PYZus{}a = web.get\PYZus{}data\PYZus{}quandl(ticker)}
\PY{c+c1}{\PYZsh{} \PYZsh{} The data is dated latest first.}
\PY{c+c1}{\PYZsh{} \PYZsh{} We only want a year, so keep only 252 days.}
\PY{c+c1}{\PYZsh{} dict\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors[ticker] = df\PYZus{}temp\PYZus{}a[:252]}
\end{Verbatim}
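Individual downloads can fail (renamed or delisted symbols, or series missing
from the Quandl dataset), and depending on the pandas-datareader setup an API
key may also be required (commonly supplied through the
\texttt{QUANDL\_API\_KEY} environment variable). A minimal sketch that skips
failing tickers instead of letting one bad symbol abort the whole loop:
\begin{Verbatim}
# Minimal sketch: skip tickers whose download fails (e.g. renamed or
# delisted symbols, or series missing from the Quandl dataset).
dict_tickers_for_software = {}
for ticker in list_tickers_for_software:
    try:
        dict_tickers_for_software[ticker] = web.get_data_quandl(ticker,
                                                                start='1/19/2017',
                                                                end='1/19/2018')
    except Exception as error:
        print('Skipping', ticker, '-', error)
\end{Verbatim}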
\subsubsection{Get SPX, the ticker that represents the S\&P 500, to
compare against market
returns}\label{get-spx-the-ticker-that-represents-the-sp-500-to-compare-market-returns}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}81}]:} \PY{n}{spx\PYZus{}ticker} \PY{o}{=} \PY{n}{web}\PY{o}{.}\PY{n}{DataReader}\PY{p}{(} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{SPX}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,}
\PY{n}{data\PYZus{}source} \PY{o}{=} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{yahoo}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,}
\PY{n}{start} \PY{o}{=} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{1/19/2017}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,}
\PY{n}{end} \PY{o}{=} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{1/19/2018}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}82}]:} \PY{n}{spx\PYZus{}ticker}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}82}]:} Open High Low Close Adj Close Volume
Date
2017-01-19 0.006 0.006 0.006 0.006 0.006 0
2017-01-20 0.045 0.045 0.045 0.045 0.045 0
2017-01-23 0.045 0.045 0.045 0.045 0.045 58000
2017-01-24 0.045 0.045 0.045 0.045 0.045 19000
2017-01-25 0.045 0.050 0.045 0.045 0.045 204250
2017-01-26 0.050 0.050 0.045 0.045 0.045 46000
2017-01-27 0.045 0.045 0.045 0.045 0.045 12000
2017-01-30 0.006 0.006 0.006 0.006 0.006 0
2017-01-31 0.040 0.040 0.040 0.040 0.040 8000
2017-02-01 0.045 0.045 0.045 0.045 0.045 28000
2017-02-02 0.045 0.045 0.045 0.045 0.045 20000
2017-02-03 0.045 0.045 0.045 0.045 0.045 0
2017-02-06 0.045 0.045 0.045 0.045 0.045 4000
2017-02-07 0.040 0.040 0.040 0.040 0.040 117735
2017-02-08 0.040 0.040 0.040 0.040 0.040 1348
2017-02-09 0.040 0.040 0.040 0.040 0.040 1000
2017-02-10 0.040 0.040 0.040 0.040 0.040 0
2017-02-13 0.040 0.040 0.040 0.040 0.040 0
2017-02-14 0.040 0.040 0.040 0.040 0.040 1000
2017-02-15 0.050 0.050 0.045 0.045 0.045 16000
2017-02-16 0.050 0.050 0.050 0.050 0.050 6320
2017-02-17 0.045 0.045 0.045 0.045 0.045 28200
2017-02-21 0.045 0.045 0.045 0.045 0.045 100500
2017-02-22 0.045 0.045 0.045 0.045 0.045 85000
2017-02-23 0.005 0.005 0.005 0.005 0.005 0
2017-02-24 0.045 0.045 0.045 0.045 0.045 46000
2017-02-27 0.005 0.005 0.005 0.005 0.005 0
2017-02-28 0.005 0.006 0.005 0.006 0.006 3234898
2017-03-01 0.045 0.045 0.040 0.040 0.040 78845
2017-03-02 0.008 0.010 0.008 0.009 0.009 3692497
{\ldots} {\ldots} {\ldots} {\ldots} {\ldots} {\ldots} {\ldots}
2017-12-06 0.035 0.035 0.035 0.035 0.035 27000
2017-12-07 0.035 0.035 0.035 0.035 0.035 40590
2017-12-08 0.035 0.035 0.035 0.035 0.035 29000
2017-12-11 0.035 0.035 0.035 0.035 0.035 237500
2017-12-12 0.035 0.040 0.035 0.035 0.035 95000
2017-12-13 0.040 0.040 0.040 0.040 0.040 20000
2017-12-14 0.035 0.035 0.035 0.035 0.035 33000
2017-12-15 0.040 0.040 0.040 0.040 0.040 59000
2017-12-18 0.040 0.040 0.040 0.040 0.040 9300
2017-12-19 0.035 0.035 0.035 0.035 0.035 40560
2017-12-20 0.040 0.040 0.040 0.040 0.040 100000
2017-12-21 0.040 0.045 0.035 0.045 0.045 56500
2017-12-22 0.045 0.045 0.045 0.045 0.045 0
2017-12-26 0.045 0.045 0.045 0.045 0.045 0
2017-12-27 0.040 0.040 0.035 0.035 0.035 36820
2017-12-28 0.040 0.045 0.040 0.045 0.045 5550
2017-12-29 0.035 0.045 0.035 0.045 0.045 18320
2018-01-02 0.045 0.045 0.040 0.040 0.040 30000
2018-01-03 0.035 0.040 0.035 0.040 0.040 25350
2018-01-04 0.035 0.035 0.035 0.035 0.035 49000
2018-01-05 0.040 0.045 0.035 0.045 0.045 100000
2018-01-08 0.045 0.045 0.040 0.045 0.045 37500
2018-01-09 0.040 0.050 0.040 0.050 0.050 115290
2018-01-10 0.050 0.065 0.050 0.060 0.060 271450
2018-01-11 0.060 0.065 0.055 0.060 0.060 55515
2018-01-12 0.055 0.055 0.055 0.055 0.055 7646
2018-01-16 0.060 0.065 0.055 0.055 0.055 307800
2018-01-17 0.050 0.055 0.050 0.055 0.055 52000
2018-01-18 0.055 0.055 0.050 0.055 0.055 23070
2018-01-19 0.055 0.055 0.055 0.055 0.055 131000
[253 rows x 6 columns]
\end{Verbatim}
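Note that Yahoo Finance generally lists the S\&P 500 index itself under the
symbol \texttt{\textasciicircum GSPC}; the plain \texttt{SPX} symbol may
resolve to a different, thinly traded instrument, which would explain the
very small prices in the output above. A minimal sketch for pulling the
index instead (assuming the \texttt{yahoo} source is still supported by the
installed pandas-datareader version):
\begin{Verbatim}
# Minimal sketch: fetch the S&P 500 index itself from Yahoo Finance.
# '^GSPC' is Yahoo's symbol for the S&P 500 index; availability of the
# 'yahoo' source depends on the pandas-datareader version.
spx_index = web.DataReader('^GSPC',
                           data_source='yahoo',
                           start='1/19/2017',
                           end='1/19/2018')
spx_index['Adj Close'].head()
\end{Verbatim}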
\subsubsection{Debug}\label{debug}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{c+c1}{\PYZsh{} \PYZsh{}dict\PYZus{}tickers\PYZus{}for\PYZus{}software[\PYZsq{}MSFT\PYZsq{}][\PYZsq{}2017\PYZhy{}01\PYZhy{}19\PYZsq{}]}
\PY{c+c1}{\PYZsh{} df\PYZus{}temp\PYZus{}a = dict\PYZus{}tickers\PYZus{}for\PYZus{}software[\PYZsq{}MSFT\PYZsq{}].reset\PYZus{}index()}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{c+c1}{\PYZsh{} \PYZsh{}df\PYZus{}temp\PYZus{}a.iloc[:252]}
\PY{c+c1}{\PYZsh{} \PYZsh{} df\PYZus{}temp\PYZus{}b = df\PYZus{}temp\PYZus{}a.set\PYZus{}index( df\PYZus{}temp\PYZus{}a.iloc[:252][\PYZsq{}Date\PYZsq{}],}
\PY{c+c1}{\PYZsh{} \PYZsh{} drop = True )}
\PY{c+c1}{\PYZsh{} df\PYZus{}temp\PYZus{}c = df\PYZus{}temp\PYZus{}a.iloc[:252]}
\PY{c+c1}{\PYZsh{} \PYZsh{}df\PYZus{}temp\PYZus{}b = dict\PYZus{}tickers\PYZus{}for\PYZus{}software[\PYZsq{}MSFT\PYZsq{}].iloc[\PYZsq{}1/19/2017\PYZsq{}:\PYZsq{}1/19/2018\PYZsq{}]}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{c+c1}{\PYZsh{} df\PYZus{}temp\PYZus{}a = dict\PYZus{}tickers\PYZus{}for\PYZus{}software[\PYZsq{}MSFT\PYZsq{}]}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{c+c1}{\PYZsh{} df\PYZus{}temp\PYZus{}a.iloc[:252]}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{n}{dict\PYZus{}tickers\PYZus{}for\PYZus{}software}\PY{o}{.}\PY{n}{keys}\PY{p}{(}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{n}{dict\PYZus{}tickers\PYZus{}for\PYZus{}software}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{MSFT}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{n}{dict\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors}\PY{o}{.}\PY{n}{keys}\PY{p}{(}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{n}{dict\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{INTC}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\end{Verbatim}
\subsubsection{Save Data to Pickle}\label{save-data-to-pickle}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{c+c1}{\PYZsh{} https://stackoverflow.com/questions/11641493/how\PYZhy{}to\PYZhy{}cpickle\PYZhy{}dump\PYZhy{}and\PYZhy{}load\PYZhy{}separate\PYZhy{}dictionaries\PYZhy{}to\PYZhy{}the\PYZhy{}same\PYZhy{}file}
\PY{n}{filename} \PY{o}{=} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{data/dict\PYZus{}tickers\PYZus{}for\PYZus{}software\PYZus{}201801201933.pickle}\PY{l+s+s1}{\PYZsq{}}
\PY{k}{with} \PY{n+nb}{open}\PY{p}{(}\PY{n}{filename}\PY{p}{,}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{wb}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)} \PY{k}{as} \PY{n}{fp}\PY{p}{:}
\PY{n}{pickle}\PY{o}{.}\PY{n}{dump}\PY{p}{(}\PY{n}{dict\PYZus{}tickers\PYZus{}for\PYZus{}software}\PY{p}{,}\PY{n}{fp}\PY{p}{)}
\PY{n}{filename} \PY{o}{=} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{data/dict\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors\PYZus{}201801201933.pickle}\PY{l+s+s1}{\PYZsq{}}
\PY{k}{with} \PY{n+nb}{open}\PY{p}{(}\PY{n}{filename}\PY{p}{,}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{wb}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)} \PY{k}{as} \PY{n}{fp}\PY{p}{:}
\PY{n}{pickle}\PY{o}{.}\PY{n}{dump}\PY{p}{(}\PY{n}{dict\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors}\PY{p}{,}\PY{n}{fp}\PY{p}{)}
\end{Verbatim}
\subsubsection{Loading Data from Pickle}\label{loading-data-from-pickle}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}26}]:} \PY{c+c1}{\PYZsh{} https://stackoverflow.com/questions/11641493/how\PYZhy{}to\PYZhy{}cpickle\PYZhy{}dump\PYZhy{}and\PYZhy{}load\PYZhy{}separate\PYZhy{}dictionaries\PYZhy{}to\PYZhy{}the\PYZhy{}same\PYZhy{}file}
\PY{n}{filename\PYZus{}software} \PY{o}{=} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{data/dict\PYZus{}tickers\PYZus{}for\PYZus{}software\PYZus{}201801201933.pickle}\PY{l+s+s1}{\PYZsq{}}
\PY{n}{filename\PYZus{}semiconductor} \PY{o}{=} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{data/dict\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors\PYZus{}201801201933.pickle}\PY{l+s+s1}{\PYZsq{}}
\PY{k}{with} \PY{n+nb}{open}\PY{p}{(}\PY{n}{filename\PYZus{}software}\PY{p}{,}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{rb}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)} \PY{k}{as} \PY{n}{fp}\PY{p}{:}
\PY{n}{dict\PYZus{}tickers\PYZus{}for\PYZus{}software}\PY{o}{=}\PY{n}{pickle}\PY{o}{.}\PY{n}{load}\PY{p}{(}\PY{n}{fp}\PY{p}{)}
\PY{k}{with} \PY{n+nb}{open}\PY{p}{(}\PY{n}{filename\PYZus{}semiconductor}\PY{p}{,}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{rb}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)} \PY{k}{as} \PY{n}{fp}\PY{p}{:}
\PY{n}{dict\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors}\PY{o}{=}\PY{n}{pickle}\PY{o}{.}\PY{n}{load}\PY{p}{(}\PY{n}{fp}\PY{p}{)}
\end{Verbatim}
\subsubsection{Debug}\label{debug}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{n}{dict\PYZus{}tickers\PYZus{}for\PYZus{}software}\PY{o}{.}\PY{n}{keys}\PY{p}{(}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{n}{dict\PYZus{}tickers\PYZus{}for\PYZus{}software}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{MSFT}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{n}{dict\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors}\PY{o}{.}\PY{n}{keys}\PY{p}{(}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{n}{dict\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{INTC}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\end{Verbatim}
\subsubsection{Create Pandas Panels to hold multiple DataFrames as
pages.}\label{create-pandas-panels-to-have-multiple-pages-for-dataframes.}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}3}]:} \PY{c+c1}{\PYZsh{} https://www.tutorialspoint.com/python\PYZus{}pandas/python\PYZus{}pandas\PYZus{}panel.htm}
\PY{n}{dp\PYZus{}tickers\PYZus{}for\PYZus{}software} \PY{o}{=} \PY{n}{pd}\PY{o}{.}\PY{n}{Panel}\PY{p}{(}\PY{n}{dict\PYZus{}tickers\PYZus{}for\PYZus{}software}\PY{p}{)}
\PY{n}{dp\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors} \PY{o}{=} \PY{n}{pd}\PY{o}{.}\PY{n}{Panel}\PY{p}{(}\PY{n}{dict\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors}\PY{p}{)}
\end{Verbatim}
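Note that \texttt{pd.Panel} was deprecated in later pandas releases and
removed in pandas 1.0, so this step only works with the older pandas version
used here. A minimal sketch of the usual replacement, a single DataFrame with
a (ticker, date) MultiIndex built from the dict of per-ticker DataFrames:
\begin{Verbatim}
# Minimal sketch: pd.Panel is deprecated in later pandas releases and
# removed in pandas 1.0. A common replacement is a single DataFrame with
# a (ticker, date) MultiIndex built from the dict of per-ticker frames.
df_software_multi = pd.concat(dict_tickers_for_software,
                              names=['Ticker', 'Date'])
# Select one ticker's frame, much like panel['MSFT'] on the Panel:
df_msft = df_software_multi.loc['MSFT']
\end{Verbatim}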
\subsubsection{Debug}\label{debug}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}4}]:} \PY{n+nb}{dir}\PY{p}{(}\PY{n}{dp\PYZus{}tickers\PYZus{}for\PYZus{}software}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}4}]:} ['ADBE',
'ADP',
'ADSK',
'AKAM',
'ANSS',
'ATVI',
'CA',
'CDNS',
'CRM',
'CTXS',
'EA',
'EBAY',
'FB',
'FIS',
'FISV',
'GOOG',
'GOOGL',
'INTU',
'MA',
'MSFT',
'NFLX',
'NTAP',
'ORCL',
'PAYX',
'RHT',
'SNPS',
'SYMC',
'TSS',
'V',
'VRSN',
'WU',
'\_AXIS\_ALIASES',
'\_AXIS\_IALIASES',
'\_AXIS\_LEN',
'\_AXIS\_NAMES',
'\_AXIS\_NUMBERS',
'\_AXIS\_ORDERS',
'\_AXIS\_REVERSED',
'\_AXIS\_SLICEMAP',
'\_\_abs\_\_',
'\_\_add\_\_',
'\_\_and\_\_',
'\_\_array\_\_',
'\_\_array\_wrap\_\_',
'\_\_bool\_\_',
'\_\_bytes\_\_',
'\_\_class\_\_',
'\_\_contains\_\_',
'\_\_copy\_\_',
'\_\_deepcopy\_\_',
'\_\_delattr\_\_',
'\_\_delitem\_\_',
'\_\_dict\_\_',
'\_\_dir\_\_',
'\_\_div\_\_',
'\_\_doc\_\_',
'\_\_eq\_\_',
'\_\_finalize\_\_',
'\_\_floordiv\_\_',
'\_\_format\_\_',
'\_\_ge\_\_',
'\_\_getattr\_\_',
'\_\_getattribute\_\_',
'\_\_getitem\_\_',
'\_\_getstate\_\_',
'\_\_gt\_\_',
'\_\_hash\_\_',
'\_\_iadd\_\_',
'\_\_iand\_\_',
'\_\_ifloordiv\_\_',
'\_\_imod\_\_',
'\_\_imul\_\_',
'\_\_init\_\_',
'\_\_init\_subclass\_\_',
'\_\_invert\_\_',
'\_\_ior\_\_',
'\_\_ipow\_\_',
'\_\_isub\_\_',
'\_\_iter\_\_',
'\_\_itruediv\_\_',
'\_\_ixor\_\_',
'\_\_le\_\_',
'\_\_len\_\_',
'\_\_lt\_\_',
'\_\_mod\_\_',
'\_\_module\_\_',
'\_\_mul\_\_',
'\_\_ne\_\_',
'\_\_neg\_\_',
'\_\_new\_\_',
'\_\_nonzero\_\_',
'\_\_or\_\_',
'\_\_pow\_\_',
'\_\_radd\_\_',
'\_\_rand\_\_',
'\_\_rdiv\_\_',
'\_\_reduce\_\_',
'\_\_reduce\_ex\_\_',
'\_\_repr\_\_',
'\_\_rfloordiv\_\_',
'\_\_rmod\_\_',
'\_\_rmul\_\_',
'\_\_ror\_\_',
'\_\_round\_\_',
'\_\_rpow\_\_',
'\_\_rsub\_\_',
'\_\_rtruediv\_\_',
'\_\_rxor\_\_',
'\_\_setattr\_\_',
'\_\_setitem\_\_',
'\_\_setstate\_\_',
'\_\_sizeof\_\_',
'\_\_str\_\_',
'\_\_sub\_\_',
'\_\_subclasshook\_\_',
'\_\_truediv\_\_',
'\_\_unicode\_\_',
'\_\_weakref\_\_',
'\_\_xor\_\_',
'\_accessors',
'\_add\_aggregate\_operations',
'\_add\_numeric\_operations',
'\_add\_series\_only\_operations',
'\_add\_series\_or\_dataframe\_operations',
'\_agg\_by\_level',
'\_aggregate',
'\_aggregate\_multiple\_funcs',
'\_align\_frame',
'\_align\_series',
'\_apply\_1d',
'\_apply\_2d',
'\_at',
'\_box\_item\_values',
'\_builtin\_table',
'\_check\_inplace\_setting',
'\_check\_is\_chained\_assignment\_possible',
'\_check\_percentile',
'\_check\_setitem\_copy',
'\_clear\_item\_cache',
'\_clip\_with\_one\_bound',
'\_clip\_with\_scalar',
'\_combine',
'\_combine\_const',
'\_combine\_frame',
'\_combine\_panel',
'\_compare\_constructor',
'\_consolidate',
'\_consolidate\_inplace',
'\_construct\_axes\_dict',
'\_construct\_axes\_dict\_for\_slice',
'\_construct\_axes\_dict\_from',
'\_construct\_axes\_from\_arguments',
'\_construct\_return\_type',
'\_constructor',
'\_constructor\_expanddim',
'\_constructor\_sliced',
'\_convert',
'\_create\_indexer',
'\_cython\_table',
'\_deprecations',
'\_dir\_additions',
'\_dir\_deletions',
'\_drop\_axis',
'\_expand\_axes',
'\_extract\_axes',
'\_extract\_axes\_for\_slice',
'\_extract\_axis',
'\_from\_axes',
'\_get\_axis',
'\_get\_axis\_name',
'\_get\_axis\_number',
'\_get\_axis\_resolvers',
'\_get\_block\_manager\_axis',
'\_get\_bool\_data',
'\_get\_cacher',
'\_get\_index\_resolvers',
'\_get\_item\_cache',
'\_get\_join\_index',
'\_get\_numeric\_data',
'\_get\_plane\_axes',
'\_get\_plane\_axes\_index',
'\_get\_value',
'\_get\_values',
'\_getitem\_multilevel',
'\_gotitem',
'\_homogenize\_dict',
'\_iat',
'\_iget\_item\_cache',
'\_iloc',
'\_indexed\_same',
'\_info\_axis',
'\_info\_axis\_name',
'\_info\_axis\_number',
'\_init\_arrays',
'\_init\_data',
'\_init\_dict',
'\_init\_matrix',
'\_init\_mgr',
'\_internal\_names',
'\_internal\_names\_set',
'\_is\_builtin\_func',
'\_is\_cached',
'\_is\_cython\_func',
'\_is\_datelike\_mixed\_type',
'\_is\_mixed\_type',
'\_is\_numeric\_mixed\_type',
'\_is\_view',
'\_ix',
'\_ixs',
'\_loc',
'\_maybe\_cache\_changed',
'\_maybe\_update\_cacher',
'\_metadata',
'\_needs\_reindex\_multi',
'\_obj\_with\_exclusions',
'\_prep\_ndarray',
'\_protect\_consolidate',
'\_reduce',
'\_reindex\_axes',
'\_reindex\_axis',
'\_reindex\_multi',
'\_reindex\_with\_indexers',
'\_repr\_data\_resource\_',
'\_repr\_latex\_',
'\_reset\_cache',
'\_reset\_cacher',
'\_selected\_obj',
'\_selection',
'\_selection\_list',
'\_selection\_name',
'\_set\_as\_cached',
'\_set\_axis',
'\_set\_axis\_name',
'\_set\_is\_copy',
'\_set\_item',
'\_set\_value',
'\_setup\_axes',
'\_shallow\_copy',
'\_slice',
'\_stat\_axis',
'\_stat\_axis\_name',
'\_stat\_axis\_number',
'\_take',
'\_to\_dict\_of\_blocks',
'\_try\_aggregate\_string\_function',
'\_typ',
'\_unpickle\_panel\_compat',
'\_update\_inplace',
'\_validate\_dtype',
'\_values',
'\_where',
'\_wrap\_result',
'\_xs',
'abs',
'add',
'add\_prefix',
'add\_suffix',
'agg',
'aggregate',
'align',
'all',
'any',
'apply',
'as\_matrix',
'asfreq',
'asof',
'astype',
'at',
'at\_time',
'axes',
'between\_time',
'bfill',
'bool',
'clip',
'clip\_lower',
'clip\_upper',
'compound',
'conform',
'copy',
'count',
'cummax',
'cummin',
'cumprod',
'cumsum',
'describe',
'div',
'divide',
'drop',
'dropna',
'dtypes',
'empty',
'eq',
'equals',
'ffill',
'fillna',
'filter',
'first',
'floordiv',
'fromDict',
'from\_dict',
'ftypes',
'ge',
'get',
'get\_dtype\_counts',
'get\_ftype\_counts',
'get\_value',
'get\_values',
'groupby',
'gt',
'head',
'iat',
'iloc',
'infer\_objects',
'interpolate',
'is\_copy',
'isna',
'isnull',
'items',
'iteritems',
'ix',
'join',
'keys',
'kurt',
'kurtosis',
'last',
'le',
'loc',
'lt',
'mad',
'major\_axis',
'major\_xs',
'mask',
'max',
'mean',
'median',
'min',
'minor\_axis',
'minor\_xs',
'mod',
'mul',
'multiply',
'ndim',
'ne',
'notna',
'notnull',
'pct\_change',
'pipe',
'pop',
'pow',
'prod',
'product',
'radd',
'rank',
'rdiv',
'reindex',
'reindex\_axis',
'reindex\_like',
'rename',
'rename\_axis',
'replace',
'resample',
'rfloordiv',
'rmod',
'rmul',
'round',
'rpow',
'rsub',
'rtruediv',
'sample',
'select',
'sem',
'set\_axis',
'set\_value',
'shape',
'shift',
'size',
'skew',
'slice\_shift',
'sort\_index',
'sort\_values',
'squeeze',
'std',
'sub',
'subtract',
'sum',
'swapaxes',
'swaplevel',
'tail',
'take',
'toLong',
'to\_clipboard',
'to\_dense',
'to\_excel',
'to\_frame',
'to\_hdf',
'to\_json',
'to\_latex',
'to\_long',
'to\_msgpack',
'to\_pickle',
'to\_sparse',
'to\_sql',
'to\_xarray',
'transpose',
'truediv',
'truncate',
'tshift',
'tz\_convert',
'tz\_localize',
'update',
'values',
'var',
'where',
'xs']
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}5}]:} \PY{n}{dp\PYZus{}tickers\PYZus{}for\PYZus{}software}\PY{o}{.}\PY{n}{items}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}5}]:} Index(['ADBE', 'ADP', 'ADSK', 'AKAM', 'ANSS', 'ATVI', 'CA', 'CDNS', 'CRM',
'CTXS', 'EA', 'EBAY', 'FB', 'FIS', 'FISV', 'GOOG', 'GOOGL', 'INTU',
'MA', 'MSFT', 'NFLX', 'NTAP', 'ORCL', 'PAYX', 'RHT', 'SNPS', 'SYMC',
'TSS', 'V', 'VRSN', 'WU'],
dtype='object')
\end{Verbatim}
\subsubsection{Save to Excel}\label{save-to-excel}
Saving to Excel lets each sheet (page) hold the data for one ticker symbol,
i.e. one stock.
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}6}]:} \PY{n}{dp\PYZus{}tickers\PYZus{}for\PYZus{}software}\PY{o}{.}\PY{n}{to\PYZus{}excel}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{data/tickers\PYZus{}main\PYZus{}industry\PYZus{}software\PYZus{}201801211359.xls}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
\PY{n}{dp\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors}\PY{o}{.}\PY{n}{to\PYZus{}excel}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{data/tickers\PYZus{}related\PYZus{}industry\PYZus{}semiconductor\PYZus{}201801211359.xls}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
\end{Verbatim}
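The same one-sheet-per-ticker layout can also be produced directly from the
dict of DataFrames with \texttt{pd.ExcelWriter}, without constructing a
Panel first; the output path in this sketch is only illustrative:
\begin{Verbatim}
# Minimal sketch: write one Excel sheet per ticker straight from the
# dict of DataFrames, without constructing a Panel first.
# The output path below is illustrative, not one used elsewhere in this notebook.
with pd.ExcelWriter('data/tickers_main_industry_software_sketch.xlsx') as writer:
    for ticker, df_ticker in dict_tickers_for_software.items():
        df_ticker.to_excel(writer, sheet_name=ticker)
\end{Verbatim}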
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}83}]:} \PY{n}{spx\PYZus{}ticker}\PY{o}{.}\PY{n}{to\PYZus{}excel}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{data/ticker\PYZus{}spx\PYZus{}201801281357.xls}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
\end{Verbatim}
\subsubsection{Load from Excel}\label{load-from-excel}
There does not seem to be a method to load an Excel file with multiple
sheets directly back into a Pandas Panel. The workaround is to open the
Excel file as an ExcelFile object, read the sheet names (Excel pages), and
reconstruct the Pandas Panel from them.
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}2}]:} \PY{c+c1}{\PYZsh{} https://stackoverflow.com/questions/26521266/using\PYZhy{}pandas\PYZhy{}to\PYZhy{}pd\PYZhy{}read\PYZhy{}excel\PYZhy{}for\PYZhy{}multiple\PYZhy{}worksheets\PYZhy{}of\PYZhy{}the\PYZhy{}same\PYZhy{}workbook}
\PY{c+c1}{\PYZsh{} Reconstruct Pandas Panel for Main industry Software:}
\PY{n}{xls\PYZus{}tickers\PYZus{}for\PYZus{}software} \PY{o}{=} \PY{n}{pd}\PY{o}{.}\PY{n}{ExcelFile}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{data/tickers\PYZus{}main\PYZus{}industry\PYZus{}software\PYZus{}201801211359.xls}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
\PY{c+c1}{\PYZsh{} .sheet\PYZus{}names is a property, not a method}
\PY{n}{list\PYZus{}tickers\PYZus{}for\PYZus{}software} \PY{o}{=} \PY{n}{xls\PYZus{}tickers\PYZus{}for\PYZus{}software}\PY{o}{.}\PY{n}{sheet\PYZus{}names}
\PY{n}{dict\PYZus{}tickers\PYZus{}for\PYZus{}software} \PY{o}{=} \PY{p}{\PYZob{}}\PY{p}{\PYZcb{}}
\PY{k}{for} \PY{n}{ticker} \PY{o+ow}{in} \PY{n}{list\PYZus{}tickers\PYZus{}for\PYZus{}software}\PY{p}{:}
\PY{n}{dict\PYZus{}tickers\PYZus{}for\PYZus{}software}\PY{p}{[}\PY{n}{ticker}\PY{p}{]} \PY{o}{=} \PY{n}{pd}\PY{o}{.}\PY{n}{read\PYZus{}excel}\PY{p}{(} \PY{n}{xls\PYZus{}tickers\PYZus{}for\PYZus{}software}\PY{p}{,}
\PY{n}{ticker}\PY{p}{,}
\PY{n}{index\PYZus{}col} \PY{o}{=} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Date}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
\PY{c+c1}{\PYZsh{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}}
\PY{c+c1}{\PYZsh{} Reconstruct Pandas Panel for related industry semiconductors:}
\PY{n}{xls\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors} \PY{o}{=} \PY{n}{pd}\PY{o}{.}\PY{n}{ExcelFile}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{data/tickers\PYZus{}related\PYZus{}industry\PYZus{}semiconductor\PYZus{}201801211359.xls}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
\PY{c+c1}{\PYZsh{} .sheet\PYZus{}names is a property, not a method}
\PY{n}{list\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors} \PY{o}{=} \PY{n}{xls\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors}\PY{o}{.}\PY{n}{sheet\PYZus{}names}
\PY{n}{dict\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors} \PY{o}{=} \PY{p}{\PYZob{}}\PY{p}{\PYZcb{}}
\PY{k}{for} \PY{n}{ticker} \PY{o+ow}{in} \PY{n}{list\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors}\PY{p}{:}
\PY{n}{dict\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors}\PY{p}{[}\PY{n}{ticker}\PY{p}{]} \PY{o}{=} \PY{n}{pd}\PY{o}{.}\PY{n}{read\PYZus{}excel}\PY{p}{(} \PY{n}{xls\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors}\PY{p}{,}
\PY{n}{ticker}\PY{p}{,}
\PY{n}{index\PYZus{}col} \PY{o}{=} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Date}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
\PY{c+c1}{\PYZsh{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}}
\PY{c+c1}{\PYZsh{} Create Pandas Panels to have multiple pages for Dataframes.}
\PY{c+c1}{\PYZsh{} https://www.tutorialspoint.com/python\PYZus{}pandas/python\PYZus{}pandas\PYZus{}panel.htm}
\PY{n}{dp\PYZus{}tickers\PYZus{}for\PYZus{}software} \PY{o}{=} \PY{n}{pd}\PY{o}{.}\PY{n}{Panel}\PY{p}{(}\PY{n}{dict\PYZus{}tickers\PYZus{}for\PYZus{}software}\PY{p}{)}
\PY{n}{dp\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors} \PY{o}{=} \PY{n}{pd}\PY{o}{.}\PY{n}{Panel}\PY{p}{(}\PY{n}{dict\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors}\PY{p}{)}
\end{Verbatim}
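A simpler alternative, if the Panel itself is not needed, is to let pandas
read every sheet in one call: passing \texttt{sheet\_name=None} makes
\texttt{pd.read\_excel} return a dict of DataFrames keyed by sheet name
(supported in the pandas versions of this era and later). A minimal sketch
using the same workbook as above:
\begin{Verbatim}
# Minimal sketch: read every sheet of the workbook in one call.
# sheet_name=None returns a dict of {sheet name: DataFrame}.
dict_tickers_for_software = pd.read_excel(
    'data/tickers_main_industry_software_201801211359.xls',
    sheet_name=None,
    index_col='Date')
\end{Verbatim}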
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}86}]:} \PY{n}{df\PYZus{}spx} \PY{o}{=} \PY{n}{pd}\PY{o}{.}\PY{n}{read\PYZus{}excel}\PY{p}{(} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{data/ticker\PYZus{}spx\PYZus{}201801281357.xls}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,}
\PY{n}{sheet\PYZus{}name} \PY{o}{=} \PY{l+m+mi}{0}\PY{p}{,}
\PY{n}{index\PYZus{}col} \PY{o}{=} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Date}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}87}]:} \PY{n}{df\PYZus{}spx}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}87}]:} Open High Low Close Adj Close Volume
Date
2017-01-19 0.006 0.006 0.006 0.006 0.006 0
2017-01-20 0.045 0.045 0.045 0.045 0.045 0
2017-01-23 0.045 0.045 0.045 0.045 0.045 58000
2017-01-24 0.045 0.045 0.045 0.045 0.045 19000
2017-01-25 0.045 0.050 0.045 0.045 0.045 204250
2017-01-26 0.050 0.050 0.045 0.045 0.045 46000
2017-01-27 0.045 0.045 0.045 0.045 0.045 12000
2017-01-30 0.006 0.006 0.006 0.006 0.006 0
2017-01-31 0.040 0.040 0.040 0.040 0.040 8000
2017-02-01 0.045 0.045 0.045 0.045 0.045 28000
2017-02-02 0.045 0.045 0.045 0.045 0.045 20000
2017-02-03 0.045 0.045 0.045 0.045 0.045 0
2017-02-06 0.045 0.045 0.045 0.045 0.045 4000
2017-02-07 0.040 0.040 0.040 0.040 0.040 117735
2017-02-08 0.040 0.040 0.040 0.040 0.040 1348
2017-02-09 0.040 0.040 0.040 0.040 0.040 1000
2017-02-10 0.040 0.040 0.040 0.040 0.040 0
2017-02-13 0.040 0.040 0.040 0.040 0.040 0
2017-02-14 0.040 0.040 0.040 0.040 0.040 1000
2017-02-15 0.050 0.050 0.045 0.045 0.045 16000
2017-02-16 0.050 0.050 0.050 0.050 0.050 6320
2017-02-17 0.045 0.045 0.045 0.045 0.045 28200
2017-02-21 0.045 0.045 0.045 0.045 0.045 100500
2017-02-22 0.045 0.045 0.045 0.045 0.045 85000
2017-02-23 0.005 0.005 0.005 0.005 0.005 0
2017-02-24 0.045 0.045 0.045 0.045 0.045 46000
2017-02-27 0.005 0.005 0.005 0.005 0.005 0
2017-02-28 0.005 0.006 0.005 0.006 0.006 3234898
2017-03-01 0.045 0.045 0.040 0.040 0.040 78845
2017-03-02 0.008 0.010 0.008 0.009 0.009 3692497
{\ldots} {\ldots} {\ldots} {\ldots} {\ldots} {\ldots} {\ldots}
2017-12-06 0.035 0.035 0.035 0.035 0.035 27000
2017-12-07 0.035 0.035 0.035 0.035 0.035 40590
2017-12-08 0.035 0.035 0.035 0.035 0.035 29000
2017-12-11 0.035 0.035 0.035 0.035 0.035 237500
2017-12-12 0.035 0.040 0.035 0.035 0.035 95000
2017-12-13 0.040 0.040 0.040 0.040 0.040 20000
2017-12-14 0.035 0.035 0.035 0.035 0.035 33000
2017-12-15 0.040 0.040 0.040 0.040 0.040 59000
2017-12-18 0.040 0.040 0.040 0.040 0.040 9300
2017-12-19 0.035 0.035 0.035 0.035 0.035 40560
2017-12-20 0.040 0.040 0.040 0.040 0.040 100000
2017-12-21 0.040 0.045 0.035 0.045 0.045 56500
2017-12-22 0.045 0.045 0.045 0.045 0.045 0
2017-12-26 0.045 0.045 0.045 0.045 0.045 0
2017-12-27 0.040 0.040 0.035 0.035 0.035 36820
2017-12-28 0.040 0.045 0.040 0.045 0.045 5550
2017-12-29 0.035 0.045 0.035 0.045 0.045 18320
2018-01-02 0.045 0.045 0.040 0.040 0.040 30000
2018-01-03 0.035 0.040 0.035 0.040 0.040 25350
2018-01-04 0.035 0.035 0.035 0.035 0.035 49000
2018-01-05 0.040 0.045 0.035 0.045 0.045 100000
2018-01-08 0.045 0.045 0.040 0.045 0.045 37500
2018-01-09 0.040 0.050 0.040 0.050 0.050 115290
2018-01-10 0.050 0.065 0.050 0.060 0.060 271450
2018-01-11 0.060 0.065 0.055 0.060 0.060 55515
2018-01-12 0.055 0.055 0.055 0.055 0.055 7646
2018-01-16 0.060 0.065 0.055 0.055 0.055 307800
2018-01-17 0.050 0.055 0.050 0.055 0.055 52000
2018-01-18 0.055 0.055 0.050 0.055 0.055 23070
2018-01-19 0.055 0.055 0.055 0.055 0.055 131000
[253 rows x 6 columns]
\end{Verbatim}
\subsubsection{Debug}\label{debug}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}9}]:} \PY{n+nb}{dir}\PY{p}{(}\PY{n}{xls\PYZus{}tickers\PYZus{}for\PYZus{}software}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}9}]:} ['\_\_class\_\_',
'\_\_delattr\_\_',
'\_\_dict\_\_',
'\_\_dir\_\_',
'\_\_doc\_\_',
'\_\_enter\_\_',
'\_\_eq\_\_',
'\_\_exit\_\_',
'\_\_format\_\_',
'\_\_fspath\_\_',
'\_\_ge\_\_',
'\_\_getattribute\_\_',
'\_\_gt\_\_',
'\_\_hash\_\_',
'\_\_init\_\_',
'\_\_init\_subclass\_\_',
'\_\_le\_\_',
'\_\_lt\_\_',
'\_\_module\_\_',
'\_\_ne\_\_',
'\_\_new\_\_',
'\_\_reduce\_\_',
'\_\_reduce\_ex\_\_',
'\_\_repr\_\_',
'\_\_setattr\_\_',
'\_\_sizeof\_\_',
'\_\_str\_\_',
'\_\_subclasshook\_\_',
'\_\_weakref\_\_',
'\_io',
'\_parse\_excel',
'\_should\_parse',
'book',
'close',
'io',
'parse',
'sheet\_names']
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}10}]:} \PY{n}{xls\PYZus{}tickers\PYZus{}for\PYZus{}software}\PY{o}{.}\PY{n}{sheet\PYZus{}names}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}10}]:} ['ADBE',
'ADP',
'ADSK',
'AKAM',
'ANSS',
'ATVI',
'CA',
'CDNS',
'CRM',
'CTXS',
'EA',
'EBAY',
'FB',
'FIS',
'FISV',
'GOOG',
'GOOGL',
'INTU',
'MA',
'MSFT',
'NFLX',
'NTAP',
'ORCL',
'PAYX',
'RHT',
'SNPS',
'SYMC',
'TSS',
'V',
'VRSN',
'WU']
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}12}]:} \PY{n}{dict\PYZus{}tickers\PYZus{}for\PYZus{}software}\PY{o}{.}\PY{n}{keys}\PY{p}{(}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}12}]:} dict\_keys(['ADBE', 'ADP', 'ADSK', 'AKAM', 'ANSS', 'ATVI', 'CA', 'CDNS', 'CRM', 'CTXS', 'EA', 'EBAY', 'FB', 'FIS', 'FISV', 'GOOG', 'GOOGL', 'INTU', 'MA', 'MSFT', 'NFLX', 'NTAP', 'ORCL', 'PAYX', 'RHT', 'SNPS', 'SYMC', 'TSS', 'V', 'VRSN', 'WU'])
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}13}]:} \PY{n}{dict\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors}\PY{o}{.}\PY{n}{keys}\PY{p}{(}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}13}]:} dict\_keys(['ADBE', 'ADP', 'ADSK', 'AKAM', 'ANSS', 'ATVI', 'CA', 'CDNS', 'CRM', 'CTXS', 'EA', 'EBAY', 'FB', 'FIS', 'FISV', 'GOOG', 'GOOGL', 'INTU', 'MA', 'MSFT', 'NFLX', 'NTAP', 'ORCL', 'PAYX', 'RHT', 'SNPS', 'SYMC', 'TSS', 'V', 'VRSN', 'WU'])
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}16}]:} \PY{n}{dp\PYZus{}tickers\PYZus{}for\PYZus{}software}\PY{o}{.}\PY{n}{items}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}16}]:} Index(['ADBE', 'ADP', 'ADSK', 'AKAM', 'ANSS', 'ATVI', 'CA', 'CDNS', 'CRM',
'CTXS', 'EA', 'EBAY', 'FB', 'FIS', 'FISV', 'GOOG', 'GOOGL', 'INTU',
'MA', 'MSFT', 'NFLX', 'NTAP', 'ORCL', 'PAYX', 'RHT', 'SNPS', 'SYMC',
'TSS', 'V', 'VRSN', 'WU'],
dtype='object')
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}17}]:} \PY{n}{dp\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors}\PY{o}{.}\PY{n}{items}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}17}]:} Index(['ADI', 'AMAT', 'AMD', 'AVGO', 'INTC', 'KLAC', 'LRCX', 'MCHP', 'MU',
'NVDA', 'QCOM', 'QRVO', 'SWKS', 'TXN', 'XLNX'],
dtype='object')
\end{Verbatim}
\subsubsection{From Rubric}\label{from-rubric}
\subsubsection{Step 3: Calculate the historical distance measure between
all the possible pairs of
stocks.}\label{step-3-calculate-the-historical-distance-measure-between-all-the-possible-pairs-of-stocks.}
First, we need to build a Pandas DataFrame for both the main industry
(software) and the related industry (semiconductors) containing the ticker
symbols and closing prices.

NOTE: We use the Adjusted Closing Price to take stock splits and dividends
into account.
https://www.investopedia.com/terms/a/adjusted\_closing\_price.asp

Simple trading strategy: Illiquid stocks are removed from the investment
universe. A cumulative total return index is then created for each stock
(dividends included) and the starting price during the formation period is
set to \$1 (price normalization). Pairs are formed over a twelve-month
period (formation period) and are then traded in the next six-month period
(trading period). The matching partner for each stock is found by looking
for the security that minimizes the sum of squared deviations between the
two normalized price series. The top 20 pairs with the smallest historical
distance measure are then traded, and a long-short position is opened when
the pair prices have diverged by two standard deviations and closed when
the prices revert.
https://quantpedia.com/Screener/Details/12
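
As a minimal sketch of the distance measure itself: once a DataFrame with
one column of normalized prices per ticker is available (such as
\texttt{df\_ticker\_closing\_for\_software}, built in the cell below), the
sum of squared deviations for every possible pair can be computed and ranked
as follows (hypothetical helper, not part of the original notebook):
\begin{Verbatim}
# Minimal sketch of the distance measure: sum of squared deviations (SSD)
# between the normalized price series of every possible pair of tickers.
# Assumes a DataFrame with one column per ticker of normalized prices,
# such as df_ticker_closing_for_software built in the cell below.
from itertools import combinations

def pairwise_ssd(df_normalized):
    distances = {}
    for ticker_a, ticker_b in combinations(df_normalized.columns, 2):
        deviation = df_normalized[ticker_a] - df_normalized[ticker_b]
        distances[(ticker_a, ticker_b)] = (deviation ** 2).sum()
    # Smallest distance first; the strategy trades the top pairs.
    return pd.Series(distances).sort_values()

# Example usage once the next cell has built the DataFrame:
# pairwise_ssd(df_ticker_closing_for_software).head(20)
\end{Verbatim}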
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}4}]:} \PY{c+c1}{\PYZsh{} https://quantpedia.com/Screener/Details/12}
\PY{c+c1}{\PYZsh{} https://stackoverflow.com/questions/18062135/combining\PYZhy{}two\PYZhy{}series\PYZhy{}into\PYZhy{}a\PYZhy{}dataframe\PYZhy{}in\PYZhy{}pandas}
\PY{c+c1}{\PYZsh{} Going to concat multiple Panda series into a Panda DataFrame}
\PY{n}{list\PYZus{}of\PYZus{}series\PYZus{}for\PYZus{}software} \PY{o}{=} \PY{p}{[}\PY{p}{]}
\PY{n}{dict\PYZus{}of\PYZus{}dataframes\PYZus{}modified\PYZus{}for\PYZus{}software} \PY{o}{=} \PY{p}{\PYZob{}}\PY{p}{\PYZcb{}}
\PY{k}{for} \PY{n}{ticker} \PY{o+ow}{in} \PY{n}{dp\PYZus{}tickers\PYZus{}for\PYZus{}software}\PY{o}{.}\PY{n}{items}\PY{p}{:}
\PY{n}{df\PYZus{}temp\PYZus{}e} \PY{o}{=} \PY{n}{pd}\PY{o}{.}\PY{n}{DataFrame}\PY{p}{(}\PY{p}{)}
\PY{c+c1}{\PYZsh{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}}
\PY{c+c1}{\PYZsh{} Getting adjusted close. It takes into account stock split and}
\PY{c+c1}{\PYZsh{} dividend pay outs.}
\PY{c+c1}{\PYZsh{} https://www.investopedia.com/terms/a/adjusted\PYZus{}closing\PYZus{}price.asp}
\PY{c+c1}{\PYZsh{}}
\PY{c+c1}{\PYZsh{} Where price includes reinvested dividends from paper Pair Trading.}
\PY{c+c1}{\PYZsh{} Section 2.1 \PYZhy{} Pairs Formation Page 11.}
\PY{c+c1}{\PYZsh{} This means we do NOT include dividends as cumulative returns.}
\PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{AdjClose}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{n}{dp\PYZus{}tickers\PYZus{}for\PYZus{}software}\PY{p}{[}\PY{n}{ticker}\PY{p}{]}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{AdjClose}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{ExDividend}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{n}{dp\PYZus{}tickers\PYZus{}for\PYZus{}software}\PY{p}{[}\PY{n}{ticker}\PY{p}{]}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{ExDividend}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\PY{c+c1}{\PYZsh{} Forward fill then backward fill data.}
\PY{n}{df\PYZus{}temp\PYZus{}e} \PY{o}{=} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{o}{.}\PY{n}{ffill}\PY{p}{(}\PY{p}{)}\PY{o}{.}\PY{n}{bfill}\PY{p}{(}\PY{p}{)}
\PY{c+c1}{\PYZsh{} Adjusted close normalized by dividing by the price of day 1 of the formation period.}
\PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{AdjClose\PYZus{}Normalized}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{AdjClose}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{/} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{o}{.}\PY{n}{iloc}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{AdjClose}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\PY{c+c1}{\PYZsh{} Get the daily returns}
\PY{c+c1}{\PYZsh{} https://stackoverflow.com/questions/20000726/calculate\PYZhy{}daily\PYZhy{}returns\PYZhy{}with\PYZhy{}pandas\PYZhy{}dataframe}
\PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{daily\PYZus{}returns}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{AdjClose}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{\PYZhy{}} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{AdjClose}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{o}{.}\PY{n}{shift}\PY{p}{(}\PY{l+m+mi}{1}\PY{p}{)}
\PY{c+c1}{\PYZsh{} Fill all not a number (NaN) with 0.}
\PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{daily\PYZus{}returns}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{daily\PYZus{}returns}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{o}{.}\PY{n}{fillna}\PY{p}{(}\PY{l+m+mf}{0.0}\PY{p}{)}
\PY{c+c1}{\PYZsh{} Get cumulative returns}
\PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{cumulative\PYZus{}returns}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{daily\PYZus{}returns}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{o}{.}\PY{n}{cumsum}\PY{p}{(}\PY{p}{)}
\PY{c+c1}{\PYZsh{} Add dividend pay out.}
\PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{cumulative\PYZus{}returns\PYZus{}with\PYZus{}dividends}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{cumulative\PYZus{}returns}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{+} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{ExDividend}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\PY{c+c1}{\PYZsh{} Normalized cumulative returns}
\PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{cumulative\PYZus{}returns\PYZus{}normalized}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{cumulative\PYZus{}returns}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{/} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{o}{.}\PY{n}{iloc}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{AdjClose}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\PY{c+c1}{\PYZsh{} Normalized cumulative returns by dividing by the price of day 1 of the formation period.}
\PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{cumulative\PYZus{}returns\PYZus{}with\PYZus{}dividends\PYZus{}normalized}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{cumulative\PYZus{}returns\PYZus{}with\PYZus{}dividends}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{/} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{o}{.}\PY{n}{iloc}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{AdjClose}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\PY{c+c1}{\PYZsh{} Forward fill then backward fill data.}
\PY{n}{df\PYZus{}temp\PYZus{}e} \PY{o}{=} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{o}{.}\PY{n}{ffill}\PY{p}{(}\PY{p}{)}\PY{o}{.}\PY{n}{bfill}\PY{p}{(}\PY{p}{)}
\PY{c+c1}{\PYZsh{} Gather data for Debugging}
\PY{n}{df\PYZus{}temp\PYZus{}f} \PY{o}{=} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{o}{.}\PY{n}{copy}\PY{p}{(}\PY{p}{)}
\PY{n}{dict\PYZus{}of\PYZus{}dataframes\PYZus{}modified\PYZus{}for\PYZus{}software}\PY{p}{[}\PY{n}{ticker}\PY{p}{]} \PY{o}{=} \PY{n}{df\PYZus{}temp\PYZus{}f}
\PY{c+c1}{\PYZsh{} Copy only the Pandas Series}
\PY{c+c1}{\PYZsh{}}
\PY{c+c1}{\PYZsh{} Where price includes reinvested dividends from paper Pair Trading.}
\PY{c+c1}{\PYZsh{} Section 2.1 \PYZhy{} Pairs Formation Page 11.}
\PY{c+c1}{\PYZsh{} This means we do NOT include dividends as cumulative returns.}
\PY{n}{ds\PYZus{}temp\PYZus{}a} \PY{o}{=} \PY{n}{df\PYZus{}temp\PYZus{}f}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{AdjClose\PYZus{}Normalized}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\PY{c+c1}{\PYZsh{} Name the Pandas Series}
\PY{n}{ds\PYZus{}temp\PYZus{}a}\PY{o}{.}\PY{n}{name} \PY{o}{=} \PY{n}{ticker}
\PY{c+c1}{\PYZsh{} Rename the column in the Pandas Series.}
\PY{n}{ds\PYZus{}temp\PYZus{}a}\PY{o}{.}\PY{n}{columns} \PY{o}{=} \PY{p}{[}\PY{n}{ticker}\PY{p}{]}
\PY{c+c1}{\PYZsh{} Append the Pandas Series to the list.}
\PY{n}{list\PYZus{}of\PYZus{}series\PYZus{}for\PYZus{}software}\PY{o}{.}\PY{n}{append}\PY{p}{(} \PY{n}{ds\PYZus{}temp\PYZus{}a} \PY{p}{)}
\PY{c+c1}{\PYZsh{} Make Pandas DataFrame from Pandas Series.}
\PY{n}{df\PYZus{}ticker\PYZus{}closing\PYZus{}for\PYZus{}software} \PY{o}{=} \PY{n}{pd}\PY{o}{.}\PY{n}{concat}\PY{p}{(}\PY{n}{list\PYZus{}of\PYZus{}series\PYZus{}for\PYZus{}software}\PY{p}{,} \PY{n}{axis} \PY{o}{=} \PY{l+m+mi}{1}\PY{p}{)}
\PY{c+c1}{\PYZsh{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}}
\PY{c+c1}{\PYZsh{} Going to concat multiple Panda series into a Panda DataFrame}
\PY{n}{list\PYZus{}of\PYZus{}series\PYZus{}for\PYZus{}semiconductors} \PY{o}{=} \PY{p}{[}\PY{p}{]}
\PY{k}{for} \PY{n}{ticker} \PY{o+ow}{in} \PY{n}{dp\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors}\PY{o}{.}\PY{n}{items}\PY{p}{:}
\PY{n}{df\PYZus{}temp\PYZus{}e} \PY{o}{=} \PY{n}{pd}\PY{o}{.}\PY{n}{DataFrame}\PY{p}{(}\PY{p}{)}
\PY{c+c1}{\PYZsh{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}\PYZhy{}}
\PY{c+c1}{\PYZsh{} Getting adjusted close. It takes into account stock split and}
\PY{c+c1}{\PYZsh{} dividend pay outs.}
\PY{c+c1}{\PYZsh{} https://www.investopedia.com/terms/a/adjusted\PYZus{}closing\PYZus{}price.asp}
\PY{c+c1}{\PYZsh{}}
\PY{c+c1}{\PYZsh{} Where price includes reinvested dividends from paper Pair Trading.}
\PY{c+c1}{\PYZsh{} Section 2.1 \PYZhy{} Pairs Formation Page 11.}
\PY{c+c1}{\PYZsh{} This means we do NOT include dividends as cumulative returns.}
\PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{AdjClose}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{n}{dp\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors}\PY{p}{[}\PY{n}{ticker}\PY{p}{]}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{AdjClose}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{ExDividend}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{n}{dp\PYZus{}tickers\PYZus{}for\PYZus{}semiconductors}\PY{p}{[}\PY{n}{ticker}\PY{p}{]}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{ExDividend}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\PY{c+c1}{\PYZsh{} Forward fill then backward fill data.}
\PY{n}{df\PYZus{}temp\PYZus{}e} \PY{o}{=} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{o}{.}\PY{n}{ffill}\PY{p}{(}\PY{p}{)}\PY{o}{.}\PY{n}{bfill}\PY{p}{(}\PY{p}{)}
\PY{c+c1}{\PYZsh{} Adjusted close normalized by dividing by the price of day 1 of the formation period.}
\PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{AdjClose\PYZus{}Normalized}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{AdjClose}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{/} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{o}{.}\PY{n}{iloc}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{AdjClose}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\PY{c+c1}{\PYZsh{} Get the daily returns}
\PY{c+c1}{\PYZsh{} https://stackoverflow.com/questions/20000726/calculate\PYZhy{}daily\PYZhy{}returns\PYZhy{}with\PYZhy{}pandas\PYZhy{}dataframe}
\PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{daily\PYZus{}returns}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{AdjClose}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{\PYZhy{}} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{AdjClose}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{o}{.}\PY{n}{shift}\PY{p}{(}\PY{l+m+mi}{1}\PY{p}{)}
\PY{c+c1}{\PYZsh{} Fill all not a number (NaN) with 0.}
\PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{daily\PYZus{}returns}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{daily\PYZus{}returns}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{o}{.}\PY{n}{fillna}\PY{p}{(}\PY{l+m+mf}{0.0}\PY{p}{)}
\PY{c+c1}{\PYZsh{} Get cumulative returns}
\PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{cumulative\PYZus{}returns}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{daily\PYZus{}returns}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{o}{.}\PY{n}{cumsum}\PY{p}{(}\PY{p}{)}
\PY{c+c1}{\PYZsh{} Normalized cumulative returns}
\PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{cumulative\PYZus{}returns\PYZus{}normalized}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{cumulative\PYZus{}returns}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{/} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{o}{.}\PY{n}{iloc}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{AdjClose}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\PY{c+c1}{\PYZsh{} Add the dividend payout for the day.}
\PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{cumulative\PYZus{}returns\PYZus{}with\PYZus{}dividends}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{cumulative\PYZus{}returns}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{+} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{ExDividend}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\PY{c+c1}{\PYZsh{} Normalized cumulative returns by dividing by the price of day 1 of the formation period.}
\PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{cumulative\PYZus{}returns\PYZus{}with\PYZus{}dividends\PYZus{}normalized}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{cumulative\PYZus{}returns\PYZus{}with\PYZus{}dividends}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{/} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{o}{.}\PY{n}{iloc}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{AdjClose}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\PY{c+c1}{\PYZsh{} Forward fill then backward fill data.}
\PY{n}{df\PYZus{}temp\PYZus{}e} \PY{o}{=} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{o}{.}\PY{n}{ffill}\PY{p}{(}\PY{p}{)}\PY{o}{.}\PY{n}{bfill}\PY{p}{(}\PY{p}{)}
\PY{c+c1}{\PYZsh{} Gather data for Debugging}
\PY{n}{df\PYZus{}temp\PYZus{}f} \PY{o}{=} \PY{n}{df\PYZus{}temp\PYZus{}e}\PY{o}{.}\PY{n}{copy}\PY{p}{(}\PY{p}{)}
\PY{n}{dict\PYZus{}of\PYZus{}dataframes\PYZus{}modified\PYZus{}for\PYZus{}semiconductors}\PY{p}{[}\PY{n}{ticker}\PY{p}{]} \PY{o}{=} \PY{n}{df\PYZus{}temp\PYZus{}f}
\PY{c+c1}{\PYZsh{} Copy only the Pandas Series}
\PY{c+c1}{\PYZsh{}}
\PY{c+c1}{\PYZsh{} Following the paper Pairs Trading, Section 2.1, Pairs Formation, page 11,}
\PY{c+c1}{\PYZsh{} prices already include reinvested dividends, so the normalized adjusted close}
\PY{c+c1}{\PYZsh{} is used here rather than the series with dividends added on top.}
\PY{n}{ds\PYZus{}temp\PYZus{}a} \PY{o}{=} \PY{n}{df\PYZus{}temp\PYZus{}f}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{AdjClose\PYZus{}Normalized}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\PY{c+c1}{\PYZsh{} Name the Pandas Series}
\PY{n}{ds\PYZus{}temp\PYZus{}a}\PY{o}{.}\PY{n}{name} \PY{o}{=} \PY{n}{ticker}
\PY{c+c1}{\PYZsh{} The Series name becomes the column label when the series are concatenated below.}
\PY{c+c1}{\PYZsh{} Append the Pandas Series to the list.}
\PY{n}{list\PYZus{}of\PYZus{}series\PYZus{}for\PYZus{}semiconductors}\PY{o}{.}\PY{n}{append}\PY{p}{(} \PY{n}{ds\PYZus{}temp\PYZus{}a} \PY{p}{)}
\PY{c+c1}{\PYZsh{} Make Pandas DataFrame from Pandas Series.}
\PY{n}{df\PYZus{}ticker\PYZus{}closing\PYZus{}for\PYZus{}semiconductors} \PY{o}{=} \PY{n}{pd}\PY{o}{.}\PY{n}{concat}\PY{p}{(}\PY{n}{list\PYZus{}of\PYZus{}series\PYZus{}for\PYZus{}semiconductors}\PY{p}{,} \PY{n}{axis} \PY{o}{=} \PY{l+m+mi}{1}\PY{p}{)}
\end{Verbatim}
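In symbols, for each ticker the loop above builds the following columns from the adjusted close $P^{\mathrm{adj}}_t$ and the same-day ex-dividend amount $D_t$, where $t = 1$ is the first day of the formation period and the first daily return is filled with zero:
\[
\texttt{AdjClose\_Normalized}_t = \frac{P^{\mathrm{adj}}_t}{P^{\mathrm{adj}}_1},
\qquad
\texttt{daily\_returns}_t = P^{\mathrm{adj}}_t - P^{\mathrm{adj}}_{t-1},
\]
\[
\texttt{cumulative\_returns}_t = \sum_{s \le t} \texttt{daily\_returns}_s,
\qquad
\texttt{cumulative\_returns\_with\_dividends}_t = \texttt{cumulative\_returns}_t + D_t,
\]
and the two \texttt{\_normalized} variants divide the corresponding column by $P^{\mathrm{adj}}_1$. Only \texttt{AdjClose\_Normalized} is carried into the per-industry price matrices \texttt{df\_ticker\_closing\_for\_software} and \texttt{df\_ticker\_closing\_for\_semiconductors} used below.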
\subsubsection{Debug}\label{debug}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}290}]:} \PY{n}{df\PYZus{}ticker\PYZus{}closing\PYZus{}for\PYZus{}software}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}290}]:} ADBE ADP ADSK AKAM ANSS ATVI \textbackslash{}
Date
2017-01-18 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
2017-01-19 1.009192 1.000000 1.000000 0.991092 1.000000 0.994371
2017-01-20 1.017649 1.002622 1.021130 0.993534 0.997968 0.997185
2017-01-23 1.020039 0.992522 1.019505 0.976580 1.000214 0.985926
2017-01-24 1.045317 1.002622 1.025256 0.973276 1.010589 1.000512
2017-01-25 1.050188 0.997378 1.033383 0.987500 1.015617 1.012282
2017-01-26 1.037595 0.990968 1.020755 0.973851 1.014547 1.009980
2017-01-27 1.047799 0.987763 1.022256 0.976580 1.004920 1.013562
2017-01-30 1.046236 0.990288 1.014754 0.974282 0.999893 1.013562
2017-01-31 1.042191 0.980771 1.017004 0.985489 0.997540 1.028915
2017-02-01 1.042008 0.925027 1.016254 0.987069 0.997326 1.035568
2017-02-02 1.040169 0.935418 1.034759 0.997126 1.003851 1.035568
2017-02-03 1.058645 0.940759 1.055389 0.999282 1.017970 1.023797
2017-02-06 1.052119 0.936875 1.035509 1.001580 1.016686 1.028403
2017-02-07 1.056715 0.935127 1.056514 1.021839 1.018933 1.024821
2017-02-08 1.067469 0.937943 1.036884 0.913075 1.018612 1.002815
2017-02-09 1.070319 0.948334 1.048762 0.917385 1.030271 1.016633
2017-02-10 1.074088 0.949985 1.040635 0.918822 1.036047 1.208547
2017-02-13 1.081441 0.957366 1.053763 0.913793 1.039362 1.169396
2017-02-14 1.080798 0.965718 1.057014 0.913075 1.052091 1.150972
2017-02-15 1.091369 0.967758 1.054764 0.918103 1.063857 1.163767
2017-02-16 1.093207 0.969991 1.064516 0.922989 1.074233 1.161464
2017-02-17 1.100009 0.968049 1.080270 0.895690 1.072521 1.159928
2017-02-21 1.099642 0.963582 1.089647 0.900000 1.086961 1.160184
2017-02-22 1.098171 0.975236 1.080020 0.905316 1.081185 1.157369
2017-02-23 1.092288 0.987084 1.089022 0.900862 1.114023 1.153531
2017-02-24 1.096700 1.000291 1.094649 0.903305 1.117767 1.165558
2017-02-27 1.091828 0.997378 1.097024 0.903161 1.147182 1.169140
2017-02-28 1.087784 0.996601 1.079020 0.899425 1.141940 1.154811
2017-03-01 1.106260 1.015150 1.111903 0.910632 1.159054 1.206244
{\ldots} {\ldots} {\ldots} {\ldots} {\ldots} {\ldots} {\ldots}
2017-12-06 1.595000 1.144301 1.336834 0.801293 1.547973 1.563876
2017-12-07 1.605019 1.143906 1.370468 0.810920 1.564980 1.584985
2017-12-08 1.595459 1.145288 1.339835 0.811782 1.563055 1.607124
2017-12-11 1.602261 1.163057 1.335709 0.812500 1.565836 1.626173
2017-12-12 1.585991 1.155456 1.329457 0.819253 1.554498 1.657837
2017-12-13 1.625425 1.155160 1.325581 0.814080 1.548401 1.662985
2017-12-14 1.608604 1.149632 1.328457 0.815517 1.545727 1.674055
2017-12-15 1.631492 1.166710 1.355339 0.829885 1.583378 1.717818
2017-12-18 1.624690 1.171152 1.343836 0.943534 1.578672 1.699283
2017-12-19 1.608328 1.168388 1.317704 0.960057 1.579099 1.670708
2017-12-20 1.601710 1.163452 1.313203 0.957328 1.576104 1.657322
2017-12-21 1.604559 1.158911 1.305701 0.945977 1.574821 1.675342
2017-12-22 1.608604 1.153778 1.298950 0.940374 1.568724 1.660669
2017-12-26 1.603456 1.160589 1.297824 0.939943 1.565836 1.633381
2017-12-27 1.611913 1.157529 1.307577 0.936638 1.568724 1.630549
2017-12-28 1.613659 1.158023 1.313703 0.940086 1.579313 1.632094
2017-12-29 1.610810 1.156838 1.310703 0.934483 1.578672 1.630035
2018-01-02 1.633422 1.144992 1.339335 0.941954 1.588940 1.655520
2018-01-03 1.664124 1.157430 1.367592 0.947414 1.619638 1.681263
2018-01-04 1.684162 1.168486 1.401225 0.942529 1.623917 1.664530
2018-01-05 1.703649 1.167795 1.385846 0.945833 1.630656 1.708550
2018-01-08 1.700892 1.164242 1.393098 0.946552 1.644133 1.715243
2018-01-09 1.716150 1.172336 1.401725 0.969684 1.659429 1.703916
2018-01-10 1.719919 1.161379 1.393723 0.953161 1.625949 1.720392
2018-01-11 1.736557 1.156739 1.416104 0.936925 1.627554 1.782432
2018-01-12 1.792904 1.169474 1.449237 0.941379 1.649588 1.811264
2018-01-16 1.769096 1.178555 1.400100 0.926724 1.640710 1.768531
2018-01-17 1.806140 1.204813 1.412853 0.939655 1.665954 1.811264
2018-01-18 1.803475 1.193461 1.407227 0.935057 1.693657 1.796848
2018-01-19 1.799154 1.198792 1.441485 0.942960 1.727885 1.816670
CA CDNS CRM CTXS {\ldots} NTAP \textbackslash{}
Date {\ldots}
2017-01-18 1.000000 1.000000 1.000000 1.000000 {\ldots} 1.000000
2017-01-19 1.000000 1.000000 1.000000 1.000000 {\ldots} 1.000000
2017-01-20 1.000614 0.998845 1.006087 1.003886 {\ldots} 1.015071
2017-01-23 0.996928 0.988448 1.007807 0.998057 {\ldots} 1.009489
2017-01-24 1.012289 1.010012 1.020378 1.018784 {\ldots} 1.047167
2017-01-25 0.961905 1.011167 1.038375 1.033574 {\ldots} 1.059168
2017-01-26 0.962826 0.999615 1.031494 0.966426 {\ldots} 1.049400
2017-01-27 0.956375 1.009626 1.034934 0.974414 {\ldots} 1.053865
2017-01-30 0.956375 1.005776 1.041551 0.983807 {\ldots} 1.062517
2017-01-31 0.960676 1.002310 1.046712 0.984454 {\ldots} 1.069495
2017-02-01 0.950845 0.994224 1.039831 0.961572 {\ldots} 1.061959
2017-02-02 0.965591 1.113978 1.060341 1.011648 {\ldots} 1.067262
2017-02-03 0.978802 1.121679 1.061400 1.035269 {\ldots} 1.068378
2017-02-06 0.978187 1.131305 1.055313 1.029870 {\ldots} 1.070611
2017-02-07 0.967435 1.131305 1.061400 1.048092 {\ldots} 1.083729
2017-02-08 0.962826 1.125915 1.067884 1.048632 {\ldots} 1.087357
2017-02-09 0.970507 1.137851 1.072251 1.058485 {\ldots} 1.097125
2017-02-10 0.976037 1.137081 1.067090 1.060240 {\ldots} 1.101033
2017-02-13 0.978495 1.148248 1.070663 1.063344 {\ldots} 1.104661
2017-02-14 0.980799 1.152099 1.070795 1.065369 {\ldots} 1.102986
2017-02-15 0.981418 1.159030 1.080720 1.081971 {\ldots} 1.086520
2017-02-16 0.987612 1.164420 1.069472 1.079001 {\ldots} 1.132012
2017-02-17 0.993806 1.162880 1.078073 1.086560 {\ldots} 1.121128
2017-02-21 0.995664 1.185599 1.087469 1.090204 {\ldots} 1.130896
2017-02-22 0.997832 1.188294 1.086145 1.082106 {\ldots} 1.135920
2017-02-23 0.998761 1.185214 1.086013 1.078866 {\ldots} 1.142618
2017-02-24 1.010529 1.204852 1.082175 1.082241 {\ldots} 1.142897
2017-02-27 1.008671 1.214478 1.079000 1.081971 {\ldots} 1.151828
2017-02-28 0.999380 1.189834 1.076485 1.065639 {\ldots} 1.167457
2017-03-01 1.014246 1.205622 1.109038 1.090204 {\ldots} 1.200391
{\ldots} {\ldots} {\ldots} {\ldots} {\ldots} {\ldots} {\ldots}
2017-12-06 1.047782 1.651906 1.361916 1.179559 {\ldots} 1.619451
2017-12-07 1.043691 1.676935 1.377134 1.176589 {\ldots} 1.628510
2017-12-08 1.053445 1.677320 1.368797 1.182258 {\ldots} 1.649174
2017-12-11 1.048726 1.675010 1.383353 1.174565 {\ldots} 1.658232
2017-12-12 1.053760 1.666923 1.371841 1.177264 {\ldots} 1.636152
2017-12-13 1.049670 1.658067 1.375414 1.176319 {\ldots} 1.635586
2017-12-14 1.044321 1.663073 1.379383 1.174969 {\ldots} 1.623980
2017-12-15 1.060682 1.681941 1.395792 1.181448 {\ldots} 1.621150
2017-12-18 1.061626 1.677320 1.390499 1.184688 {\ldots} 1.634171
2017-12-19 1.058795 1.685021 1.379648 1.185903 {\ldots} 1.639266
2017-12-20 1.055648 1.663843 1.368136 1.189682 {\ldots} 1.614639
2017-12-21 1.054389 1.635734 1.371179 1.189682 {\ldots} 1.591144
2017-12-22 1.054704 1.627262 1.358079 1.186172 {\ldots} 1.593692
2017-12-26 1.058795 1.616095 1.356888 1.186847 {\ldots} 1.581237
2017-12-27 1.054075 1.626877 1.358343 1.193731 {\ldots} 1.589446
2017-12-28 1.054075 1.628417 1.360196 1.196296 {\ldots} 1.579538
2017-12-29 1.047152 1.610320 1.352785 1.187792 {\ldots} 1.565951
2018-01-02 1.056277 1.619176 1.381633 1.197106 {\ldots} 1.573028
2018-01-03 1.060682 1.653061 1.393278 1.210063 {\ldots} 1.615488
2018-01-04 1.062885 1.678090 1.411671 1.224236 {\ldots} 1.636436
2018-01-05 1.070751 1.693878 1.430462 1.234764 {\ldots} 1.665875
2018-01-08 1.065717 1.729688 1.440519 1.227880 {\ldots} 1.688521
2018-01-09 1.062256 1.735464 1.444356 1.230850 {\ldots} 1.689087
2018-01-10 1.053445 1.726608 1.439725 1.225856 {\ldots} 1.716828
2018-01-11 1.064144 1.727763 1.443695 1.212223 {\ldots} 1.749664
2018-01-12 1.072639 1.735464 1.458780 1.215327 {\ldots} 1.776839
2018-01-16 1.070751 1.705044 1.440916 1.214248 {\ldots} 1.762685
2018-01-17 1.084596 1.727378 1.456133 1.230850 {\ldots} 1.800051
2018-01-18 1.075786 1.730458 1.479026 1.233414 {\ldots} 1.791842
2018-01-19 1.072010 1.755872 1.481143 1.251501 {\ldots} 1.785897
ORCL PAYX RHT SNPS SYMC TSS \textbackslash{}
Date
2017-01-18 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
2017-01-19 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
2017-01-20 1.016832 1.009360 1.003788 0.997702 1.003389 1.000937
2017-01-23 1.011987 1.003448 1.001894 0.998687 1.007530 0.994005
2017-01-24 1.022698 1.015928 1.020427 1.024626 1.029744 0.996441
2017-01-25 1.023973 1.011823 1.033956 1.029059 1.026355 0.968715
2017-01-26 1.023463 1.009031 1.017992 1.021179 1.024473 0.960097
2017-01-27 1.026014 1.004433 1.024080 1.026104 1.029744 0.960659
2017-01-30 1.026014 1.006076 1.021510 1.028074 1.026732 0.960472
2017-01-31 1.022953 0.997472 1.026515 1.032507 1.037274 0.949419
2017-02-01 1.016067 0.968188 1.029085 1.028074 1.025979 0.942675
2017-02-02 1.019638 0.968685 1.051272 1.053686 1.045181 0.955414
2017-02-03 1.031115 0.959089 1.056953 1.065506 1.069654 0.990446
2017-02-06 1.022698 0.953298 1.058171 1.069939 1.064006 0.989697
2017-02-07 1.021933 0.952471 1.066829 1.070432 1.070783 1.001311
2017-02-08 1.020658 0.953795 1.064123 1.063701 1.077184 1.003934
2017-02-09 1.026014 0.967692 1.072917 1.074372 1.091491 1.005807
2017-02-10 1.040296 0.967196 1.072105 1.071909 1.092620 1.003934
2017-02-13 1.047947 0.976792 1.079140 1.083238 1.094127 1.012177
2017-02-14 1.048202 0.980597 1.083469 1.080939 1.079819 1.011802
2017-02-15 1.056108 0.982417 1.096456 1.087835 1.076619 1.016486
2017-02-16 1.060699 0.984237 1.102949 1.160072 1.074731 1.010678
2017-02-17 1.072686 0.978281 1.117424 1.157610 1.087189 1.020420
2017-02-21 1.078041 0.977784 1.132982 1.165326 1.087566 1.031285
2017-02-22 1.084162 0.986057 1.145969 1.169923 1.079639 1.030910
2017-02-23 1.095639 0.999458 1.144481 1.169266 1.078884 1.032784
2017-02-24 1.100995 1.022620 1.147998 1.183057 1.088699 1.033720
2017-02-27 1.089008 1.017988 1.133387 1.186341 1.082659 1.030161
2017-02-28 1.086202 1.016168 1.120265 1.172878 1.078506 1.020607
2017-03-01 1.094619 1.039165 1.126082 1.186012 1.098136 1.035594
{\ldots} {\ldots} {\ldots} {\ldots} {\ldots} {\ldots} {\ldots}
2017-12-06 1.249800 1.150657 1.649621 1.455426 1.040952 1.409261
2017-12-07 1.252381 1.152675 1.674784 1.483008 1.048561 1.430359
2017-12-08 1.280257 1.160749 1.694940 1.485963 1.064541 1.442604
2017-12-11 1.302713 1.160917 1.712933 1.470366 1.111718 1.443357
2017-12-12 1.300648 1.155703 1.701163 1.437859 1.100304 1.450516
2017-12-13 1.291872 1.157553 1.711174 1.448859 1.089271 1.441097
2017-12-14 1.295486 1.149311 1.706304 1.444426 1.093836 1.443546
2017-12-15 1.246702 1.173532 1.742154 1.442456 1.102587 1.465774
2017-12-18 1.231473 1.181605 1.751082 1.443441 1.090032 1.488380
2017-12-19 1.234313 1.164954 1.743236 1.450829 1.077096 1.500813
2017-12-20 1.236377 1.163944 1.650433 1.429979 1.071389 1.506841
2017-12-21 1.220374 1.154525 1.665720 1.419800 1.083564 1.479903
2017-12-22 1.222439 1.148302 1.662879 1.408964 1.077476 1.484424
2017-12-26 1.224246 1.158057 1.631629 1.401248 1.074052 1.490264
2017-12-27 1.222955 1.150488 1.639881 1.407158 1.082422 1.493843
2017-12-28 1.226569 1.149479 1.638663 1.406173 1.083564 1.501378
2017-12-29 1.220374 1.145106 1.624729 1.399442 1.067584 1.489887
2018-01-02 1.203597 1.132155 1.637446 1.412412 1.099924 1.478396
2018-01-03 1.231473 1.148470 1.664773 1.429322 1.099924 1.484047
2018-01-04 1.243605 1.159235 1.677219 1.442949 1.102207 1.496103
2018-01-05 1.251090 1.155703 1.679383 1.460023 1.122752 1.505899
2018-01-08 1.264254 1.148470 1.693858 1.473650 1.125035 1.528505
2018-01-09 1.266319 1.140733 1.679518 1.472500 1.103728 1.520028
2018-01-10 1.259608 1.136023 1.684389 1.466754 1.090412 1.535098
2018-01-11 1.263480 1.124586 1.706710 1.472829 1.094597 1.548096
2018-01-12 1.277934 1.138042 1.703869 1.483664 1.096500 1.544705
2018-01-16 1.279999 1.148134 1.685471 1.465605 1.060165 1.548849
2018-01-17 1.297551 1.171345 1.712662 1.493679 1.047039 1.562413
2018-01-18 1.296519 1.159067 1.697376 1.501724 1.036767 1.558457
2018-01-19 1.305553 1.168822 1.703734 1.520440 1.044756 1.568252
V VRSN WU
Date
2017-01-18 1.000000 1.000000 1.000000
2017-01-19 1.000000 1.000000 1.000000
2017-01-20 1.001346 1.000498 0.992901
2017-01-23 1.005139 0.997512 0.980596
2017-01-24 1.018353 1.002115 0.966398
2017-01-25 1.026551 1.011570 0.950308
2017-01-26 1.018475 1.014556 0.927118
2017-01-27 1.024960 1.010450 0.927118
2017-01-30 1.024104 1.005723 0.928064
2017-01-31 1.011991 0.997885 0.926645
2017-02-01 1.008687 0.998134 0.924752
2017-02-02 1.006974 1.016422 0.933743
2017-02-03 1.053224 1.021523 0.945102
2017-02-06 1.050165 1.022394 0.938476
2017-02-07 1.049553 1.023389 0.938949
2017-02-08 1.041111 1.027246 0.942262
2017-02-09 1.047106 1.026624 0.964505
2017-02-10 1.051022 1.034337 0.934217
2017-02-13 1.057629 1.031351 0.924278
2017-02-14 1.062645 1.028614 0.931377
2017-02-15 1.073107 1.030729 0.927118
2017-02-16 1.071513 1.019159 0.925225
2017-02-17 1.072126 1.023887 0.938003
2017-02-21 1.077765 1.032346 0.937530
2017-02-22 1.076294 1.022518 0.946048
2017-02-23 1.080952 1.025131 0.942735
2017-02-24 1.084017 1.036452 0.954567
2017-02-27 1.078745 1.026872 0.952674
2017-02-28 1.078010 1.026001 0.929484
2017-03-01 1.090881 1.046529 0.942262
{\ldots} {\ldots} {\ldots} {\ldots}
2017-12-06 1.349816 1.398358 0.943150
2017-12-07 1.370234 1.403583 0.948984
2017-12-08 1.384994 1.405574 0.942664
2017-12-11 1.382165 1.422244 0.949470
2017-12-12 1.395572 1.422866 0.948012
2017-12-13 1.393727 1.415526 0.941692
2017-12-14 1.388930 1.420005 0.940719
2017-12-15 1.400000 1.431077 0.947039
2017-12-18 1.396679 1.444389 0.950929
2017-12-19 1.379336 1.437547 0.952387
2017-12-20 1.379090 1.443269 0.946553
2017-12-21 1.382657 1.420627 0.936344
2017-12-22 1.386101 1.418139 0.927593
2017-12-26 1.389791 1.417144 0.918356
2017-12-27 1.402460 1.422991 0.913981
2017-12-28 1.406519 1.436925 0.927107
2017-12-29 1.402460 1.423737 0.924190
2018-01-02 1.408487 1.361657 0.927593
2018-01-03 1.422510 1.379696 0.927107
2018-01-04 1.427799 1.386788 0.945581
2018-01-05 1.461993 1.405822 1.001489
2018-01-08 1.467897 1.422120 1.044758
2018-01-09 1.465068 1.406071 1.022880
2018-01-10 1.463469 1.392884 1.032117
2018-01-11 1.474047 1.396243 1.029687
2018-01-12 1.477122 1.413536 1.026770
2018-01-16 1.480812 1.401468 1.002462
2018-01-17 1.500369 1.418263 0.983988
2018-01-18 1.514268 1.424981 0.981557
2018-01-19 1.509225 1.424235 0.983988
[253 rows x 31 columns]
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}291}]:} \PY{n}{df\PYZus{}ticker\PYZus{}closing\PYZus{}for\PYZus{}semiconductors}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}291}]:} ADI AMAT AMD AVGO INTC KLAC \textbackslash{}
Date
2017-01-18 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
2017-01-19 1.000000 1.000000 1.000000 1.000000 0.994831 1.000000
2017-01-20 1.006074 1.002667 0.997953 1.029748 1.004897 1.005052
2017-01-23 1.004694 1.000000 1.014330 1.029317 1.000272 1.011707
2017-01-24 1.014771 1.008296 1.068577 1.065693 1.023395 1.020333
2017-01-25 1.028990 1.019852 1.059365 1.091183 1.028292 1.043376
2017-01-26 1.024296 1.006815 1.076766 1.100291 1.021763 1.025878
2017-01-27 1.061982 1.038222 1.092119 1.108590 1.033188 1.061738
2017-01-30 1.053009 1.017481 1.085977 1.095171 1.017954 1.062847
2017-01-31 1.034511 1.014815 1.061412 1.075124 1.001632 1.048799
2017-02-01 1.051491 1.037926 1.234391 1.097704 0.993471 1.060259
2017-02-02 1.038515 1.035259 1.256909 1.099644 0.997824 1.076525
2017-02-03 1.042518 1.046222 1.252815 1.111015 1.000544 1.079359
2017-02-06 1.042656 1.042370 1.395087 1.113171 0.993695 1.072089
2017-02-07 1.053424 1.053037 1.360287 1.109668 0.995887 1.080222
2017-02-08 1.069575 1.054815 1.387922 1.114949 0.996708 1.070487
2017-02-09 1.056184 1.049185 1.373593 1.106596 0.971503 1.068885
2017-02-10 1.056461 1.046519 1.389969 1.107566 0.968215 1.067652
2017-02-13 1.075649 1.050667 1.380757 1.113979 0.980818 1.081454
2017-02-14 1.074544 1.043852 1.357216 1.106542 0.984380 1.078602
2017-02-15 1.126449 1.051556 1.361310 1.110099 0.987667 1.087158
2017-02-16 1.131557 1.042370 1.327533 1.121901 0.997530 1.097697
2017-02-17 1.138597 1.059852 1.343910 1.133919 0.999448 1.096829
2017-02-21 1.143291 1.086890 1.432958 1.146529 1.000544 1.121503
2017-02-22 1.139012 1.084811 1.461617 1.154128 0.988215 1.119272
2017-02-23 1.132765 1.074411 1.465711 1.134835 0.991229 1.114684
2017-02-24 1.133737 1.078571 1.445241 1.133703 1.000818 1.119519
2017-02-27 1.142065 1.082434 1.555783 1.149386 1.000270 1.118652
2017-02-28 1.137207 1.076194 1.480041 1.136721 0.991777 1.117412
2017-03-01 1.162608 1.094913 1.531218 1.159409 0.984380 1.133779
{\ldots} {\ldots} {\ldots} {\ldots} {\ldots} {\ldots} {\ldots}
2017-12-06 1.198724 1.522437 1.023541 1.440632 1.222338 1.289115
2017-12-07 1.208549 1.560938 1.027636 1.440632 1.211929 1.312202
2017-12-08 1.200268 1.534972 1.017400 1.418905 1.219525 1.304673
2017-12-11 1.204198 1.540643 1.039918 1.419123 1.228245 1.303042
2017-12-12 1.199707 1.506320 1.013306 1.412299 1.218962 1.293130
2017-12-13 1.195496 1.514975 1.034800 1.430478 1.219243 1.303293
2017-12-14 1.199145 1.531391 1.036847 1.415793 1.216993 1.310194
2017-12-15 1.216689 1.568101 1.054248 1.450950 1.253564 1.325376
2017-12-18 1.236338 1.593768 1.123849 1.443362 1.301389 1.395516
2017-12-19 1.234373 1.580636 1.120778 1.439595 1.323332 1.400535
2017-12-20 1.244197 1.590485 1.123849 1.450186 1.337960 1.410949
2017-12-21 1.244197 1.548701 1.114637 1.427749 1.315455 1.362893
2017-12-22 1.247004 1.553775 1.078813 1.432225 1.313767 1.371676
2017-12-26 1.243917 1.523631 1.070624 1.409023 1.296325 1.350722
2017-12-27 1.250513 1.542434 1.077789 1.414592 1.297169 1.346079
2017-12-28 1.254443 1.543627 1.079836 1.421689 1.300263 1.353231
2017-12-29 1.249531 1.525720 1.052201 1.402472 1.298576 1.318350
2018-01-02 1.267074 1.583322 1.123849 1.457665 1.317424 1.331650
2018-01-03 1.282794 1.611079 1.182190 1.473606 1.273257 1.347209
2018-01-04 1.281390 1.620331 1.240532 1.474097 1.249907 1.349593
2018-01-05 1.286583 1.629583 1.215967 1.482832 1.258628 1.378452
2018-01-08 1.288829 1.668980 1.256909 1.486381 1.258628 1.382216
2018-01-09 1.286162 1.637343 1.209826 1.465799 1.227120 1.370170
2018-01-10 1.264688 1.590784 1.224156 1.434846 1.195612 1.333532
2018-01-11 1.279846 1.587799 1.242620 1.438503 1.221212 1.333030
2018-01-12 1.291495 1.595261 1.230297 1.442816 1.215586 1.346205
2018-01-16 1.306653 1.626599 1.219038 1.437138 1.213617 1.350847
2018-01-17 1.341319 1.711361 1.246673 1.451114 1.248782 1.423999
2018-01-18 1.364758 1.713152 1.276356 1.472896 1.251314 1.438553
2018-01-19 1.349319 1.713152 1.288639 1.454280 1.260879 1.443196
LRCX MCHP MU NVDA QCOM QRVO \textbackslash{}
Date
2017-01-18 1.000000 1.000000 1.000000 1.000000 1.000000 1.000000
2017-01-19 1.000000 1.000000 1.000000 1.000000 0.989406 1.000000
2017-01-20 1.013623 1.018877 1.011515 0.989064 0.965454 1.046476
2017-01-23 1.017412 1.020399 1.008291 0.999334 0.842622 1.060710
2017-01-24 1.034284 1.033643 1.052510 1.020635 0.844465 1.071857
2017-01-25 1.054854 1.047648 1.085214 1.025010 0.873637 1.098268
2017-01-26 1.031487 1.029533 1.083372 1.042697 0.829879 1.086263
2017-01-27 1.066763 1.046735 1.104099 1.062857 0.832796 1.091579
2017-01-30 1.046554 1.051302 1.113772 1.046215 0.823123 1.102555
2017-01-31 1.036268 1.025270 1.110548 1.038227 0.820359 1.101183
2017-02-01 1.062252 1.041407 1.140028 1.083587 0.816060 1.107014
2017-02-02 1.057651 1.047039 1.141870 1.097280 0.808537 1.086435
2017-02-03 1.061440 1.056934 1.133118 1.087676 0.813450 1.096896
2017-02-06 1.057019 1.052976 1.121142 1.115538 0.811915 1.103756
2017-02-07 1.055576 1.059826 1.133118 1.132845 0.817903 1.110444
2017-02-08 1.045381 1.123459 1.115154 1.127900 0.812068 1.135311
2017-02-09 1.046554 1.079769 1.126209 1.106695 0.811915 1.136683
2017-02-10 1.046463 1.079312 1.107784 1.080449 0.829111 1.136512
2017-02-13 1.048899 1.075658 1.100875 1.030620 0.843390 1.140971
2017-02-14 1.043215 1.077485 1.064947 1.034424 0.851835 1.139427
2017-02-15 1.048584 1.094535 1.060341 1.036516 0.867342 1.151946
2017-02-16 1.033652 1.097138 1.058498 1.019874 0.873330 1.151603
2017-02-17 1.036629 1.096679 1.075541 1.019684 0.866882 1.151089
2017-02-21 1.062974 1.108919 1.094887 1.056200 0.871334 1.158806
2017-02-22 1.069830 1.117487 1.093966 1.054583 0.876708 1.158463
2017-02-23 1.062162 1.112438 1.081529 0.956799 0.877322 1.152461
2017-02-24 1.071725 1.114274 1.070474 0.966035 0.878551 1.135140
2017-02-27 1.076236 1.118864 1.094427 0.994123 0.879165 1.159664
2017-02-28 1.069470 1.109531 1.079687 0.966225 0.875290 1.133596
2017-03-01 1.079664 1.126666 1.130815 0.978698 0.883504 1.163608
{\ldots} {\ldots} {\ldots} {\ldots} {\ldots} {\ldots} {\ldots}
2017-12-06 1.682161 1.337631 1.915246 1.805418 1.036756 1.202195
2017-12-07 1.727175 1.354369 1.989866 1.831460 1.040745 1.178529
2017-12-08 1.702522 1.354679 1.990327 1.826690 1.024950 1.164123
2017-12-11 1.694305 1.354369 1.981115 1.856930 1.039947 1.176814
2017-12-12 1.657873 1.330347 1.928144 1.820490 1.034842 1.160864
2017-12-13 1.655499 1.339026 1.936895 1.776036 1.035480 1.154004
2017-12-14 1.662712 1.324613 1.945647 1.778803 1.032289 1.118676
2017-12-15 1.702705 1.344451 1.953017 1.827358 1.033246 1.123821
2017-12-18 1.726354 1.378856 2.013358 1.887838 1.043138 1.149889
2017-12-19 1.699327 1.388465 2.025795 1.870762 1.029098 1.145430
2017-12-20 1.724162 1.398229 2.107324 1.877344 1.030694 1.150232
2017-12-21 1.695035 1.386450 2.046062 1.868664 1.027343 1.157263
2017-12-22 1.704622 1.378081 2.032243 1.862749 1.032768 1.160864
2017-12-26 1.682891 1.373587 1.946108 1.883449 1.025907 1.130166
2017-12-27 1.692570 1.370022 1.956702 1.880874 1.029736 1.153490
2017-12-28 1.694122 1.373277 1.925841 1.883068 1.027183 1.155205
2017-12-29 1.680700 1.361963 1.894058 1.845864 1.021440 1.142171
2018-01-02 1.728271 1.401173 2.011515 1.901670 1.040267 1.181273
2018-01-03 1.757672 1.422250 2.071856 2.026826 1.052073 1.172869
2018-01-04 1.765707 1.425660 2.159374 2.037510 1.053509 1.178529
2018-01-05 1.793282 1.429844 2.109627 2.054776 1.060529 1.166695
2018-01-08 1.806796 1.435424 2.098111 2.117736 1.057338 1.180587
2018-01-09 1.803965 1.433719 1.979272 2.117164 1.041383 1.183330
2018-01-10 1.750002 1.409542 1.994012 2.133762 1.041224 1.161722
2018-01-11 1.727258 1.427520 1.972363 2.136996 1.043936 1.181273
2018-01-12 1.721240 1.448132 1.971902 2.124976 1.043138 1.218316
2018-01-16 1.738406 1.446427 1.976969 2.099707 1.088929 1.188475
2018-01-17 1.872537 1.488117 2.039152 2.143683 1.085260 1.215057
2018-01-18 1.875916 1.507799 2.026255 2.141012 1.085738 1.212485
2018-01-19 1.894908 1.510279 1.969139 2.195100 1.085579 1.173041
SWKS TXN XLNX
Date
2017-01-18 1.000000 1.000000 1.000000
2017-01-19 1.000000 1.000000 1.000000
2017-01-20 1.130130 1.011776 1.000862
2017-01-23 1.149248 1.025041 0.999655
2017-01-24 1.176013 1.043313 1.016126
2017-01-25 1.170915 1.063617 1.027251
2017-01-26 1.159954 1.072821 0.989479
2017-01-27 1.172699 1.062940 1.008796
2017-01-30 1.185190 1.061169 1.019317
2017-01-31 1.172827 1.029021 1.003794
2017-02-01 1.172827 1.038965 0.989824
2017-02-02 1.164773 1.033788 1.002760
2017-02-03 1.170526 1.042098 1.004139
2017-02-06 1.168352 1.038284 1.010003
2017-02-07 1.173338 1.037739 1.007575
2017-02-08 1.172188 1.034606 1.010003
2017-02-09 1.182159 1.025751 1.004973
2017-02-10 1.178836 1.023844 1.006188
2017-02-13 1.184205 1.028748 1.032205
2017-02-14 1.173594 1.030383 1.019023
2017-02-15 1.177813 1.030792 1.021972
2017-02-16 1.181648 1.038692 1.033940
2017-02-17 1.231762 1.041281 1.041745
2017-02-21 1.234575 1.049999 1.045387
2017-02-22 1.236365 1.052042 1.037409
2017-02-23 1.218467 1.050816 1.022318
2017-02-24 1.210668 1.052178 1.023186
2017-02-27 1.222558 1.051497 1.023012
2017-02-28 1.212075 1.043733 1.020237
2017-03-01 1.236748 1.064575 1.035154
{\ldots} {\ldots} {\ldots} {\ldots}
2017-12-06 1.244139 1.346651 1.199296
2017-12-07 1.246848 1.357200 1.209993
2017-12-08 1.241689 1.360532 1.201926
2017-12-11 1.250717 1.371774 1.199120
2017-12-12 1.244139 1.366222 1.186845
2017-12-13 1.236272 1.372191 1.192983
2017-12-14 1.213445 1.391484 1.193684
2017-12-15 1.220410 1.404948 1.192106
2017-12-18 1.236014 1.433402 1.219462
2017-12-19 1.233951 1.444923 1.224021
2017-12-20 1.256520 1.454639 1.209817
2017-12-21 1.257294 1.444506 1.198945
2017-12-22 1.251877 1.445339 1.191054
2017-12-26 1.227761 1.445617 1.185442
2017-12-27 1.235757 1.450891 1.190878
2017-12-28 1.245042 1.454917 1.201225
2017-12-29 1.224536 1.449642 1.182285
2018-01-02 1.269417 1.465327 1.190352
2018-01-03 1.288762 1.505024 1.214201
2018-01-04 1.299595 1.503081 1.236121
2018-01-05 1.305527 1.514601 1.300304
2018-01-08 1.305785 1.521819 1.308896
2018-01-09 1.298434 1.532229 1.312404
2018-01-10 1.271609 1.522652 1.299427
2018-01-11 1.287988 1.536115 1.303635
2018-01-12 1.305269 1.564570 1.309247
2018-01-16 1.287859 1.567346 1.293640
2018-01-17 1.317908 1.654235 1.330290
2018-01-18 1.319198 1.615787 1.338006
2018-01-19 1.279347 1.621617 1.327660
[253 rows x 15 columns]
\end{Verbatim}
\subsubsection{Minimized Sum of Squared Deviations within the Main
Industry Only -
Software}\label{minimized-sum-of-squared-deviations-within-the-main-industry-only---software}
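The next cell scores every pair of tickers $(i, j)$ within the industry by the sum of squared deviations (SSD) between their normalized adjusted-close series over the formation period,
\[
\mathrm{SSD}(i, j) = \sum_{t=1}^{T} \left( \tilde{P}_{i,t} - \tilde{P}_{j,t} \right)^{2},
\]
where $\tilde{P}_{i,t}$ is the \texttt{AdjClose\_Normalized} value of ticker $i$ on day $t$ and $T = 253$ is the number of formation days shown above. The candidate pairs are those with the smallest SSD.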
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}336}]:} \PY{c+c1}{\PYZsh{} https://stackoverflow.com/questions/39203662/euclidean\PYZhy{}distance\PYZhy{}matrix\PYZhy{}using\PYZhy{}pandas}
\PY{c+c1}{\PYZsh{}pd.DataFrame( , columns = df\PYZus{}ticker\PYZus{}closing\PYZus{}for\PYZus{}semiconductors.columns, index = df\PYZus{}ticker\PYZus{}closing\PYZus{}for\PYZus{}semiconductors.index)}
\PY{c+c1}{\PYZsh{} https://stackoverflow.com/questions/41337316/pandas\PYZhy{}compare\PYZhy{}all\PYZhy{}dataframe\PYZhy{}columns\PYZhy{}with\PYZhy{}eachother}
\PY{n}{df} \PY{o}{=} \PY{n}{df\PYZus{}ticker\PYZus{}closing\PYZus{}for\PYZus{}software}\PY{o}{.}\PY{n}{copy}\PY{p}{(}\PY{p}{)}
\PY{c+c1}{\PYZsh{} This is a numpy array}
\PY{n}{ndarray\PYZus{}a} \PY{o}{=} \PY{n}{df}\PY{o}{.}\PY{n}{values}
\PY{n}{column\PYZus{}names} \PY{o}{=} \PY{n}{df}\PY{o}{.}\PY{n}{columns}
\PY{c+c1}{\PYZsh{} Comparing every column with every other column}
\PY{c+c1}{\PYZsh{}list\PYZus{}for\PYZus{}matrix\PYZus{}a = []}
\PY{n}{dict\PYZus{}pairs} \PY{o}{=} \PY{p}{\PYZob{}}\PY{p}{\PYZcb{}}
\PY{k}{for} \PY{n}{i} \PY{o+ow}{in} \PY{n+nb}{range}\PY{p}{(}\PY{n+nb}{len}\PY{p}{(}\PY{n}{ndarray\PYZus{}a}\PY{o}{.}\PY{n}{T}\PY{p}{)}\PY{p}{)}\PY{p}{:}
\PY{n}{column\PYZus{}name\PYZus{}in\PYZus{}focus} \PY{o}{=} \PY{n}{column\PYZus{}names}\PY{p}{[}\PY{n}{i}\PY{p}{]}
\PY{n}{column\PYZus{}values\PYZus{}in\PYZus{}focus} \PY{o}{=} \PY{n}{ndarray\PYZus{}a}\PY{p}{[}\PY{p}{:}\PY{p}{,} \PY{n}{i}\PY{p}{]}
\PY{c+c1}{\PYZsh{} The other columns}
\PY{k}{for} \PY{n}{j} \PY{o+ow}{in} \PY{n+nb}{range}\PY{p}{(}\PY{n+nb}{len}\PY{p}{(}\PY{n}{ndarray\PYZus{}a}\PY{o}{.}\PY{n}{T}\PY{p}{)}\PY{p}{)}\PY{p}{:}
\PY{c+c1}{\PYZsh{} Skip comparison to itself }
\PY{k}{if} \PY{n}{j} \PY{o}{==} \PY{n}{i}\PY{p}{:}
\PY{k}{continue}
\PY{n}{column\PYZus{}name\PYZus{}other} \PY{o}{=} \PY{n}{column\PYZus{}names}\PY{p}{[}\PY{n}{j}\PY{p}{]}
\PY{n}{column\PYZus{}values\PYZus{}other} \PY{o}{=} \PY{n}{ndarray\PYZus{}a}\PY{p}{[}\PY{p}{:}\PY{p}{,} \PY{n}{j}\PY{p}{]}
\PY{c+c1}{\PYZsh{} value\PYZus{}a = column\PYZus{}values\PYZus{}in\PYZus{}focus.sum() \PYZhy{} column\PYZus{}values\PYZus{}other.sum()}
\PY{c+c1}{\PYZsh{} sum\PYZus{}squared\PYZus{}deviations = value\PYZus{}a**2}
\PY{c+c1}{\PYZsh{} These are numpy ndarrays}
\PY{n}{ndarray\PYZus{}temp\PYZus{}a} \PY{o}{=} \PY{n}{column\PYZus{}values\PYZus{}in\PYZus{}focus} \PY{o}{\PYZhy{}} \PY{n}{column\PYZus{}values\PYZus{}other}
\PY{n}{ndarray\PYZus{}temp\PYZus{}b} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{square}\PY{p}{(}\PY{n}{ndarray\PYZus{}temp\PYZus{}a}\PY{p}{)}
\PY{n}{sum\PYZus{}squared\PYZus{}deviations} \PY{o}{=} \PY{n}{ndarray\PYZus{}temp\PYZus{}b}\PY{o}{.}\PY{n}{sum}\PY{p}{(}\PY{p}{)}
\PY{c+c1}{\PYZsh{} Put in a list so we can sort it.}
\PY{c+c1}{\PYZsh{} That way, we will have unique keys in the dictionary.}
\PY{c+c1}{\PYZsh{} (\PYZsq{}ADI\PYZsq{}, \PYZsq{}XLNX\PYZsq{}) is the same as (\PYZsq{}XLNX\PYZsq{}, \PYZsq{}ADI\PYZsq{}) in this case.}
\PY{c+c1}{\PYZsh{} Yes, the value for the key will be overridden, but it would be the}
\PY{c+c1}{\PYZsh{} same value.}
\PY{c+c1}{\PYZsh{}}
\PY{c+c1}{\PYZsh{} Afterwards, convert the sorted pair to a tuple: a list is not hashable,}
\PY{c+c1}{\PYZsh{} so it could not be used as a dictionary key.}
\PY{n}{list\PYZus{}pair\PYZus{}key} \PY{o}{=} \PY{n+nb}{sorted}\PY{p}{(}\PY{p}{[}\PY{n}{column\PYZus{}name\PYZus{}in\PYZus{}focus}\PY{p}{,} \PY{n}{column\PYZus{}name\PYZus{}other}\PY{p}{]}\PY{p}{)}
\PY{n}{tuple\PYZus{}pair\PYZus{}key} \PY{o}{=} \PY{n+nb}{tuple}\PY{p}{(}\PY{n}{list\PYZus{}pair\PYZus{}key}\PY{p}{)}
\PY{n}{dict\PYZus{}pairs}\PY{p}{[}\PY{n}{tuple\PYZus{}pair\PYZus{}key}\PY{p}{]} \PY{o}{=} \PY{n}{sum\PYZus{}squared\PYZus{}deviations}
\end{Verbatim}
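The nested loop above computes each distance twice (once per ordering) and relies on the sorted tuple key to collapse the duplicates. A vectorized alternative is sketched below; it assumes SciPy is available, and \texttt{ssd\_matrix} is an illustrative helper rather than something defined elsewhere in this notebook.
\begin{Verbatim}
# Sketch only: vectorized sum of squared deviations between every pair of
# columns, assuming SciPy is installed (SciPy is not used elsewhere here).
import pandas as pd
from scipy.spatial.distance import pdist, squareform

def ssd_matrix(df_prices):
    """Return a symmetric DataFrame of SSDs between all column pairs."""
    # pdist compares rows, so transpose to compare ticker columns instead.
    d = pdist(df_prices.values.T, metric='sqeuclidean')
    return pd.DataFrame(squareform(d),
                        index=df_prices.columns,
                        columns=df_prices.columns)

# For example, ssd_matrix(df_ticker_closing_for_software).loc['GOOG', 'GOOGL']
# should reproduce dict_pairs[('GOOG', 'GOOGL')] from the loop above.
\end{Verbatim}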
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}337}]:} \PY{n}{dict\PYZus{}pairs}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}337}]:} \{('ADBE', 'ADP'): 29.376129693592553,
('ADBE', 'ADSK'): 4.158609456332218,
('ADBE', 'AKAM'): 94.87267386981122,
('ADBE', 'ANSS'): 0.8696850922794794,
('ADBE', 'ATVI'): 6.517351640000458,
('ADBE', 'CA'): 36.511020170463276,
('ADBE', 'CDNS'): 1.2265447236409859,
('ADBE', 'CRM'): 7.085918839247967,
('ADBE', 'CTXS'): 23.967198766619457,
('ADBE', 'EA'): 6.204815344227672,
('ADBE', 'EBAY'): 16.506262595260367,
('ADBE', 'FB'): 4.966219754693422,
('ADBE', 'FIS'): 18.847753171034842,
('ADBE', 'FISV'): 19.019434180821282,
('ADBE', 'GOOG'): 12.213583359818005,
('ADBE', 'GOOGL'): 13.570674681293951,
('ADBE', 'INTU'): 9.254211425211263,
('ADBE', 'MA'): 7.418980673896577,
('ADBE', 'MSFT'): 8.58283144569894,
('ADBE', 'NFLX'): 6.259575001444054,
('ADBE', 'NTAP'): 7.682165352235385,
('ADBE', 'ORCL'): 10.948462323843168,
('ADBE', 'PAYX'): 35.64310708952115,
('ADBE', 'RHT'): 1.153945770702228,
('ADBE', 'SNPS'): 3.811917449321886,
('ADBE', 'SYMC'): 23.828023833314973,
('ADBE', 'TSS'): 8.092865305396348,
('ADBE', 'V'): 6.474267995072576,
('ADBE', 'VRSN'): 6.529076057006827,
('ADBE', 'WU'): 53.5264717931703,
('ADP', 'ADSK'): 20.28697737936668,
('ADP', 'AKAM'): 20.431752323892752,
('ADP', 'ANSS'): 23.84709906933113,
('ADP', 'ATVI'): 53.57235979823229,
('ADP', 'CA'): 1.036966431412505,
('ADP', 'CDNS'): 36.91168258814312,
('ADP', 'CRM'): 8.147196074456309,
('ADP', 'CTXS'): 1.9153141277466614,
('ADP', 'EA'): 21.35795561141269,
('ADP', 'EBAY'): 3.3230907679222694,
('ADP', 'FB'): 11.706227210933367,
('ADP', 'FIS'): 1.6134987587691578,
('ADP', 'FISV'): 1.8235753149414649,
('ADP', 'GOOG'): 4.618847052101154,
('ADP', 'GOOGL'): 3.942300296773336,
('ADP', 'INTU'): 6.380018278467871,
('ADP', 'MA'): 8.06633351721472,
('ADP', 'MSFT'): 6.495723204426145,
('ADP', 'NFLX'): 10.513119519449324,
('ADP', 'NTAP'): 14.013947246377636,
('ADP', 'ORCL'): 7.30246716338635,
('ADP', 'PAYX'): 0.8226419562575669,
('ADP', 'RHT'): 32.007280537056104,
('ADP', 'SNPS'): 13.84130036823279,
('ADP', 'SYMC'): 3.8265242380171927,
('ADP', 'TSS'): 8.179723739097994,
('ADP', 'V'): 8.77549584911996,
('ADP', 'VRSN'): 8.991016449671067,
('ADP', 'WU'): 4.0021985121418195,
('ADSK', 'AKAM'): 79.44756182547066,
('ADSK', 'ANSS'): 2.802816676496424,
('ADSK', 'ATVI'): 10.473166879619512,
('ADSK', 'CA'): 25.00253404565265,
('ADSK', 'CDNS'): 5.594513994395422,
('ADSK', 'CRM'): 4.2864926579759945,
('ADSK', 'CTXS'): 16.744195321732526,
('ADSK', 'EA'): 1.4927197476600735,
('ADSK', 'EBAY'): 9.288085300886838,
('ADSK', 'FB'): 2.345486843074066,
('ADSK', 'FIS'): 11.111437876124937,
('ADSK', 'FISV'): 11.457785548992558,
('ADSK', 'GOOG'): 7.189954599180913,
('ADSK', 'GOOGL'): 7.974748080038076,
('ADSK', 'INTU'): 5.3842964614956,
('ADSK', 'MA'): 4.727316247730728,
('ADSK', 'MSFT'): 5.841219057017932,
('ADSK', 'NFLX'): 3.651072813726262,
('ADSK', 'NTAP'): 10.686077929230263,
('ADSK', 'ORCL'): 5.08922384096354,
('ADSK', 'PAYX'): 26.287252771312566,
('ADSK', 'RHT'): 5.3463430031778,
('ADSK', 'SNPS'): 2.7797994513233686,
('ADSK', 'SYMC'): 13.912047655573508,
('ADSK', 'TSS'): 6.303920608506605,
('ADSK', 'V'): 3.866485990508485,
('ADSK', 'VRSN'): 3.838493791946847,
('ADSK', 'WU'): 39.765895809931905,
('AKAM', 'ANSS'): 84.95618664334728,
('AKAM', 'ATVI'): 138.11864153118768,
('AKAM', 'CA'): 16.636885456824604,
('AKAM', 'CDNS'): 109.05679793343567,
('AKAM', 'CRM'): 51.8854544044754,
('AKAM', 'CTXS'): 26.681146553488418,
('AKAM', 'EA'): 81.89300127017108,
('AKAM', 'EBAY'): 37.65293431813518,
('AKAM', 'FB'): 61.785387799939116,
('AKAM', 'FIS'): 32.41950857974421,
('AKAM', 'FISV'): 32.353809778653115,
('AKAM', 'GOOG'): 41.62565878305333,
('AKAM', 'GOOGL'): 39.397637759370404,
('AKAM', 'INTU'): 47.36271200879837,
('AKAM', 'MA'): 51.764804536559545,
('AKAM', 'MSFT'): 47.223042688254836,
('AKAM', 'NFLX'): 57.59766251382763,
('AKAM', 'NTAP'): 58.30983641058923,
('AKAM', 'ORCL'): 50.1097858975067,
('AKAM', 'PAYX'): 15.048243179305434,
('AKAM', 'RHT'): 99.66045731864762,
('AKAM', 'SNPS'): 65.45853719125701,
('AKAM', 'SYMC'): 33.97428230347582,
('AKAM', 'TSS'): 50.54386420484589,
('AKAM', 'V'): 54.19181180072739,
('AKAM', 'VRSN'): 54.793869519032775,
('AKAM', 'WU'): 7.571338709860404,
('ANSS', 'ATVI'): 8.154563717479729,
('ANSS', 'CA'): 29.943261681141287,
('ANSS', 'CDNS'): 2.5941690067372756,
('ANSS', 'CRM'): 4.534955933384435,
('ANSS', 'CTXS'): 18.552109268627913,
('ANSS', 'EA'): 4.295611339857258,
('ANSS', 'EBAY'): 12.272365956157463,
('ANSS', 'FB'): 3.0382590701096337,
('ANSS', 'FIS'): 14.349432684392086,
('ANSS', 'FISV'): 14.165581404285472,
('ANSS', 'GOOG'): 8.30167829382667,
('ANSS', 'GOOGL'): 9.41935164195621,
('ANSS', 'INTU'): 6.106299464298635,
('ANSS', 'MA'): 5.23414096590403,
('ANSS', 'MSFT'): 5.859227648695128,
('ANSS', 'NFLX'): 4.472687358212998,
('ANSS', 'NTAP'): 5.927731236701019,
('ANSS', 'ORCL'): 7.629628521151207,
('ANSS', 'PAYX'): 29.38047161074107,
('ANSS', 'RHT'): 2.5209045459914985,
('ANSS', 'SNPS'): 2.2015105355353706,
('ANSS', 'SYMC'): 18.897764721038637,
('ANSS', 'TSS'): 5.691196183576592,
('ANSS', 'V'): 4.188946306404346,
('ANSS', 'VRSN'): 4.3669750053096985,
('ANSS', 'WU'): 45.704650169081155,
('ATVI', 'CA'): 61.33711278896739,
('ATVI', 'CDNS'): 4.158582404160264,
('ATVI', 'CRM'): 21.406015165088377,
('ATVI', 'CTXS'): 45.44837761131093,
('ATVI', 'EA'): 9.292584427519238,
('ATVI', 'EBAY'): 32.77265383724729,
('ATVI', 'FB'): 15.839910132367695,
('ATVI', 'FIS'): 37.657095348456345,
('ATVI', 'FISV'): 37.52490631785801,
('ATVI', 'GOOG'): 28.839374630162528,
('ATVI', 'GOOGL'): 30.78521348947124,
('ATVI', 'INTU'): 24.70908645747999,
('ATVI', 'MA'): 22.54702328311286,
('ATVI', 'MSFT'): 24.91574785973942,
('ATVI', 'NFLX'): 19.668042415020217,
('ATVI', 'NTAP'): 24.090786300249558,
('ATVI', 'ORCL'): 23.358783918184045,
('ATVI', 'PAYX'): 63.12238407158666,
('ATVI', 'RHT'): 7.152630916743301,
('ATVI', 'SNPS'): 14.395739157136697,
('ATVI', 'SYMC'): 40.51242765452401,
('ATVI', 'TSS'): 24.22966939518591,
('ATVI', 'V'): 19.88524201271926,
('ATVI', 'VRSN'): 20.12178071584688,
('ATVI', 'WU'): 84.52634607386977,
('CA', 'CDNS'): 44.92324615372572,
('CA', 'CRM'): 11.863681636740417,
('CA', 'CTXS'): 2.512556877203576,
('CA', 'EA'): 25.50215903805071,
('CA', 'EBAY'): 4.951432360509962,
('CA', 'FB'): 16.12777353971046,
('CA', 'FIS'): 3.2432958740817774,
('CA', 'FISV'): 3.167146213222451,
('CA', 'GOOG'): 7.100471658897913,
('CA', 'GOOGL'): 6.139275746371604,
('CA', 'INTU'): 9.84001653266879,
('CA', 'MA'): 12.359129781010669,
('CA', 'MSFT'): 10.246127831626062,
('CA', 'NFLX'): 15.255154153362918,
('CA', 'NTAP'): 19.46742789414594,
('CA', 'ORCL'): 9.570574032140545,
('CA', 'PAYX'): 1.0556054126528271,
('CA', 'RHT'): 39.76651187557996,
('CA', 'SNPS'): 18.481677738986455,
('CA', 'SYMC'): 4.197878201911511,
('CA', 'TSS'): 12.65449529732265,
('CA', 'V'): 12.84349435028879,
('CA', 'VRSN'): 13.201898647636582,
('CA', 'WU'): 2.1979284961737853,
('CDNS', 'CRM'): 11.031947872090864,
('CDNS', 'CTXS'): 30.874006045052283,
('CDNS', 'EA'): 7.423172422335358,
('CDNS', 'EBAY'): 21.48987119937285,
('CDNS', 'FB'): 7.843372412328077,
('CDNS', 'FIS'): 24.65439386658227,
('CDNS', 'FISV'): 25.015929608117617,
('CDNS', 'GOOG'): 17.53980678233555,
('CDNS', 'GOOGL'): 19.139345470598055,
('CDNS', 'INTU'): 13.68628861255111,
('CDNS', 'MA'): 11.40871375099716,
('CDNS', 'MSFT'): 13.254787147719263,
('CDNS', 'NFLX'): 9.685292569671772,
('CDNS', 'NTAP'): 11.56312645877254,
('CDNS', 'ORCL'): 15.148307377550283,
('CDNS', 'PAYX'): 44.03092057783347,
('CDNS', 'RHT'): 0.9235182896273276,
('CDNS', 'SNPS'): 6.079412375484493,
('CDNS', 'SYMC'): 29.04313870451233,
('CDNS', 'TSS'): 12.547036771201697,
('CDNS', 'V'): 10.053963814709995,
('CDNS', 'VRSN'): 9.952639612236819,
('CDNS', 'WU'): 63.83878338302211,
('CRM', 'CTXS'): 5.520854365718749,
('CRM', 'EA'): 5.620478274383742,
('CRM', 'EBAY'): 2.377202652227747,
('CRM', 'FB'): 0.8317637499559798,
('CRM', 'FIS'): 3.09379877979767,
('CRM', 'FISV'): 3.1650678643526873,
('CRM', 'GOOG'): 1.037492391157313,
('CRM', 'GOOGL'): 1.3924248328373303,
('CRM', 'INTU'): 0.5105766639488711,
('CRM', 'MA'): 0.6511549699849806,
('CRM', 'MSFT'): 0.36001117965462753,
('CRM', 'NFLX'): 1.2108957423449946,
('CRM', 'NTAP'): 3.5829751108044983,
('CRM', 'ORCL'): 1.5820850174944878,
('CRM', 'PAYX'): 11.535565085868466,
('CRM', 'RHT'): 9.013410939134108,
('CRM', 'SNPS'): 1.0162211830295222,
('CRM', 'SYMC'): 6.024650812233192,
('CRM', 'TSS'): 1.1024545775554966,
('CRM', 'V'): 0.2072181498473113,
('CRM', 'VRSN'): 0.4678096232573099,
('CRM', 'WU'): 22.29733530684016,
('CTXS', 'EA'): 17.42256764275316,
('CTXS', 'EBAY'): 2.0664057033362875,
('CTXS', 'FB'): 9.18496370039216,
('CTXS', 'FIS'): 1.728746583356205,
('CTXS', 'FISV'): 0.9695653565296607,
('CTXS', 'GOOG'): 2.7079121979048635,
('CTXS', 'GOOGL'): 2.2092940469538815,
('CTXS', 'INTU'): 4.8913709356683865,
('CTXS', 'MA'): 7.086590128788154,
('CTXS', 'MSFT'): 4.861407447629741,
('CTXS', 'NFLX'): 9.027759466937667,
('CTXS', 'NTAP'): 10.444183675011955,
('CTXS', 'ORCL'): 4.95844833705484,
('CTXS', 'PAYX'): 2.501943680909248,
('CTXS', 'RHT'): 27.659407918969343,
('CTXS', 'SNPS'): 9.80419619358976,
('CTXS', 'SYMC'): 2.2462092216226237,
('CTXS', 'TSS'): 7.4228369480356955,
('CTXS', 'V'): 6.62745300090062,
('CTXS', 'VRSN'): 7.24828219747269,
('CTXS', 'WU'): 7.4085995127082125,
('EA', 'EBAY'): 9.214843633540891,
('EA', 'FB'): 3.0258058660745464,
('EA', 'FIS'): 11.70504140967026,
('EA', 'FISV'): 11.794745443152385,
('EA', 'GOOG'): 8.184044968271582,
('EA', 'GOOGL'): 8.965335998092671,
('EA', 'INTU'): 6.68146934576541,
('EA', 'MA'): 6.486667219218422,
('EA', 'MSFT'): 7.480453941051254,
('EA', 'NFLX'): 5.278196164196902,
('EA', 'NTAP'): 12.948636618694895,
('EA', 'ORCL'): 4.819562449968256,
('EA', 'PAYX'): 28.077198829424677,
('EA', 'RHT'): 7.8529307406098745,
('EA', 'SNPS'): 3.7633171009373787,
('EA', 'SYMC'): 13.155803735417425,
('EA', 'TSS'): 8.082116598648454,
('EA', 'V'): 4.845250902788944,
('EA', 'VRSN'): 5.211672155079992,
('EA', 'WU'): 40.958594678584845,
('EBAY', 'FB'): 3.987974429614597,
('EBAY', 'FIS'): 0.6089873412974444,
('EBAY', 'FISV'): 0.5815084586772623,
('EBAY', 'GOOG'): 1.1918500860681327,
('EBAY', 'GOOGL'): 1.0373520435504762,
('EBAY', 'INTU'): 1.9555020215727663,
('EBAY', 'MA'): 3.1530614463168445,
('EBAY', 'MSFT'): 2.2506704286098835,
('EBAY', 'NFLX'): 4.234601166058194,
('EBAY', 'NTAP'): 8.3265036552198,
('EBAY', 'ORCL'): 1.4521191149848256,
('EBAY', 'PAYX'): 5.8822104441942376,
('EBAY', 'RHT'): 18.774253278083243,
('EBAY', 'SNPS'): 5.11620294624582,
('EBAY', 'SYMC'): 1.477334513460374,
('EBAY', 'TSS'): 3.961557624755921,
('EBAY', 'V'): 2.6460063821353605,
('EBAY', 'VRSN'): 3.062778626018295,
('EBAY', 'WU'): 12.778796924220387,
('FB', 'FIS'): 5.1424069826376915,
('FB', 'FISV'): 5.50018265466027,
('FB', 'GOOG'): 2.686980027997973,
('FB', 'GOOGL'): 3.245882311693954,
('FB', 'INTU'): 1.5834553714088195,
('FB', 'MA'): 1.1242821402178662,
('FB', 'MSFT'): 1.4493476910520435,
('FB', 'NFLX'): 0.9715135404963295,
('FB', 'NTAP'): 4.9493478124464785,
('FB', 'ORCL'): 1.7804937489766783,
('FB', 'PAYX'): 16.558389369985022,
('FB', 'RHT'): 6.3083824442035965,
('FB', 'SNPS'): 0.6825352026638856,
('FB', 'SYMC'): 8.101153424870827,
('FB', 'TSS'): 1.8473552613654824,
('FB', 'V'): 0.4706843344746586,
('FB', 'VRSN'): 0.5305884543622407,
('FB', 'WU'): 28.329607998809706,
('FIS', 'FISV'): 0.2857409476522602,
('FIS', 'GOOG'): 1.3040282185579883,
('FIS', 'GOOGL'): 1.0170362853836052,
('FIS', 'INTU'): 2.1799213278728287,
('FIS', 'MA'): 3.324933816903757,
('FIS', 'MSFT'): 2.412316246121379,
('FIS', 'NFLX'): 4.858283616529405,
('FIS', 'NTAP'): 9.163823903199265,
('FIS', 'ORCL'): 2.4308061794139557,
('FIS', 'PAYX'): 3.795920433983037,
('FIS', 'RHT'): 21.064999160442934,
('FIS', 'SNPS'): 6.766284623296644,
('FIS', 'SYMC'): 2.052249740870587,
('FIS', 'TSS'): 3.7658130465777226,
('FIS', 'V'): 3.4082846707821917,
('FIS', 'VRSN'): 3.6497308698130544,
('FIS', 'WU'): 9.662710320419482,
('FISV', 'GOOG'): 1.0702903095153238,
('FISV', 'GOOGL'): 0.7679445764339363,
('FISV', 'INTU'): 2.3070740187462957,
('FISV', 'MA'): 3.9091120301367943,
('FISV', 'MSFT'): 2.626206195495933,
('FISV', 'NFLX'): 5.496221593928438,
('FISV', 'NTAP'): 8.906050140006466,
('FISV', 'ORCL'): 2.3648319164699907,
('FISV', 'PAYX'): 3.7280432472957137,
('FISV', 'RHT'): 21.913964868103836,
('FISV', 'SNPS'): 6.727171411308274,
('FISV', 'SYMC'): 1.741831468355303,
('FISV', 'TSS'): 4.428630778322396,
('FISV', 'V'): 3.683172966223803,
('FISV', 'VRSN'): 4.087982318900063,
('FISV', 'WU'): 9.619206503412636,
('GOOG', 'GOOGL'): 0.04045301814067839,
('GOOG', 'INTU'): 0.5793654361769872,
('GOOG', 'MA'): 1.57201467679406,
('GOOG', 'MSFT'): 0.6851416214529011,
('GOOG', 'NFLX'): 2.5620267781333617,
('GOOG', 'NTAP'): 5.359845564695879,
('GOOG', 'ORCL'): 1.7557676635576023,
('GOOG', 'PAYX'): 7.061983751434619,
('GOOG', 'RHT'): 14.870873023159652,
('GOOG', 'SNPS'): 3.523679515988144,
('GOOG', 'SYMC'): 4.009779642386653,
('GOOG', 'TSS'): 1.8966194684498623,
('GOOG', 'V'): 1.4285449382500413,
('GOOG', 'VRSN'): 1.7984341071492365,
('GOOG', 'WU'): 15.58635530830525,
('GOOGL', 'INTU'): 0.8224222462129634,
('GOOGL', 'MA'): 1.967264997216336,
('GOOGL', 'MSFT'): 0.9615383146739326,
('GOOGL', 'NFLX'): 3.0681795056864223,
('GOOGL', 'NTAP'): 6.032004586226634,
('GOOGL', 'ORCL'): 1.8798127945001002,
('GOOGL', 'PAYX'): 6.186157455246015,
('GOOGL', 'RHT'): 16.332732227463648,
('GOOGL', 'SNPS'): 4.190042780132229,
('GOOGL', 'SYMC'): 3.5475571926758573,
('GOOGL', 'TSS'): 2.309795545799717,
('GOOGL', 'V'): 1.8584326271076137,
('GOOGL', 'VRSN'): 2.231789356877014,
('GOOGL', 'WU'): 14.147208116409324,
('INTU', 'MA'): 0.5980149073455097,
('INTU', 'MSFT'): 0.2961582963157633,
('INTU', 'NFLX'): 1.5384904822032306,
('INTU', 'NTAP'): 4.281235179423031,
('INTU', 'ORCL'): 1.9354725729482922,
('INTU', 'PAYX'): 9.38788023137246,
('INTU', 'RHT'): 11.134611012675055,
('INTU', 'SNPS'): 2.2413013084682385,
('INTU', 'SYMC'): 5.798862864926634,
('INTU', 'TSS'): 0.8811381731971999,
('INTU', 'V'): 0.602668214336919,
('INTU', 'VRSN'): 0.8088193648550983,
('INTU', 'WU'): 19.441097299480838,
('MA', 'MSFT'): 0.39764108964701733,
('MA', 'NFLX'): 0.6369017056832544,
('MA', 'NTAP'): 3.877963044716283,
('MA', 'ORCL'): 2.6413256916262826,
('MA', 'PAYX'): 11.709249008900832,
('MA', 'RHT'): 8.450039758453155,
('MA', 'SNPS'): 1.9403223823745646,
('MA', 'SYMC'): 7.559165147367271,
('MA', 'TSS'): 0.41429499358425526,
('MA', 'V'): 0.39234837790166055,
('MA', 'VRSN'): 0.36546272129588436,
('MA', 'WU'): 22.543787681119465,
('MSFT', 'NFLX'): 1.2333220553378121,
('MSFT', 'NTAP'): 3.3963282089728937,
('MSFT', 'ORCL'): 2.1355365216745135,
('MSFT', 'PAYX'): 9.510533440478504,
('MSFT', 'RHT'): 10.450809868954009,
('MSFT', 'SNPS'): 2.058986395609634,
('MSFT', 'SYMC'): 6.190732497974761,
('MSFT', 'TSS'): 0.5873446524515048,
('MSFT', 'V'): 0.4652088816480763,
('MSFT', 'VRSN'): 0.612903633991063,
('MSFT', 'WU'): 19.7106251657355,
('NFLX', 'NTAP'): 4.456918206072289,
('NFLX', 'ORCL'): 3.1475968232629095,
('NFLX', 'PAYX'): 14.878855391721492,
('NFLX', 'RHT'): 7.3574851108681445,
('NFLX', 'SNPS'): 1.8966399696234966,
('NFLX', 'SYMC'): 8.642587930021216,
('NFLX', 'TSS'): 1.2638483920444297,
('NFLX', 'V'): 0.7914721044148963,
('NFLX', 'VRSN'): 0.8830585373131893,
('NFLX', 'WU'): 26.23052590307758,
('NTAP', 'ORCL'): 7.877315748960216,
('NTAP', 'PAYX'): 16.75099828553493,
('NTAP', 'RHT'): 9.196640717701607,
('NTAP', 'SNPS'): 3.886666292911035,
('NTAP', 'SYMC'): 14.238243898081427,
('NTAP', 'TSS'): 3.4498954116894454,
('NTAP', 'V'): 3.71029345139542,
('NTAP', 'VRSN'): 3.846009116894125,
('NTAP', 'WU'): 30.38583022136295,
('ORCL', 'PAYX'): 11.347323330799314,
('ORCL', 'RHT'): 13.280774822778579,
('ORCL', 'SNPS'): 2.710249369916319,
('ORCL', 'SYMC'): 3.565852117237573,
('ORCL', 'TSS'): 3.722227037652517,
('ORCL', 'V'): 1.613623668970272,
('ORCL', 'VRSN'): 1.8764910358029467,
('ORCL', 'WU'): 19.993499952513673,
('PAYX', 'RHT'): 38.58053388646707,
('PAYX', 'SNPS'): 18.258422223208996,
('PAYX', 'SYMC'): 5.920863136124079,
('PAYX', 'TSS'): 11.580753296403191,
('PAYX', 'V'): 12.677776781877178,
('PAYX', 'VRSN'): 12.967781798160182,
('PAYX', 'WU'): 2.403220488062168,
('RHT', 'SNPS'): 5.1797755243532455,
('RHT', 'SYMC'): 26.59712869275919,
('RHT', 'TSS'): 9.35679983077451,
('RHT', 'V'): 8.008434189190275,
('RHT', 'VRSN'): 7.470214569170028,
('RHT', 'WU'): 57.463025756540006,
('SNPS', 'SYMC'): 9.25913655540079,
('SNPS', 'TSS'): 2.7893004253939844,
('SNPS', 'V'): 0.8907311970171012,
('SNPS', 'VRSN'): 1.1277965248402264,
('SNPS', 'WU'): 31.367380362236766,
('SYMC', 'TSS'): 9.00710896810785,
('SYMC', 'V'): 6.625221625259926,
('SYMC', 'VRSN'): 7.133660727502845,
('SYMC', 'WU'): 10.572458910322382,
('TSS', 'V'): 0.898612533002861,
('TSS', 'VRSN'): 0.938963808222344,
('TSS', 'WU'): 22.493071934956873,
('V', 'VRSN'): 0.27237885594731637,
('V', 'WU'): 23.734044154960266,
('VRSN', 'WU'): 24.175810311065415\}
\end{Verbatim}
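The dictionary above is keyed by ticker pair, so it is not yet ordered by distance; the next cell sorts all pairs in ascending order. If only a handful of the closest pairs were needed, a partial selection would do as well; the sketch below uses the standard-library \texttt{heapq} module, and the cutoff of five pairs is an arbitrary illustration rather than something the notebook itself does.
\begin{Verbatim}
# Sketch: take only the N pairs with the smallest sum of squared deviations,
# without sorting the whole dictionary. N = 5 is an arbitrary choice.
import heapq
top_pairs = heapq.nsmallest(5, dict_pairs.items(), key=lambda kv: kv[1])
# Expected to start with (('GOOG', 'GOOGL'), 0.0404...) and
# (('CRM', 'V'), 0.2072...), matching the sorted list in the next cell.
\end{Verbatim}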
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}338}]:} \PY{c+c1}{\PYZsh{} Sort dictionary by value}
\PY{n}{d} \PY{o}{=} \PY{n}{dict\PYZus{}pairs}
\PY{c+c1}{\PYZsh{} Note: dictionaries prior to Python 3.6 do not preserve insertion order.}
\PY{c+c1}{\PYZsh{} So return the (pair, value) items as a list sorted by value, ascending.}
\PY{n}{list\PYZus{}minimized\PYZus{}sum\PYZus{}of\PYZus{}squared\PYZus{}deviations} \PY{o}{=} \PY{p}{[}\PY{p}{(}\PY{n}{k}\PY{p}{,} \PY{n}{d}\PY{p}{[}\PY{n}{k}\PY{p}{]}\PY{p}{)} \PY{k}{for} \PY{n}{k} \PY{o+ow}{in} \PY{n+nb}{sorted}\PY{p}{(}\PY{n}{d}\PY{p}{,} \PY{n}{key}\PY{o}{=}\PY{n}{d}\PY{o}{.}\PY{n}{get}\PY{p}{,} \PY{n}{reverse}\PY{o}{=}\PY{k+kc}{False}\PY{p}{)}\PY{p}{]}
\PY{n}{list\PYZus{}minimized\PYZus{}sum\PYZus{}of\PYZus{}squared\PYZus{}deviations}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}338}]:} [(('GOOG', 'GOOGL'), 0.04045301814067839),
(('CRM', 'V'), 0.2072181498473113),
(('V', 'VRSN'), 0.27237885594731637),
(('FIS', 'FISV'), 0.2857409476522602),
(('INTU', 'MSFT'), 0.2961582963157633),
(('CRM', 'MSFT'), 0.36001117965462753),
(('MA', 'VRSN'), 0.36546272129588436),
(('MA', 'V'), 0.39234837790166055),
(('MA', 'MSFT'), 0.39764108964701733),
(('MA', 'TSS'), 0.41429499358425526),
(('MSFT', 'V'), 0.4652088816480763),
(('CRM', 'VRSN'), 0.4678096232573099),
(('FB', 'V'), 0.4706843344746586),
(('CRM', 'INTU'), 0.5105766639488711),
(('FB', 'VRSN'), 0.5305884543622407),
(('GOOG', 'INTU'), 0.5793654361769872),
(('EBAY', 'FISV'), 0.5815084586772623),
(('MSFT', 'TSS'), 0.5873446524515048),
(('INTU', 'MA'), 0.5980149073455097),
(('INTU', 'V'), 0.602668214336919),
(('EBAY', 'FIS'), 0.6089873412974444),
(('MSFT', 'VRSN'), 0.612903633991063),
(('MA', 'NFLX'), 0.6369017056832544),
(('CRM', 'MA'), 0.6511549699849806),
(('FB', 'SNPS'), 0.6825352026638856),
(('GOOG', 'MSFT'), 0.6851416214529011),
(('FISV', 'GOOGL'), 0.7679445764339363),
(('NFLX', 'V'), 0.7914721044148963),
(('INTU', 'VRSN'), 0.8088193648550983),
(('GOOGL', 'INTU'), 0.8224222462129634),
(('ADP', 'PAYX'), 0.8226419562575669),
(('CRM', 'FB'), 0.8317637499559798),
(('ADBE', 'ANSS'), 0.8696850922794794),
(('INTU', 'TSS'), 0.8811381731971999),
(('NFLX', 'VRSN'), 0.8830585373131893),
(('SNPS', 'V'), 0.8907311970171012),
(('TSS', 'V'), 0.898612533002861),
(('CDNS', 'RHT'), 0.9235182896273276),
(('TSS', 'VRSN'), 0.938963808222344),
(('GOOGL', 'MSFT'), 0.9615383146739326),
(('CTXS', 'FISV'), 0.9695653565296607),
(('FB', 'NFLX'), 0.9715135404963295),
(('CRM', 'SNPS'), 1.0162211830295222),
(('FIS', 'GOOGL'), 1.0170362853836052),
(('ADP', 'CA'), 1.036966431412505),
(('EBAY', 'GOOGL'), 1.0373520435504762),
(('CRM', 'GOOG'), 1.037492391157313),
(('CA', 'PAYX'), 1.0556054126528271),
(('FISV', 'GOOG'), 1.0702903095153238),
(('CRM', 'TSS'), 1.1024545775554966),
(('FB', 'MA'), 1.1242821402178662),
(('SNPS', 'VRSN'), 1.1277965248402264),
(('ADBE', 'RHT'), 1.153945770702228),
(('EBAY', 'GOOG'), 1.1918500860681327),
(('CRM', 'NFLX'), 1.2108957423449946),
(('ADBE', 'CDNS'), 1.2265447236409859),
(('MSFT', 'NFLX'), 1.2333220553378121),
(('NFLX', 'TSS'), 1.2638483920444297),
(('FIS', 'GOOG'), 1.3040282185579883),
(('CRM', 'GOOGL'), 1.3924248328373303),
(('GOOG', 'V'), 1.4285449382500413),
(('FB', 'MSFT'), 1.4493476910520435),
(('EBAY', 'ORCL'), 1.4521191149848256),
(('EBAY', 'SYMC'), 1.477334513460374),
(('ADSK', 'EA'), 1.4927197476600735),
(('INTU', 'NFLX'), 1.5384904822032306),
(('GOOG', 'MA'), 1.57201467679406),
(('CRM', 'ORCL'), 1.5820850174944878),
(('FB', 'INTU'), 1.5834553714088195),
(('ADP', 'FIS'), 1.6134987587691578),
(('ORCL', 'V'), 1.613623668970272),
(('CTXS', 'FIS'), 1.728746583356205),
(('FISV', 'SYMC'), 1.741831468355303),
(('GOOG', 'ORCL'), 1.7557676635576023),
(('FB', 'ORCL'), 1.7804937489766783),
(('GOOG', 'VRSN'), 1.7984341071492365),
(('ADP', 'FISV'), 1.8235753149414649),
(('FB', 'TSS'), 1.8473552613654824),
(('GOOGL', 'V'), 1.8584326271076137),
(('ORCL', 'VRSN'), 1.8764910358029467),
(('GOOGL', 'ORCL'), 1.8798127945001002),
(('GOOG', 'TSS'), 1.8966194684498623),
(('NFLX', 'SNPS'), 1.8966399696234966),
(('ADP', 'CTXS'), 1.9153141277466614),
(('INTU', 'ORCL'), 1.9354725729482922),
(('MA', 'SNPS'), 1.9403223823745646),
(('EBAY', 'INTU'), 1.9555020215727663),
(('GOOGL', 'MA'), 1.967264997216336),
(('FIS', 'SYMC'), 2.052249740870587),
(('MSFT', 'SNPS'), 2.058986395609634),
(('CTXS', 'EBAY'), 2.0664057033362875),
(('MSFT', 'ORCL'), 2.1355365216745135),
(('FIS', 'INTU'), 2.1799213278728287),
(('CA', 'WU'), 2.1979284961737853),
(('ANSS', 'SNPS'), 2.2015105355353706),
(('CTXS', 'GOOGL'), 2.2092940469538815),
(('GOOGL', 'VRSN'), 2.231789356877014),
(('INTU', 'SNPS'), 2.2413013084682385),
(('CTXS', 'SYMC'), 2.2462092216226237),
(('EBAY', 'MSFT'), 2.2506704286098835),
(('FISV', 'INTU'), 2.3070740187462957),
(('GOOGL', 'TSS'), 2.309795545799717),
(('ADSK', 'FB'), 2.345486843074066),
(('FISV', 'ORCL'), 2.3648319164699907),
(('CRM', 'EBAY'), 2.377202652227747),
(('PAYX', 'WU'), 2.403220488062168),
(('FIS', 'MSFT'), 2.412316246121379),
(('FIS', 'ORCL'), 2.4308061794139557),
(('CTXS', 'PAYX'), 2.501943680909248),
(('CA', 'CTXS'), 2.512556877203576),
(('ANSS', 'RHT'), 2.5209045459914985),
(('GOOG', 'NFLX'), 2.5620267781333617),
(('ANSS', 'CDNS'), 2.5941690067372756),
(('FISV', 'MSFT'), 2.626206195495933),
(('MA', 'ORCL'), 2.6413256916262826),
(('EBAY', 'V'), 2.6460063821353605),
(('FB', 'GOOG'), 2.686980027997973),
(('CTXS', 'GOOG'), 2.7079121979048635),
(('ORCL', 'SNPS'), 2.710249369916319),
(('ADSK', 'SNPS'), 2.7797994513233686),
(('SNPS', 'TSS'), 2.7893004253939844),
(('ADSK', 'ANSS'), 2.802816676496424),
(('EA', 'FB'), 3.0258058660745464),
(('ANSS', 'FB'), 3.0382590701096337),
(('EBAY', 'VRSN'), 3.062778626018295),
(('GOOGL', 'NFLX'), 3.0681795056864223),
(('CRM', 'FIS'), 3.09379877979767),
(('NFLX', 'ORCL'), 3.1475968232629095),
(('EBAY', 'MA'), 3.1530614463168445),
(('CRM', 'FISV'), 3.1650678643526873),
(('CA', 'FISV'), 3.167146213222451),
(('CA', 'FIS'), 3.2432958740817774),
(('FB', 'GOOGL'), 3.245882311693954),
(('ADP', 'EBAY'), 3.3230907679222694),
(('FIS', 'MA'), 3.324933816903757),
(('MSFT', 'NTAP'), 3.3963282089728937),
(('FIS', 'V'), 3.4082846707821917),
(('NTAP', 'TSS'), 3.4498954116894454),
(('GOOG', 'SNPS'), 3.523679515988144),
(('GOOGL', 'SYMC'), 3.5475571926758573),
(('ORCL', 'SYMC'), 3.565852117237573),
(('CRM', 'NTAP'), 3.5829751108044983),
(('FIS', 'VRSN'), 3.6497308698130544),
(('ADSK', 'NFLX'), 3.651072813726262),
(('FISV', 'V'), 3.683172966223803),
(('NTAP', 'V'), 3.71029345139542),
(('ORCL', 'TSS'), 3.722227037652517),
(('FISV', 'PAYX'), 3.7280432472957137),
(('EA', 'SNPS'), 3.7633171009373787),
(('FIS', 'TSS'), 3.7658130465777226),
(('FIS', 'PAYX'), 3.795920433983037),
(('ADBE', 'SNPS'), 3.811917449321886),
(('ADP', 'SYMC'), 3.8265242380171927),
(('ADSK', 'VRSN'), 3.838493791946847),
(('NTAP', 'VRSN'), 3.846009116894125),
(('ADSK', 'V'), 3.866485990508485),
(('MA', 'NTAP'), 3.877963044716283),
(('NTAP', 'SNPS'), 3.886666292911035),
(('FISV', 'MA'), 3.9091120301367943),
(('ADP', 'GOOGL'), 3.942300296773336),
(('EBAY', 'TSS'), 3.961557624755921),
(('EBAY', 'FB'), 3.987974429614597),
(('ADP', 'WU'), 4.0021985121418195),
(('GOOG', 'SYMC'), 4.009779642386653),
(('FISV', 'VRSN'), 4.087982318900063),
(('ATVI', 'CDNS'), 4.158582404160264),
(('ADBE', 'ADSK'), 4.158609456332218),
(('ANSS', 'V'), 4.188946306404346),
(('GOOGL', 'SNPS'), 4.190042780132229),
(('CA', 'SYMC'), 4.197878201911511),
(('EBAY', 'NFLX'), 4.234601166058194),
(('INTU', 'NTAP'), 4.281235179423031),
(('ADSK', 'CRM'), 4.2864926579759945),
(('ANSS', 'EA'), 4.295611339857258),
(('ANSS', 'VRSN'), 4.3669750053096985),
(('FISV', 'TSS'), 4.428630778322396),
(('NFLX', 'NTAP'), 4.456918206072289),
(('ANSS', 'NFLX'), 4.472687358212998),
(('ANSS', 'CRM'), 4.534955933384435),
(('ADP', 'GOOG'), 4.618847052101154),
(('ADSK', 'MA'), 4.727316247730728),
(('EA', 'ORCL'), 4.819562449968256),
(('EA', 'V'), 4.845250902788944),
(('FIS', 'NFLX'), 4.858283616529405),
(('CTXS', 'MSFT'), 4.861407447629741),
(('CTXS', 'INTU'), 4.8913709356683865),
(('FB', 'NTAP'), 4.9493478124464785),
(('CA', 'EBAY'), 4.951432360509962),
(('CTXS', 'ORCL'), 4.95844833705484),
(('ADBE', 'FB'), 4.966219754693422),
(('ADSK', 'ORCL'), 5.08922384096354),
(('EBAY', 'SNPS'), 5.11620294624582),
(('FB', 'FIS'), 5.1424069826376915),
(('RHT', 'SNPS'), 5.1797755243532455),
(('EA', 'VRSN'), 5.211672155079992),
(('ANSS', 'MA'), 5.23414096590403),
(('EA', 'NFLX'), 5.278196164196902),
(('ADSK', 'RHT'), 5.3463430031778),
(('GOOG', 'NTAP'), 5.359845564695879),
(('ADSK', 'INTU'), 5.3842964614956),
(('FISV', 'NFLX'), 5.496221593928438),
(('FB', 'FISV'), 5.50018265466027),
(('CRM', 'CTXS'), 5.520854365718749),
(('ADSK', 'CDNS'), 5.594513994395422),
(('CRM', 'EA'), 5.620478274383742),
(('ANSS', 'TSS'), 5.691196183576592),
(('INTU', 'SYMC'), 5.798862864926634),
(('ADSK', 'MSFT'), 5.841219057017932),
(('ANSS', 'MSFT'), 5.859227648695128),
(('EBAY', 'PAYX'), 5.8822104441942376),
(('PAYX', 'SYMC'), 5.920863136124079),
(('ANSS', 'NTAP'), 5.927731236701019),
(('CRM', 'SYMC'), 6.024650812233192),
(('GOOGL', 'NTAP'), 6.032004586226634),
(('CDNS', 'SNPS'), 6.079412375484493),
(('ANSS', 'INTU'), 6.106299464298635),
(('CA', 'GOOGL'), 6.139275746371604),
(('GOOGL', 'PAYX'), 6.186157455246015),
(('MSFT', 'SYMC'), 6.190732497974761),
(('ADBE', 'EA'), 6.204815344227672),
(('ADBE', 'NFLX'), 6.259575001444054),
(('ADSK', 'TSS'), 6.303920608506605),
(('FB', 'RHT'), 6.3083824442035965),
(('ADP', 'INTU'), 6.380018278467871),
(('ADBE', 'V'), 6.474267995072576),
(('EA', 'MA'), 6.486667219218422),
(('ADP', 'MSFT'), 6.495723204426145),
(('ADBE', 'ATVI'), 6.517351640000458),
(('ADBE', 'VRSN'), 6.529076057006827),
(('SYMC', 'V'), 6.625221625259926),
(('CTXS', 'V'), 6.62745300090062),
(('EA', 'INTU'), 6.68146934576541),
(('FISV', 'SNPS'), 6.727171411308274),
(('FIS', 'SNPS'), 6.766284623296644),
(('GOOG', 'PAYX'), 7.061983751434619),
(('ADBE', 'CRM'), 7.085918839247967),
(('CTXS', 'MA'), 7.086590128788154),
(('CA', 'GOOG'), 7.100471658897913),
(('SYMC', 'VRSN'), 7.133660727502845),
(('ATVI', 'RHT'), 7.152630916743301),
(('ADSK', 'GOOG'), 7.189954599180913),
(('CTXS', 'VRSN'), 7.24828219747269),
(('ADP', 'ORCL'), 7.30246716338635),
(('NFLX', 'RHT'), 7.3574851108681445),
(('CTXS', 'WU'), 7.4085995127082125),
(('ADBE', 'MA'), 7.418980673896577),
(('CTXS', 'TSS'), 7.4228369480356955),
(('CDNS', 'EA'), 7.423172422335358),
(('RHT', 'VRSN'), 7.470214569170028),
(('EA', 'MSFT'), 7.480453941051254),
(('MA', 'SYMC'), 7.559165147367271),
(('AKAM', 'WU'), 7.571338709860404),
(('ANSS', 'ORCL'), 7.629628521151207),
(('ADBE', 'NTAP'), 7.682165352235385),
(('CDNS', 'FB'), 7.843372412328077),
(('EA', 'RHT'), 7.8529307406098745),
(('NTAP', 'ORCL'), 7.877315748960216),
(('ADSK', 'GOOGL'), 7.974748080038076),
(('RHT', 'V'), 8.008434189190275),
(('ADP', 'MA'), 8.06633351721472),
(('EA', 'TSS'), 8.082116598648454),
(('ADBE', 'TSS'), 8.092865305396348),
(('FB', 'SYMC'), 8.101153424870827),
(('ADP', 'CRM'), 8.147196074456309),
(('ANSS', 'ATVI'), 8.154563717479729),
(('ADP', 'TSS'), 8.179723739097994),
(('EA', 'GOOG'), 8.184044968271582),
(('ANSS', 'GOOG'), 8.30167829382667),
(('EBAY', 'NTAP'), 8.3265036552198),
(('MA', 'RHT'), 8.450039758453155),
(('ADBE', 'MSFT'), 8.58283144569894),
(('NFLX', 'SYMC'), 8.642587930021216),
(('ADP', 'V'), 8.77549584911996),
(('FISV', 'NTAP'), 8.906050140006466),
(('EA', 'GOOGL'), 8.965335998092671),
(('ADP', 'VRSN'), 8.991016449671067),
(('SYMC', 'TSS'), 9.00710896810785),
(('CRM', 'RHT'), 9.013410939134108),
(('CTXS', 'NFLX'), 9.027759466937667),
(('FIS', 'NTAP'), 9.163823903199265),
(('CTXS', 'FB'), 9.18496370039216),
(('NTAP', 'RHT'), 9.196640717701607),
(('EA', 'EBAY'), 9.214843633540891),
(('ADBE', 'INTU'), 9.254211425211263),
(('SNPS', 'SYMC'), 9.25913655540079),
(('ADSK', 'EBAY'), 9.288085300886838),
(('ATVI', 'EA'), 9.292584427519238),
(('RHT', 'TSS'), 9.35679983077451),
(('INTU', 'PAYX'), 9.38788023137246),
(('ANSS', 'GOOGL'), 9.41935164195621),
(('MSFT', 'PAYX'), 9.510533440478504),
(('CA', 'ORCL'), 9.570574032140545),
(('FISV', 'WU'), 9.619206503412636),
(('FIS', 'WU'), 9.662710320419482),
(('CDNS', 'NFLX'), 9.685292569671772),
(('CTXS', 'SNPS'), 9.80419619358976),
(('CA', 'INTU'), 9.84001653266879),
(('CDNS', 'VRSN'), 9.952639612236819),
(('CDNS', 'V'), 10.053963814709995),
(('CA', 'MSFT'), 10.246127831626062),
(('CTXS', 'NTAP'), 10.444183675011955),
(('MSFT', 'RHT'), 10.450809868954009),
(('ADSK', 'ATVI'), 10.473166879619512),
(('ADP', 'NFLX'), 10.513119519449324),
(('SYMC', 'WU'), 10.572458910322382),
(('ADSK', 'NTAP'), 10.686077929230263),
(('ADBE', 'ORCL'), 10.948462323843168),
(('CDNS', 'CRM'), 11.031947872090864),
(('ADSK', 'FIS'), 11.111437876124937),
(('INTU', 'RHT'), 11.134611012675055),
(('ORCL', 'PAYX'), 11.347323330799314),
(('CDNS', 'MA'), 11.40871375099716),
(('ADSK', 'FISV'), 11.457785548992558),
(('CRM', 'PAYX'), 11.535565085868466),
(('CDNS', 'NTAP'), 11.56312645877254),
(('PAYX', 'TSS'), 11.580753296403191),
(('EA', 'FIS'), 11.70504140967026),
(('ADP', 'FB'), 11.706227210933367),
(('MA', 'PAYX'), 11.709249008900832),
(('EA', 'FISV'), 11.794745443152385),
(('CA', 'CRM'), 11.863681636740417),
(('ADBE', 'GOOG'), 12.213583359818005),
(('ANSS', 'EBAY'), 12.272365956157463),
(('CA', 'MA'), 12.359129781010669),
(('CDNS', 'TSS'), 12.547036771201697),
(('CA', 'TSS'), 12.65449529732265),
(('PAYX', 'V'), 12.677776781877178),
(('EBAY', 'WU'), 12.778796924220387),
(('CA', 'V'), 12.84349435028879),
(('EA', 'NTAP'), 12.948636618694895),
(('PAYX', 'VRSN'), 12.967781798160182),
(('EA', 'SYMC'), 13.155803735417425),
(('CA', 'VRSN'), 13.201898647636582),
(('CDNS', 'MSFT'), 13.254787147719263),
(('ORCL', 'RHT'), 13.280774822778579),
(('ADBE', 'GOOGL'), 13.570674681293951),
(('CDNS', 'INTU'), 13.68628861255111),
(('ADP', 'SNPS'), 13.84130036823279),
(('ADSK', 'SYMC'), 13.912047655573508),
(('ADP', 'NTAP'), 14.013947246377636),
(('GOOGL', 'WU'), 14.147208116409324),
(('ANSS', 'FISV'), 14.165581404285472),
(('NTAP', 'SYMC'), 14.238243898081427),
(('ANSS', 'FIS'), 14.349432684392086),
(('ATVI', 'SNPS'), 14.395739157136697),
(('GOOG', 'RHT'), 14.870873023159652),
(('NFLX', 'PAYX'), 14.878855391721492),
(('AKAM', 'PAYX'), 15.048243179305434),
(('CDNS', 'ORCL'), 15.148307377550283),
(('CA', 'NFLX'), 15.255154153362918),
(('GOOG', 'WU'), 15.58635530830525),
(('ATVI', 'FB'), 15.839910132367695),
(('CA', 'FB'), 16.12777353971046),
(('GOOGL', 'RHT'), 16.332732227463648),
(('ADBE', 'EBAY'), 16.506262595260367),
(('FB', 'PAYX'), 16.558389369985022),
(('AKAM', 'CA'), 16.636885456824604),
(('ADSK', 'CTXS'), 16.744195321732526),
(('NTAP', 'PAYX'), 16.75099828553493),
(('CTXS', 'EA'), 17.42256764275316),
(('CDNS', 'GOOG'), 17.53980678233555),
(('PAYX', 'SNPS'), 18.258422223208996),
(('CA', 'SNPS'), 18.481677738986455),
(('ANSS', 'CTXS'), 18.552109268627913),
(('EBAY', 'RHT'), 18.774253278083243),
(('ADBE', 'FIS'), 18.847753171034842),
(('ANSS', 'SYMC'), 18.897764721038637),
(('ADBE', 'FISV'), 19.019434180821282),
(('CDNS', 'GOOGL'), 19.139345470598055),
(('INTU', 'WU'), 19.441097299480838),
(('CA', 'NTAP'), 19.46742789414594),
(('ATVI', 'NFLX'), 19.668042415020217),
(('MSFT', 'WU'), 19.7106251657355),
(('ATVI', 'V'), 19.88524201271926),
(('ORCL', 'WU'), 19.993499952513673),
(('ATVI', 'VRSN'), 20.12178071584688),
(('ADP', 'ADSK'), 20.28697737936668),
(('ADP', 'AKAM'), 20.431752323892752),
(('FIS', 'RHT'), 21.064999160442934),
(('ADP', 'EA'), 21.35795561141269),
(('ATVI', 'CRM'), 21.406015165088377),
(('CDNS', 'EBAY'), 21.48987119937285),
(('FISV', 'RHT'), 21.913964868103836),
(('CRM', 'WU'), 22.29733530684016),
(('TSS', 'WU'), 22.493071934956873),
(('MA', 'WU'), 22.543787681119465),
(('ATVI', 'MA'), 22.54702328311286),
(('ATVI', 'ORCL'), 23.358783918184045),
(('V', 'WU'), 23.734044154960266),
(('ADBE', 'SYMC'), 23.828023833314973),
(('ADP', 'ANSS'), 23.84709906933113),
(('ADBE', 'CTXS'), 23.967198766619457),
(('ATVI', 'NTAP'), 24.090786300249558),
(('VRSN', 'WU'), 24.175810311065415),
(('ATVI', 'TSS'), 24.22966939518591),
(('CDNS', 'FIS'), 24.65439386658227),
(('ATVI', 'INTU'), 24.70908645747999),
(('ATVI', 'MSFT'), 24.91574785973942),
(('ADSK', 'CA'), 25.00253404565265),
(('CDNS', 'FISV'), 25.015929608117617),
(('CA', 'EA'), 25.50215903805071),
(('NFLX', 'WU'), 26.23052590307758),
(('ADSK', 'PAYX'), 26.287252771312566),
(('RHT', 'SYMC'), 26.59712869275919),
(('AKAM', 'CTXS'), 26.681146553488418),
(('CTXS', 'RHT'), 27.659407918969343),
(('EA', 'PAYX'), 28.077198829424677),
(('FB', 'WU'), 28.329607998809706),
(('ATVI', 'GOOG'), 28.839374630162528),
(('CDNS', 'SYMC'), 29.04313870451233),
(('ADBE', 'ADP'), 29.376129693592553),
(('ANSS', 'PAYX'), 29.38047161074107),
(('ANSS', 'CA'), 29.943261681141287),
(('NTAP', 'WU'), 30.38583022136295),
(('ATVI', 'GOOGL'), 30.78521348947124),
(('CDNS', 'CTXS'), 30.874006045052283),
(('SNPS', 'WU'), 31.367380362236766),
(('ADP', 'RHT'), 32.007280537056104),
(('AKAM', 'FISV'), 32.353809778653115),
(('AKAM', 'FIS'), 32.41950857974421),
(('ATVI', 'EBAY'), 32.77265383724729),
(('AKAM', 'SYMC'), 33.97428230347582),
(('ADBE', 'PAYX'), 35.64310708952115),
(('ADBE', 'CA'), 36.511020170463276),
(('ADP', 'CDNS'), 36.91168258814312),
(('ATVI', 'FISV'), 37.52490631785801),
(('AKAM', 'EBAY'), 37.65293431813518),
(('ATVI', 'FIS'), 37.657095348456345),
(('PAYX', 'RHT'), 38.58053388646707),
(('AKAM', 'GOOGL'), 39.397637759370404),
(('ADSK', 'WU'), 39.765895809931905),
(('CA', 'RHT'), 39.76651187557996),
(('ATVI', 'SYMC'), 40.51242765452401),
(('EA', 'WU'), 40.958594678584845),
(('AKAM', 'GOOG'), 41.62565878305333),
(('CDNS', 'PAYX'), 44.03092057783347),
(('CA', 'CDNS'), 44.92324615372572),
(('ATVI', 'CTXS'), 45.44837761131093),
(('ANSS', 'WU'), 45.704650169081155),
(('AKAM', 'MSFT'), 47.223042688254836),
(('AKAM', 'INTU'), 47.36271200879837),
(('AKAM', 'ORCL'), 50.1097858975067),
(('AKAM', 'TSS'), 50.54386420484589),
(('AKAM', 'MA'), 51.764804536559545),
(('AKAM', 'CRM'), 51.8854544044754),
(('ADBE', 'WU'), 53.5264717931703),
(('ADP', 'ATVI'), 53.57235979823229),
(('AKAM', 'V'), 54.19181180072739),
(('AKAM', 'VRSN'), 54.793869519032775),
(('RHT', 'WU'), 57.463025756540006),
(('AKAM', 'NFLX'), 57.59766251382763),
(('AKAM', 'NTAP'), 58.30983641058923),
(('ATVI', 'CA'), 61.33711278896739),
(('AKAM', 'FB'), 61.785387799939116),
(('ATVI', 'PAYX'), 63.12238407158666),
(('CDNS', 'WU'), 63.83878338302211),
(('AKAM', 'SNPS'), 65.45853719125701),
(('ADSK', 'AKAM'), 79.44756182547066),
(('AKAM', 'EA'), 81.89300127017108),
(('ATVI', 'WU'), 84.52634607386977),
(('AKAM', 'ANSS'), 84.95618664334728),
(('ADBE', 'AKAM'), 94.87267386981122),
(('AKAM', 'RHT'), 99.66045731864762),
(('AKAM', 'CDNS'), 109.05679793343567),
(('AKAM', 'ATVI'), 138.11864153118768)]
\end{Verbatim}
\subsubsection{Minimized Sum of Squared Deviations within the Related
Industry Only -
Semiconductors}\label{minimized-sum-of-squared-deviations-within-the-related-industry-only---semiconductors}
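The cell below repeats, for the semiconductor tickers alone, the pairwise statistic computed above. Writing it out explicitly (the notation here is ours: $P_{i,t}$ denotes the value in row $t$ of ticker $i$'s column and $T$ the number of rows), the quantity stored for each pair $(i,j)$ is the sum of squared deviations
\[
SSD_{i,j} = \sum_{t=1}^{T}\left(P_{i,t}-P_{j,t}\right)^{2},
\]
and the pairs are then ranked from the smallest value upwards.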
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}339}]:} \PY{c+c1}{\PYZsh{} https://stackoverflow.com/questions/39203662/euclidean\PYZhy{}distance\PYZhy{}matrix\PYZhy{}using\PYZhy{}pandas}
\PY{c+c1}{\PYZsh{}pd.DataFrame( , columns = df\PYZus{}ticker\PYZus{}closing\PYZus{}for\PYZus{}semiconductors.columns, index = df\PYZus{}ticker\PYZus{}closing\PYZus{}for\PYZus{}semiconductors.index)}
\PY{c+c1}{\PYZsh{} https://stackoverflow.com/questions/41337316/pandas\PYZhy{}compare\PYZhy{}all\PYZhy{}dataframe\PYZhy{}columns\PYZhy{}with\PYZhy{}eachother}
\PY{n}{df} \PY{o}{=} \PY{n}{df\PYZus{}ticker\PYZus{}closing\PYZus{}for\PYZus{}semiconductors}\PY{o}{.}\PY{n}{copy}\PY{p}{(}\PY{p}{)}
\PY{c+c1}{\PYZsh{} This is a numpy array}
\PY{n}{ndarray\PYZus{}a} \PY{o}{=} \PY{n}{df}\PY{o}{.}\PY{n}{values}
\PY{n}{column\PYZus{}names} \PY{o}{=} \PY{n}{df}\PY{o}{.}\PY{n}{columns}
\PY{c+c1}{\PYZsh{} Compare every column with every other column}
\PY{c+c1}{\PYZsh{}list\PYZus{}for\PYZus{}matrix\PYZus{}a = []}
\PY{n}{dict\PYZus{}pairs} \PY{o}{=} \PY{p}{\PYZob{}}\PY{p}{\PYZcb{}}
\PY{k}{for} \PY{n}{i} \PY{o+ow}{in} \PY{n+nb}{range}\PY{p}{(}\PY{n+nb}{len}\PY{p}{(}\PY{n}{ndarray\PYZus{}a}\PY{o}{.}\PY{n}{T}\PY{p}{)}\PY{p}{)}\PY{p}{:}
\PY{n}{column\PYZus{}name\PYZus{}in\PYZus{}focus} \PY{o}{=} \PY{n}{column\PYZus{}names}\PY{p}{[}\PY{n}{i}\PY{p}{]}
\PY{n}{column\PYZus{}values\PYZus{}in\PYZus{}focus} \PY{o}{=} \PY{n}{ndarray\PYZus{}a}\PY{p}{[}\PY{p}{:}\PY{p}{,} \PY{n}{i}\PY{p}{]}
\PY{c+c1}{\PYZsh{} The other columns}
\PY{k}{for} \PY{n}{j} \PY{o+ow}{in} \PY{n+nb}{range}\PY{p}{(}\PY{n+nb}{len}\PY{p}{(}\PY{n}{ndarray\PYZus{}a}\PY{o}{.}\PY{n}{T}\PY{p}{)}\PY{p}{)}\PY{p}{:}
\PY{c+c1}{\PYZsh{} Skip comparison to itself }
\PY{k}{if} \PY{n}{j} \PY{o}{==} \PY{n}{i}\PY{p}{:}
\PY{k}{continue}
\PY{n}{column\PYZus{}name\PYZus{}other} \PY{o}{=} \PY{n}{column\PYZus{}names}\PY{p}{[}\PY{n}{j}\PY{p}{]}
\PY{n}{column\PYZus{}values\PYZus{}other} \PY{o}{=} \PY{n}{ndarray\PYZus{}a}\PY{p}{[}\PY{p}{:}\PY{p}{,} \PY{n}{j}\PY{p}{]}
\PY{c+c1}{\PYZsh{} These are numpy ndarrays}
\PY{n}{ndarray\PYZus{}temp\PYZus{}a} \PY{o}{=} \PY{n}{column\PYZus{}values\PYZus{}in\PYZus{}focus} \PY{o}{\PYZhy{}} \PY{n}{column\PYZus{}values\PYZus{}other}
\PY{n}{ndarray\PYZus{}temp\PYZus{}b} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{square}\PY{p}{(}\PY{n}{ndarray\PYZus{}temp\PYZus{}a}\PY{p}{)}
\PY{n}{sum\PYZus{}squared\PYZus{}deviations} \PY{o}{=} \PY{n}{ndarray\PYZus{}temp\PYZus{}b}\PY{o}{.}\PY{n}{sum}\PY{p}{(}\PY{p}{)}
\PY{c+c1}{\PYZsh{} Sort the pair of names so each pair has one canonical, unique key:}
\PY{c+c1}{\PYZsh{} (\PYZsq{}ADI\PYZsq{}, \PYZsq{}XLNX\PYZsq{}) and (\PYZsq{}XLNX\PYZsq{}, \PYZsq{}ADI\PYZsq{}) map to the same key.}
\PY{c+c1}{\PYZsh{} The value for that key is overwritten on the second visit, but it is}
\PY{c+c1}{\PYZsh{} the same value, so nothing is lost.}
\PY{c+c1}{\PYZsh{}}
\PY{c+c1}{\PYZsh{} Then convert the list to a tuple so it can be used as a dictionary key.}
\PY{n}{list\PYZus{}pair\PYZus{}key} \PY{o}{=} \PY{n+nb}{sorted}\PY{p}{(}\PY{p}{[}\PY{n}{column\PYZus{}name\PYZus{}in\PYZus{}focus}\PY{p}{,} \PY{n}{column\PYZus{}name\PYZus{}other}\PY{p}{]}\PY{p}{)}
\PY{n}{tuple\PYZus{}pair\PYZus{}key} \PY{o}{=} \PY{n+nb}{tuple}\PY{p}{(}\PY{n}{list\PYZus{}pair\PYZus{}key}\PY{p}{)}
\PY{n}{dict\PYZus{}pairs}\PY{p}{[}\PY{n}{tuple\PYZus{}pair\PYZus{}key}\PY{p}{]} \PY{o}{=} \PY{n}{sum\PYZus{}squared\PYZus{}deviations}
\end{Verbatim}
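As a side note, the same pairwise table can be built more compactly with SciPy's distance helpers, since the squared Euclidean distance between two columns is exactly this sum of squared deviations. The sketch below is only an illustrative alternative to the loop above, assuming \texttt{scipy} is available and \texttt{df\_ticker\_closing\_for\_semiconductors} is the same DataFrame used in the cell.
\begin{Verbatim}
# Alternative sketch (not from the notebook): pairwise sum of squared
# deviations via SciPy instead of the explicit double loop.
from itertools import combinations

from scipy.spatial.distance import pdist, squareform

df = df_ticker_closing_for_semiconductors
# pdist works row-wise, so transpose: one row per ticker.  The
# 'sqeuclidean' metric is the sum of squared differences, i.e. the same
# sum of squared deviations computed above.
ssd_matrix = squareform(pdist(df.values.T, metric='sqeuclidean'))

dict_pairs_alt = {
    tuple(sorted((name_i, name_j))): ssd_matrix[i, j]
    for (i, name_i), (j, name_j) in combinations(enumerate(df.columns), 2)
}
\end{Verbatim}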
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}340}]:} \PY{n}{dict\PYZus{}pairs}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}340}]:} \{('ADI', 'AMAT'): 15.409621894273716,
('ADI', 'AMD'): 10.270124812815467,
('ADI', 'AVGO'): 7.410060308379357,
('ADI', 'INTC'): 3.728934838622128,
('ADI', 'KLAC'): 2.2691757219751896,
('ADI', 'LRCX'): 33.62975145123157,
('ADI', 'MCHP'): 4.393596211924288,
('ADI', 'MU'): 53.50393464508413,
('ADI', 'NVDA'): 52.403051521260295,
('ADI', 'QCOM'): 18.563161902462408,
('ADI', 'QRVO'): 2.0794069574152445,
('ADI', 'SWKS'): 6.976968857740386,
('ADI', 'TXN'): 1.9859323681800614,
('ADI', 'XLNX'): 0.8472312767165687,
('AMAT', 'AMD'): 20.67896462610036,
('AMAT', 'AVGO'): 3.7710241391412676,
('AMAT', 'INTC'): 26.80535021389236,
('AMAT', 'KLAC'): 8.348660336741,
('AMAT', 'LRCX'): 3.7335933812111457,
('AMAT', 'MCHP'): 4.27285769584317,
('AMAT', 'MU'): 13.121547700573586,
('AMAT', 'NVDA'): 13.855923845250377,
('AMAT', 'QCOM'): 61.494638709091895,
('AMAT', 'QRVO'): 13.435834239448647,
('AMAT', 'SWKS'): 7.672281410194768,
('AMAT', 'TXN'): 10.236713850295295,
('AMAT', 'XLNX'): 15.221760878707585,
('AMD', 'AVGO'): 10.4431315585542,
('AMD', 'INTC'): 24.18908167793324,
('AMD', 'KLAC'): 8.841407821806783,
('AMD', 'LRCX'): 36.569508087369414,
('AMD', 'MCHP'): 10.280931387897315,
('AMD', 'MU'): 56.65870410667582,
('AMD', 'NVDA'): 57.77420658558669,
('AMD', 'QCOM'): 45.91631724543036,
('AMD', 'QRVO'): 8.04230801921133,
('AMD', 'SWKS'): 6.032966330410986,
('AMD', 'TXN'): 15.330687075782489,
('AMD', 'XLNX'): 13.25472742746754,
('AVGO', 'INTC'): 17.86423039575638,
('AVGO', 'KLAC'): 3.004284756473002,
('AVGO', 'LRCX'): 12.793031283914337,
('AVGO', 'MCHP'): 1.234849025424745,
('AVGO', 'MU'): 27.133741892778875,
('AVGO', 'NVDA'): 26.56274085731252,
('AVGO', 'QCOM'): 45.49178117187881,
('AVGO', 'QRVO'): 5.011766778799581,
('AVGO', 'SWKS'): 1.822496536249247,
('AVGO', 'TXN'): 5.823712034045354,
('AVGO', 'XLNX'): 7.977478689436387,
('INTC', 'KLAC'): 9.50875360051205,
('INTC', 'LRCX'): 48.5941706790109,
('INTC', 'MCHP'): 13.122870250540794,
('INTC', 'MU'): 69.91548085987642,
('INTC', 'NVDA'): 67.45377571086769,
('INTC', 'QCOM'): 8.319012251502953,
('INTC', 'QRVO'): 8.822870331726378,
('INTC', 'SWKS'): 18.897228638971697,
('INTC', 'TXN'): 4.967780776727633,
('INTC', 'XLNX'): 3.595061680411969,
('KLAC', 'LRCX'): 22.425605324314212,
('KLAC', 'MCHP'): 1.59847575685719,
('KLAC', 'MU'): 39.78112631000559,
('KLAC', 'NVDA'): 41.14624070005211,
('KLAC', 'QCOM'): 30.63221134848983,
('KLAC', 'QRVO'): 1.8528020213566363,
('KLAC', 'SWKS'): 2.8486200736535103,
('KLAC', 'TXN'): 2.5557861098371353,
('KLAC', 'XLNX'): 2.9232627425123208,
('LRCX', 'MCHP'): 14.88119359096009,
('LRCX', 'MU'): 4.769782258097078,
('LRCX', 'NVDA'): 6.0678250636180735,
('LRCX', 'QCOM'): 93.26870518815028,
('LRCX', 'QRVO'): 29.563000037382487,
('LRCX', 'SWKS'): 18.63015370135768,
('LRCX', 'TXN'): 25.070280770539064,
('LRCX', 'XLNX'): 33.06343491474051,
('MCHP', 'MU'): 30.61626975853009,
('MCHP', 'NVDA'): 29.1033844279511,
('MCHP', 'QCOM'): 38.39356241080645,
('MCHP', 'QRVO'): 3.5434010166077172,
('MCHP', 'SWKS'): 2.457283560344462,
('MCHP', 'TXN'): 3.3422108791541403,
('MCHP', 'XLNX'): 4.531357897873351,
('MU', 'NVDA'): 6.555988006120443,
('MU', 'QCOM'): 122.20803404752033,
('MU', 'QRVO'): 50.213155565750945,
('MU', 'SWKS'): 36.15432210961284,
('MU', 'TXN'): 40.86788005521484,
('MU', 'XLNX'): 53.95390426700368,
('NVDA', 'QCOM'): 116.08385912068772,
('NVDA', 'QRVO'): 50.07346203994893,
('NVDA', 'SWKS'): 37.20756871720773,
('NVDA', 'TXN'): 39.93319524776679,
('NVDA', 'XLNX'): 50.390288994795405,
('QCOM', 'QRVO'): 26.69438443662439,
('QCOM', 'SWKS'): 45.28187095524986,
('QCOM', 'TXN'): 23.926189678303295,
('QCOM', 'XLNX'): 17.49680478495416,
('QRVO', 'SWKS'): 2.9291445432155454,
('QRVO', 'TXN'): 4.998382581875542,
('QRVO', 'XLNX'): 2.881268035181991,
('SWKS', 'TXN'): 8.354541060798446,
('SWKS', 'XLNX'): 8.205720456827613,
('TXN', 'XLNX'): 2.4415248210604705\}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}341}]:} \PY{c+c1}{\PYZsh{} Sort dictionary by value}
\PY{n}{d} \PY{o}{=} \PY{n}{dict\PYZus{}pairs}
\PY{c+c1}{\PYZsh{} Note: dictionaries do not guarantee insertion order before Python 3.7,}
\PY{c+c1}{\PYZsh{} so return the pairs as a list of (key, value) tuples sorted by value.}
\PY{n}{list\PYZus{}minimized\PYZus{}sum\PYZus{}of\PYZus{}squared\PYZus{}deviations} \PY{o}{=} \PY{p}{[}\PY{p}{(}\PY{n}{k}\PY{p}{,} \PY{n}{d}\PY{p}{[}\PY{n}{k}\PY{p}{]}\PY{p}{)} \PY{k}{for} \PY{n}{k} \PY{o+ow}{in} \PY{n+nb}{sorted}\PY{p}{(}\PY{n}{d}\PY{p}{,} \PY{n}{key}\PY{o}{=}\PY{n}{d}\PY{o}{.}\PY{n}{get}\PY{p}{,} \PY{n}{reverse}\PY{o}{=}\PY{k+kc}{False}\PY{p}{)}\PY{p}{]}
\PY{n}{list\PYZus{}minimized\PYZus{}sum\PYZus{}of\PYZus{}squared\PYZus{}deviations}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}341}]:} [(('ADI', 'XLNX'), 0.8472312767165687),
(('AVGO', 'MCHP'), 1.234849025424745),
(('KLAC', 'MCHP'), 1.59847575685719),
(('AVGO', 'SWKS'), 1.822496536249247),
(('KLAC', 'QRVO'), 1.8528020213566363),
(('ADI', 'TXN'), 1.9859323681800614),
(('ADI', 'QRVO'), 2.0794069574152445),
(('ADI', 'KLAC'), 2.2691757219751896),
(('TXN', 'XLNX'), 2.4415248210604705),
(('MCHP', 'SWKS'), 2.457283560344462),
(('KLAC', 'TXN'), 2.5557861098371353),
(('KLAC', 'SWKS'), 2.8486200736535103),
(('QRVO', 'XLNX'), 2.881268035181991),
(('KLAC', 'XLNX'), 2.9232627425123208),
(('QRVO', 'SWKS'), 2.9291445432155454),
(('AVGO', 'KLAC'), 3.004284756473002),
(('MCHP', 'TXN'), 3.3422108791541403),
(('MCHP', 'QRVO'), 3.5434010166077172),
(('INTC', 'XLNX'), 3.595061680411969),
(('ADI', 'INTC'), 3.728934838622128),
(('AMAT', 'LRCX'), 3.7335933812111457),
(('AMAT', 'AVGO'), 3.7710241391412676),
(('AMAT', 'MCHP'), 4.27285769584317),
(('ADI', 'MCHP'), 4.393596211924288),
(('MCHP', 'XLNX'), 4.531357897873351),
(('LRCX', 'MU'), 4.769782258097078),
(('INTC', 'TXN'), 4.967780776727633),
(('QRVO', 'TXN'), 4.998382581875542),
(('AVGO', 'QRVO'), 5.011766778799581),
(('AVGO', 'TXN'), 5.823712034045354),
(('AMD', 'SWKS'), 6.032966330410986),
(('LRCX', 'NVDA'), 6.0678250636180735),
(('MU', 'NVDA'), 6.555988006120443),
(('ADI', 'SWKS'), 6.976968857740386),
(('ADI', 'AVGO'), 7.410060308379357),
(('AMAT', 'SWKS'), 7.672281410194768),
(('AVGO', 'XLNX'), 7.977478689436387),
(('AMD', 'QRVO'), 8.04230801921133),
(('SWKS', 'XLNX'), 8.205720456827613),
(('INTC', 'QCOM'), 8.319012251502953),
(('AMAT', 'KLAC'), 8.348660336741),
(('SWKS', 'TXN'), 8.354541060798446),
(('INTC', 'QRVO'), 8.822870331726378),
(('AMD', 'KLAC'), 8.841407821806783),
(('INTC', 'KLAC'), 9.50875360051205),
(('AMAT', 'TXN'), 10.236713850295295),
(('ADI', 'AMD'), 10.270124812815467),
(('AMD', 'MCHP'), 10.280931387897315),
(('AMD', 'AVGO'), 10.4431315585542),
(('AVGO', 'LRCX'), 12.793031283914337),
(('AMAT', 'MU'), 13.121547700573586),
(('INTC', 'MCHP'), 13.122870250540794),
(('AMD', 'XLNX'), 13.25472742746754),
(('AMAT', 'QRVO'), 13.435834239448647),
(('AMAT', 'NVDA'), 13.855923845250377),
(('LRCX', 'MCHP'), 14.88119359096009),
(('AMAT', 'XLNX'), 15.221760878707585),
(('AMD', 'TXN'), 15.330687075782489),
(('ADI', 'AMAT'), 15.409621894273716),
(('QCOM', 'XLNX'), 17.49680478495416),
(('AVGO', 'INTC'), 17.86423039575638),
(('ADI', 'QCOM'), 18.563161902462408),
(('LRCX', 'SWKS'), 18.63015370135768),
(('INTC', 'SWKS'), 18.897228638971697),
(('AMAT', 'AMD'), 20.67896462610036),
(('KLAC', 'LRCX'), 22.425605324314212),
(('QCOM', 'TXN'), 23.926189678303295),
(('AMD', 'INTC'), 24.18908167793324),
(('LRCX', 'TXN'), 25.070280770539064),
(('AVGO', 'NVDA'), 26.56274085731252),
(('QCOM', 'QRVO'), 26.69438443662439),
(('AMAT', 'INTC'), 26.80535021389236),
(('AVGO', 'MU'), 27.133741892778875),
(('MCHP', 'NVDA'), 29.1033844279511),
(('LRCX', 'QRVO'), 29.563000037382487),
(('MCHP', 'MU'), 30.61626975853009),
(('KLAC', 'QCOM'), 30.63221134848983),
(('LRCX', 'XLNX'), 33.06343491474051),
(('ADI', 'LRCX'), 33.62975145123157),
(('MU', 'SWKS'), 36.15432210961284),
(('AMD', 'LRCX'), 36.569508087369414),
(('NVDA', 'SWKS'), 37.20756871720773),
(('MCHP', 'QCOM'), 38.39356241080645),
(('KLAC', 'MU'), 39.78112631000559),
(('NVDA', 'TXN'), 39.93319524776679),
(('MU', 'TXN'), 40.86788005521484),
(('KLAC', 'NVDA'), 41.14624070005211),
(('QCOM', 'SWKS'), 45.28187095524986),
(('AVGO', 'QCOM'), 45.49178117187881),
(('AMD', 'QCOM'), 45.91631724543036),
(('INTC', 'LRCX'), 48.5941706790109),
(('NVDA', 'QRVO'), 50.07346203994893),
(('MU', 'QRVO'), 50.213155565750945),
(('NVDA', 'XLNX'), 50.390288994795405),
(('ADI', 'NVDA'), 52.403051521260295),
(('ADI', 'MU'), 53.50393464508413),
(('MU', 'XLNX'), 53.95390426700368),
(('AMD', 'MU'), 56.65870410667582),
(('AMD', 'NVDA'), 57.77420658558669),
(('AMAT', 'QCOM'), 61.494638709091895),
(('INTC', 'NVDA'), 67.45377571086769),
(('INTC', 'MU'), 69.91548085987642),
(('LRCX', 'QCOM'), 93.26870518815028),
(('NVDA', 'QCOM'), 116.08385912068772),
(('MU', 'QCOM'), 122.20803404752033)]
\end{Verbatim}
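The ranking above comes from sorting the dictionary keys by their values in the preceding cell. An equivalent, slightly more direct formulation (shown only as an alternative sketch, not taken from the notebook) sorts the dictionary's \texttt{items()} instead:
\begin{Verbatim}
# Alternative sketch: sort the (pair, ssd) items by the ssd value directly.
list_minimized_sum_of_squared_deviations = sorted(
    dict_pairs.items(), key=lambda item: item[1]
)
\end{Verbatim}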
\subsubsection{Minimized Sum of Squared Deviations between Main Industry
and Related Industry - Software and
Semiconductors}\label{minimized-sum-of-squared-deviations-between-main-industry-and-related-industry---software-and-semiconductors}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}342}]:} \PY{c+c1}{\PYZsh{} https://stackoverflow.com/questions/39203662/euclidean\PYZhy{}distance\PYZhy{}matrix\PYZhy{}using\PYZhy{}pandas}
\PY{c+c1}{\PYZsh{}pd.DataFrame( , columns = df\PYZus{}ticker\PYZus{}closing\PYZus{}for\PYZus{}semiconductors.columns, index = df\PYZus{}ticker\PYZus{}closing\PYZus{}for\PYZus{}semiconductors.index)}
\PY{c+c1}{\PYZsh{} https://stackoverflow.com/questions/41337316/pandas\PYZhy{}compare\PYZhy{}all\PYZhy{}dataframe\PYZhy{}columns\PYZhy{}with\PYZhy{}eachother}
\PY{n}{df\PYZus{}a} \PY{o}{=} \PY{n}{df\PYZus{}ticker\PYZus{}closing\PYZus{}for\PYZus{}software}\PY{o}{.}\PY{n}{copy}\PY{p}{(}\PY{p}{)}
\PY{n}{df\PYZus{}b} \PY{o}{=} \PY{n}{df\PYZus{}ticker\PYZus{}closing\PYZus{}for\PYZus{}semiconductors}\PY{o}{.}\PY{n}{copy}\PY{p}{(}\PY{p}{)}
\PY{c+c1}{\PYZsh{} This is a numpy array}
\PY{n}{ndarray\PYZus{}a} \PY{o}{=} \PY{n}{df\PYZus{}a}\PY{o}{.}\PY{n}{values}
\PY{n}{column\PYZus{}names\PYZus{}a} \PY{o}{=} \PY{n}{df\PYZus{}a}\PY{o}{.}\PY{n}{columns}
\PY{c+c1}{\PYZsh{}}
\PY{n}{ndarray\PYZus{}b} \PY{o}{=} \PY{n}{df\PYZus{}b}\PY{o}{.}\PY{n}{values}
\PY{n}{column\PYZus{}names\PYZus{}b} \PY{o}{=} \PY{n}{df\PYZus{}b}\PY{o}{.}\PY{n}{columns}
\PY{c+c1}{\PYZsh{} Compare every software column with every semiconductor column}
\PY{c+c1}{\PYZsh{}list\PYZus{}for\PYZus{}matrix\PYZus{}a = []}
\PY{n}{dict\PYZus{}pairs} \PY{o}{=} \PY{p}{\PYZob{}}\PY{p}{\PYZcb{}}
\PY{k}{for} \PY{n}{i} \PY{o+ow}{in} \PY{n+nb}{range}\PY{p}{(}\PY{n+nb}{len}\PY{p}{(}\PY{n}{ndarray\PYZus{}a}\PY{o}{.}\PY{n}{T}\PY{p}{)}\PY{p}{)}\PY{p}{:}
\PY{n}{column\PYZus{}name\PYZus{}in\PYZus{}focus} \PY{o}{=} \PY{n}{column\PYZus{}names\PYZus{}a}\PY{p}{[}\PY{n}{i}\PY{p}{]}
\PY{n}{column\PYZus{}values\PYZus{}in\PYZus{}focus} \PY{o}{=} \PY{n}{ndarray\PYZus{}a}\PY{p}{[}\PY{p}{:}\PY{p}{,} \PY{n}{i}\PY{p}{]}
\PY{c+c1}{\PYZsh{} The other columns}
\PY{k}{for} \PY{n}{j} \PY{o+ow}{in} \PY{n+nb}{range}\PY{p}{(}\PY{n+nb}{len}\PY{p}{(}\PY{n}{ndarray\PYZus{}b}\PY{o}{.}\PY{n}{T}\PY{p}{)}\PY{p}{)}\PY{p}{:}
\PY{c+c1}{\PYZsh{} \PYZsh{} Skip comparison to itself }
\PY{c+c1}{\PYZsh{} if j == i:}
\PY{c+c1}{\PYZsh{} continue}
\PY{n}{column\PYZus{}name\PYZus{}other} \PY{o}{=} \PY{n}{column\PYZus{}names\PYZus{}b}\PY{p}{[}\PY{n}{j}\PY{p}{]}
\PY{n}{column\PYZus{}values\PYZus{}other} \PY{o}{=} \PY{n}{ndarray\PYZus{}b}\PY{p}{[}\PY{p}{:}\PY{p}{,} \PY{n}{j}\PY{p}{]}
\PY{c+c1}{\PYZsh{} These are numpy ndarrays}
\PY{n}{ndarray\PYZus{}temp\PYZus{}a} \PY{o}{=} \PY{n}{column\PYZus{}values\PYZus{}in\PYZus{}focus} \PY{o}{\PYZhy{}} \PY{n}{column\PYZus{}values\PYZus{}other}
\PY{n}{ndarray\PYZus{}temp\PYZus{}b} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{square}\PY{p}{(}\PY{n}{ndarray\PYZus{}temp\PYZus{}a}\PY{p}{)}
\PY{n}{sum\PYZus{}squared\PYZus{}deviations} \PY{o}{=} \PY{n}{ndarray\PYZus{}temp\PYZus{}b}\PY{o}{.}\PY{n}{sum}\PY{p}{(}\PY{p}{)}
\PY{c+c1}{\PYZsh{} The software and semiconductor ticker sets are disjoint and each}
\PY{c+c1}{\PYZsh{} (software, semiconductor) pair is visited exactly once, so the key does}
\PY{c+c1}{\PYZsh{} not need to be sorted into a canonical order as it was within a single}
\PY{c+c1}{\PYZsh{} industry; the original column order is kept in the tuple key below.}
\PY{c+c1}{\PYZsh{}list\PYZus{}pair\PYZus{}key = sorted([column\PYZus{}name\PYZus{}in\PYZus{}focus, column\PYZus{}name\PYZus{}other])}
\PY{n}{tuple\PYZus{}pair\PYZus{}key} \PY{o}{=} \PY{p}{(}\PY{n}{column\PYZus{}name\PYZus{}in\PYZus{}focus}\PY{p}{,} \PY{n}{column\PYZus{}name\PYZus{}other}\PY{p}{)}
\PY{n}{dict\PYZus{}pairs}\PY{p}{[}\PY{n}{tuple\PYZus{}pair\PYZus{}key}\PY{p}{]} \PY{o}{=} \PY{n}{sum\PYZus{}squared\PYZus{}deviations}
\end{Verbatim}
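The nested loop over the two industries can likewise be collapsed with SciPy's \texttt{cdist}, which evaluates the squared Euclidean distance between every software column and every semiconductor column in one call. Again this is only an illustrative sketch under the assumption that \texttt{df\_a} and \texttt{df\_b} are the two DataFrames copied in the cell above.
\begin{Verbatim}
# Alternative sketch (not from the notebook): cross-industry sums of squared
# deviations via scipy.spatial.distance.cdist on the transposed values.
from scipy.spatial.distance import cdist

ssd = cdist(df_a.values.T, df_b.values.T, metric='sqeuclidean')

dict_pairs_alt = {
    (software, semiconductor): ssd[i, j]
    for i, software in enumerate(df_a.columns)
    for j, semiconductor in enumerate(df_b.columns)
}
\end{Verbatim}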
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}343}]:} \PY{n}{dict\PYZus{}pairs}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}343}]:} \{('ADBE', 'ADI'): 15.641497452290585,
('ADBE', 'AMAT'): 1.2800980451057407,
('ADBE', 'AMD'): 21.141148492516677,
('ADBE', 'AVGO'): 3.229031816092198,
('ADBE', 'INTC'): 26.603769805898253,
('ADBE', 'KLAC'): 8.983868763624907,
('ADBE', 'LRCX'): 4.947437744266467,
('ADBE', 'MCHP'): 4.7534664617861475,
('ADBE', 'MU'): 14.015114483544183,
('ADBE', 'NVDA'): 14.090597686215157,
('ADBE', 'QCOM'): 60.97321968260145,
('ADBE', 'QRVO'): 13.91965734214309,
('ADBE', 'SWKS'): 8.013527852182303,
('ADBE', 'TXN'): 9.743496085765168,
('ADBE', 'XLNX'): 15.995232558511132,
('ADP', 'ADI'): 3.3399481439441305,
('ADP', 'AMAT'): 29.55695293245271,
('ADP', 'AMD'): 19.534769843996934,
('ADP', 'AVGO'): 18.21057535276634,
('ADP', 'INTC'): 1.513227017157409,
('ADP', 'KLAC'): 9.546610453832688,
('ADP', 'LRCX'): 52.833379866900344,
('ADP', 'MCHP'): 13.297060351310222,
('ADP', 'MU'): 77.29454456617162,
('ADP', 'NVDA'): 72.71541042059124,
('ADP', 'QCOM'): 7.667910811503187,
('ADP', 'QRVO'): 7.965328007054493,
('ADP', 'SWKS'): 17.82225497072124,
('ADP', 'TXN'): 6.608582231432001,
('ADP', 'XLNX'): 3.1367306779098536,
('ADSK', 'ADI'): 9.550995428644642,
('ADSK', 'AMAT'): 3.1305312474565037,
('ADSK', 'AMD'): 13.965940041283234,
('ADSK', 'AVGO'): 1.8915725819144973,
('ADSK', 'INTC'): 20.185431992966514,
('ADSK', 'KLAC'): 5.179947441698206,
('ADSK', 'LRCX'): 10.712682283806249,
('ADSK', 'MCHP'): 1.7208553397356319,
('ADSK', 'MU'): 26.27962743127616,
('ADSK', 'NVDA'): 22.735921675969003,
('ADSK', 'QCOM'): 48.4727188419712,
('ADSK', 'QRVO'): 6.559853702580115,
('ADSK', 'SWKS'): 3.47605743380632,
('ADSK', 'TXN'): 8.245649450023652,
('ADSK', 'XLNX'): 8.688691070489895,
('AKAM', 'ADI'): 36.80614763565199,
('AKAM', 'AMAT'): 95.54572746289244,
('AKAM', 'AMD'): 65.99217019899946,
('AKAM', 'AVGO'): 74.57691760935943,
('AKAM', 'INTC'): 22.18151190769331,
('AKAM', 'KLAC'): 53.65149329006273,
('AKAM', 'LRCX'): 134.67021174233633,
('AKAM', 'MCHP'): 64.97435841816045,
('AKAM', 'MU'): 168.02243780617792,
('AKAM', 'NVDA'): 162.55528571903338,
('AKAM', 'QCOM'): 5.397042405948808,
('AKAM', 'QRVO'): 48.090892214536396,
('AKAM', 'SWKS'): 71.71002966893637,
('AKAM', 'TXN'): 45.15239789357456,
('AKAM', 'XLNX'): 36.77129923927621,
('ANSS', 'ADI'): 11.705766345503283,
('ANSS', 'AMAT'): 1.8386290662095512,
('ANSS', 'AMD'): 18.06570273178554,
('ANSS', 'AVGO'): 1.5230308747745935,
('ANSS', 'INTC'): 21.74741223760421,
('ANSS', 'KLAC'): 5.995556859729725,
('ANSS', 'LRCX'): 7.936326621368339,
('ANSS', 'MCHP'): 2.971177160670713,
('ANSS', 'MU'): 19.198469575621022,
('ANSS', 'NVDA'): 18.617339876833864,
('ANSS', 'QCOM'): 52.42964327275603,
('ANSS', 'QRVO'): 9.963992834470323,
('ANSS', 'SWKS'): 5.708933420619753,
('ANSS', 'TXN'): 7.2347660308521835,
('ANSS', 'XLNX'): 11.830091672421048,
('ATVI', 'ADI'): 33.61372729988623,
('ATVI', 'AMAT'): 7.122805862797942,
('ATVI', 'AMD'): 28.10278074203699,
('ATVI', 'AVGO'): 10.584802766357736,
('ATVI', 'INTC'): 53.19064463347061,
('ATVI', 'KLAC'): 21.551867317891215,
('ATVI', 'LRCX'): 4.964722388916429,
('ATVI', 'MCHP'): 14.488160438511093,
('ATVI', 'MU'): 12.654621517069684,
('ATVI', 'NVDA'): 12.450756584249177,
('ATVI', 'QCOM'): 96.58766474067346,
('ATVI', 'QRVO'): 27.41276718406751,
('ATVI', 'SWKS'): 14.613960720782337,
('ATVI', 'TXN'): 27.81844331361608,
('ATVI', 'XLNX'): 33.92204891825767,
('CA', 'ADI'): 5.445553025342331,
('CA', 'AMAT'): 36.49507269746307,
('CA', 'AMD'): 20.73605408420914,
('CA', 'AVGO'): 22.746763651316854,
('CA', 'INTC'): 3.445512618519095,
('CA', 'KLAC'): 12.489029290514786,
('CA', 'LRCX'): 62.2387275194252,
('CA', 'MCHP'): 17.589221451485724,
('CA', 'MU'): 89.04651743061828,
('CA', 'NVDA'): 84.39443333912233,
('CA', 'QCOM'): 5.947465216980399,
('CA', 'QRVO'): 9.818295962133433,
('CA', 'SWKS'): 21.009716682953986,
('CA', 'TXN'): 10.752124544886751,
('CA', 'XLNX'): 4.962389858707814,
('CDNS', 'ADI'): 20.64316526927612,
('CDNS', 'AMAT'): 1.4409675059472304,
('CDNS', 'AMD'): 22.751348532462618,
('CDNS', 'AVGO'): 5.161811092239464,
('CDNS', 'INTC'): 34.19982518433737,
('CDNS', 'KLAC'): 12.486160085666137,
('CDNS', 'LRCX'): 2.786202384120178,
('CDNS', 'MCHP'): 7.015705353059896,
('CDNS', 'MU'): 10.003787537836825,
('CDNS', 'NVDA'): 11.428964086685394,
('CDNS', 'QCOM'): 72.89573389114403,
('CDNS', 'QRVO'): 17.888684971584798,
('CDNS', 'SWKS'): 9.810187459426341,
('CDNS', 'TXN'): 14.4872169329341,
('CDNS', 'XLNX'): 21.40032723887533,
('CRM', 'ADI'): 1.997656016697197,
('CRM', 'AMAT'): 7.6193332473852555,
('CRM', 'AMD'): 11.724097423669896,
('CRM', 'AVGO'): 2.6179215903960937,
('CRM', 'INTC'): 7.446315317339748,
('CRM', 'KLAC'): 1.3086781125145386,
('CRM', 'LRCX'): 20.941885192615864,
('CRM', 'MCHP'): 1.20869169961224,
('CRM', 'MU'): 37.57257377897888,
('CRM', 'NVDA'): 36.19835380367905,
('CRM', 'QCOM'): 28.24636354741675,
('CRM', 'QRVO'): 2.6604460084357497,
('CRM', 'SWKS'): 4.313964227859302,
('CRM', 'TXN'): 1.0856977608231528,
('CRM', 'XLNX'): 2.2146502472736547,
('CTXS', 'ADI'): 1.7111418190556513,
('CTXS', 'AMAT'): 24.355743725844047,
('CTXS', 'AMD'): 14.091955561162326,
('CTXS', 'AVGO'): 13.109353439176544,
('CTXS', 'INTC'): 2.7353165635824075,
('CTXS', 'KLAC'): 5.217057967076028,
('CTXS', 'LRCX'): 46.31669601666305,
('CTXS', 'MCHP'): 10.00839196776698,
('CTXS', 'MU'): 68.9365041143938,
('CTXS', 'NVDA'): 68.519632570599,
('CTXS', 'QCOM'): 11.870586808569865,
('CTXS', 'QRVO'): 4.297245266774303,
('CTXS', 'SWKS'): 11.917604880251496,
('CTXS', 'TXN'): 4.868710949397263,
('CTXS', 'XLNX'): 2.368944712068714,
('EA', 'ADI'): 10.632559611184648,
('EA', 'AMAT'): 5.67038492363221,
('EA', 'AMD'): 11.369723342340796,
('EA', 'AVGO'): 2.226087984222831,
('EA', 'INTC'): 23.2365422708407,
('EA', 'KLAC'): 5.603884866043956,
('EA', 'LRCX'): 13.922809590464553,
('EA', 'MCHP'): 2.381116610056907,
('EA', 'MU'): 31.16913827811288,
('EA', 'NVDA'): 26.652673080545902,
('EA', 'QCOM'): 51.25816088996295,
('EA', 'QRVO'): 7.080377861068116,
('EA', 'SWKS'): 3.0018709650252138,
('EA', 'TXN'): 10.291513338129036,
('EA', 'XLNX'): 9.964181785582714,
('EBAY', 'ADI'): 0.6669700061807349,
('EBAY', 'AMAT'): 16.369847413421255,
('EBAY', 'AMD'): 9.470492419121397,
('EBAY', 'AVGO'): 7.3766072626476955,
('EBAY', 'INTC'): 4.844847353878416,
('EBAY', 'KLAC'): 2.5157133544305084,
('EBAY', 'LRCX'): 34.66476798597868,
('EBAY', 'MCHP'): 4.4358271633060635,
('EBAY', 'MU'): 55.78390231749456,
('EBAY', 'NVDA'): 53.122504526800135,
('EBAY', 'QCOM'): 19.3054523972676,
('EBAY', 'QRVO'): 2.277695702694797,
('EBAY', 'SWKS'): 6.773082305246333,
('EBAY', 'TXN'): 3.0875436703530985,
('EBAY', 'XLNX'): 0.9172979723184711,
('FB', 'ADI'): 4.310549648198803,
('FB', 'AMAT'): 5.355019798959194,
('FB', 'AMD'): 11.381937533598368,
('FB', 'AVGO'): 1.3819220813233803,
('FB', 'INTC'): 11.62931972616743,
('FB', 'KLAC'): 2.2368750663683565,
('FB', 'LRCX'): 16.302038846448006,
('FB', 'MCHP'): 0.6189008071597084,
('FB', 'MU'): 32.511727562217175,
('FB', 'NVDA'): 29.91482373000176,
('FB', 'QCOM'): 35.70497897079473,
('FB', 'QRVO'): 4.048544380735673,
('FB', 'SWKS'): 3.4153550499609855,
('FB', 'TXN'): 2.877096600387234,
('FB', 'XLNX'): 4.22918299708272,
('FIS', 'ADI'): 0.9721460842565901,
('FIS', 'AMAT'): 18.883272204506873,
('FIS', 'AMD'): 13.099156992395116,
('FIS', 'AVGO'): 9.65874003260073,
('FIS', 'INTC'): 2.948774328347077,
('FIS', 'KLAC'): 4.200469503274639,
('FIS', 'LRCX'): 38.08838038691995,
('FIS', 'MCHP'): 6.127472044001941,
('FIS', 'MU'): 60.3922441819914,
('FIS', 'NVDA'): 56.141610858453,
('FIS', 'QCOM'): 15.050310033931911,
('FIS', 'QRVO'): 3.4764696998930686,
('FIS', 'SWKS'): 9.631628850034886,
('FIS', 'TXN'): 3.241467900080252,
('FIS', 'XLNX'): 0.6465886000734158,
('FISV', 'ADI'): 0.8126773010290321,
('FISV', 'AMAT'): 19.123786755704163,
('FISV', 'AMD'): 12.297532416462612,
('FISV', 'AVGO'): 9.366093956502034,
('FISV', 'INTC'): 3.22238329364667,
('FISV', 'KLAC'): 3.5291831245484273,
('FISV', 'LRCX'): 38.787350179774954,
('FISV', 'MCHP'): 6.276913817659961,
('FISV', 'MU'): 61.0502194903199,
('FISV', 'NVDA'): 58.00668154104851,
('FISV', 'QCOM'): 14.854789074063332,
('FISV', 'QRVO'): 2.8842466377820197,
('FISV', 'SWKS'): 8.990832337676242,
('FISV', 'TXN'): 3.326089525756251,
('FISV', 'XLNX'): 0.7275039920196817,
('GOOG', 'ADI'): 1.1283331094134528,
('GOOG', 'AMAT'): 12.387602835021475,
('GOOG', 'AMD'): 13.05014283097869,
('GOOG', 'AVGO'): 5.433618853208897,
('GOOG', 'INTC'): 4.572231565891804,
('GOOG', 'KLAC'): 1.7678068660222688,
('GOOG', 'LRCX'): 28.827379638643123,
('GOOG', 'MCHP'): 3.263660244777946,
('GOOG', 'MU'): 48.13536028587864,
('GOOG', 'NVDA'): 45.37886778456493,
('GOOG', 'QCOM'): 20.56211397569321,
('GOOG', 'QRVO'): 2.6492552874095923,
('GOOG', 'SWKS'): 6.6086921281843525,
('GOOG', 'TXN'): 1.399497980887719,
('GOOG', 'XLNX'): 0.6938522819778221,
('GOOGL', 'ADI'): 0.9991659273680565,
('GOOGL', 'AMAT'): 13.6722252827118,
('GOOGL', 'AMD'): 13.200857208214854,
('GOOGL', 'AVGO'): 6.221321566606663,
('GOOGL', 'INTC'): 4.051446129603078,
('GOOGL', 'KLAC'): 2.0612747556407918,
('GOOGL', 'LRCX'): 30.80466655990735,
('GOOGL', 'MCHP'): 3.865271615523935,
('GOOGL', 'MU'): 50.74060670955242,
('GOOGL', 'NVDA'): 47.857521548210244,
('GOOGL', 'QCOM'): 19.031407198249152,
('GOOGL', 'QRVO'): 2.6353344814422806,
('GOOGL', 'SWKS'): 7.124572235900465,
('GOOGL', 'TXN'): 1.6840029125995648,
('GOOGL', 'XLNX'): 0.5245130065173396,
('INTU', 'ADI'): 1.751296011542319,
('INTU', 'AMAT'): 9.586137473219715,
('INTU', 'AMD'): 13.539439140899896,
('INTU', 'AVGO'): 4.071252478781896,
('INTU', 'INTC'): 5.927393857176819,
('INTU', 'KLAC'): 1.9821576343612868,
('INTU', 'LRCX'): 24.105139605998502,
('INTU', 'MCHP'): 2.022870270040184,
('INTU', 'MU'): 41.5971311228647,
('INTU', 'NVDA'): 38.44431668259838,
('INTU', 'QCOM'): 24.550801037955857,
('INTU', 'QRVO'): 3.299520483719693,
('INTU', 'SWKS'): 6.186094457908639,
('INTU', 'TXN'): 1.0660962802519576,
('INTU', 'XLNX'): 1.4400592840000366,
('MA', 'ADI'): 2.935080982147751,
('MA', 'AMAT'): 7.618397641012901,
('MA', 'AMD'): 15.096972970089018,
('MA', 'AVGO'): 4.074457220782927,
('MA', 'INTC'): 7.175343403904266,
('MA', 'KLAC'): 2.778926360724334,
('MA', 'LRCX'): 20.448689367332328,
('MA', 'MCHP'): 1.7521076086846996,
('MA', 'MU'): 36.66557007389974,
('MA', 'NVDA'): 33.3619426658736,
('MA', 'QCOM'): 27.85879720917311,
('MA', 'QRVO'): 4.865818609764919,
('MA', 'SWKS'): 6.9578593037274254,
('MA', 'TXN'): 1.0191323715355078,
('MA', 'XLNX'): 2.4274168830795517,
('MSFT', 'ADI'): 1.8803364815253043,
('MSFT', 'AMAT'): 9.217085500885672,
('MSFT', 'AMD'): 14.129795736113532,
('MSFT', 'AVGO'): 4.122631744688235,
('MSFT', 'INTC'): 5.500999430446283,
('MSFT', 'KLAC'): 2.053888970457047,
('MSFT', 'LRCX'): 23.43085212265243,
('MSFT', 'MCHP'): 2.220780241314992,
('MSFT', 'MU'): 40.37932016642564,
('MSFT', 'NVDA'): 37.92775234110368,
('MSFT', 'QCOM'): 24.666532088192184,
('MSFT', 'QRVO'): 3.7170582173486455,
('MSFT', 'SWKS'): 6.482207717384746,
('MSFT', 'TXN'): 0.5759653537096523,
('MSFT', 'XLNX'): 1.75269278207508,
('NFLX', 'ADI'): 4.17577528613352,
('NFLX', 'AMAT'): 6.076876469767402,
('NFLX', 'AMD'): 14.343703256756674,
('NFLX', 'AVGO'): 3.4856634117804353,
('NFLX', 'INTC'): 9.802561115281504,
('NFLX', 'KLAC'): 3.0468563044287507,
('NFLX', 'LRCX'): 17.660767634811247,
('NFLX', 'MCHP'): 1.4663045721183574,
('NFLX', 'MU'): 33.9442862602299,
('NFLX', 'NVDA'): 29.983751208738926,
('NFLX', 'QCOM'): 32.61305961899776,
('NFLX', 'QRVO'): 5.465229138847734,
('NFLX', 'SWKS'): 6.08352444983263,
('NFLX', 'TXN'): 1.9400265053862158,
('NFLX', 'XLNX'): 3.6519196840191097,
('NTAP', 'ADI'): 6.955848572814682,
('NTAP', 'AMAT'): 9.534159756510446,
('NTAP', 'AMD'): 18.791851847270415,
('NTAP', 'AVGO'): 6.141731750548153,
('NTAP', 'INTC'): 11.313983355275052,
('NTAP', 'KLAC'): 5.622385025555119,
('NTAP', 'LRCX'): 21.859420192094536,
('NTAP', 'MCHP'): 5.934168240322908,
('NTAP', 'MU'): 33.359871632141434,
('NTAP', 'NVDA'): 34.49513158308581,
('NTAP', 'QCOM'): 34.06382149429426,
('NTAP', 'QRVO'): 9.900726491281485,
('NTAP', 'SWKS'): 10.95204457640072,
('NTAP', 'TXN'): 2.4964700617261997,
('NTAP', 'XLNX'): 8.196438205872427,
('ORCL', 'ADI'): 2.237859080294177,
('ORCL', 'AMAT'): 11.36725985738814,
('ORCL', 'AMD'): 7.744808769555508,
('ORCL', 'AVGO'): 3.628280724780087,
('ORCL', 'INTC'): 9.127402312076551,
('ORCL', 'KLAC'): 1.9666625510388887,
('ORCL', 'LRCX'): 26.3380801544425,
('ORCL', 'MCHP'): 2.3649409870550215,
('ORCL', 'MU'): 46.32812837336788,
('ORCL', 'NVDA'): 43.57909500699902,
('ORCL', 'QCOM'): 27.748789920419252,
('ORCL', 'QRVO'): 2.092417050465725,
('ORCL', 'SWKS'): 3.4639988164450752,
('ORCL', 'TXN'): 3.8927966052404903,
('ORCL', 'XLNX'): 2.3265285796604918,
('PAYX', 'ADI'): 5.273245677432621,
('PAYX', 'AMAT'): 35.66660509503371,
('PAYX', 'AMD'): 24.34207539586413,
('PAYX', 'AVGO'): 23.43063128659174,
('PAYX', 'INTC'): 1.4694648674078978,
('PAYX', 'KLAC'): 12.674199363197296,
('PAYX', 'LRCX'): 61.19587107091768,
('PAYX', 'MCHP'): 17.994460326854544,
('PAYX', 'MU'): 85.7109809119928,
('PAYX', 'NVDA'): 82.68973605434279,
('PAYX', 'QCOM'): 4.575606421432423,
('PAYX', 'QRVO'): 10.843215219243742,
('PAYX', 'SWKS'): 22.880415869794682,
('PAYX', 'TXN'): 9.008339362042072,
('PAYX', 'XLNX'): 5.247483808508269,
('RHT', 'ADI'): 18.009576455547833,
('RHT', 'AMAT'): 1.3902993520505271,
('RHT', 'AMD'): 23.50427092505474,
('RHT', 'AVGO'): 5.235385460682404,
('RHT', 'INTC'): 28.99042315171751,
('RHT', 'KLAC'): 11.462190235741506,
('RHT', 'LRCX'): 3.9347514482060673,
('RHT', 'MCHP'): 6.109404247131482,
('RHT', 'MU'): 11.193967715622387,
('RHT', 'NVDA'): 11.629685197164665,
('RHT', 'QCOM'): 65.23114304984585,
('RHT', 'QRVO'): 17.13480950272999,
('RHT', 'SWKS'): 10.794234160554456,
('RHT', 'TXN'): 11.531388438403074,
('RHT', 'XLNX'): 18.163783871728363,
('SNPS', 'ADI'): 4.665030522378919,
('SNPS', 'AMAT'): 4.2111509211811295,
('SNPS', 'AMD'): 10.63366920282201,
('SNPS', 'AVGO'): 0.9195603825634844,
('SNPS', 'INTC'): 13.039591357224342,
('SNPS', 'KLAC'): 1.7330298854192816,
('SNPS', 'LRCX'): 14.612695806918683,
('SNPS', 'MCHP'): 0.5986578446501867,
('SNPS', 'MU'): 28.968260437077348,
('SNPS', 'NVDA'): 29.40447854573659,
('SNPS', 'QCOM'): 38.778548066039626,
('SNPS', 'QRVO'): 4.085032654272124,
('SNPS', 'SWKS'): 2.766974459468903,
('SNPS', 'TXN'): 2.9403693350312694,
('SNPS', 'XLNX'): 5.42967222082347,
('SYMC', 'ADI'): 2.2853977924112274,
('SYMC', 'AMAT'): 22.871462838930405,
('SYMC', 'AMD'): 9.402337134118063,
('SYMC', 'AVGO'): 11.911479971425239,
('SYMC', 'INTC'): 6.557964616085342,
('SYMC', 'KLAC'): 4.687166171826333,
('SYMC', 'LRCX'): 43.40338461552502,
('SYMC', 'MCHP'): 8.299938487398158,
('SYMC', 'MU'): 67.68017770371281,
('SYMC', 'NVDA'): 66.75494128562804,
('SYMC', 'QCOM'): 18.49612606499751,
('SYMC', 'QRVO'): 3.1271746259948285,
('SYMC', 'SWKS'): 8.6933873801736,
('SYMC', 'TXN'): 7.075507223722063,
('SYMC', 'XLNX'): 2.766619347766248,
('TSS', 'ADI'): 3.6725300573839,
('TSS', 'AMAT'): 9.131078389941319,
('TSS', 'AMD'): 17.737619708919453,
('TSS', 'AVGO'): 5.100992735044828,
('TSS', 'INTC'): 6.8381829885920435,
('TSS', 'KLAC'): 3.96344097435693,
('TSS', 'LRCX'): 22.172386708097605,
('TSS', 'MCHP'): 2.929578489308252,
('TSS', 'MU'): 38.311821236585786,
('TSS', 'NVDA'): 34.25145354298562,
('TSS', 'QCOM'): 26.827964552473546,
('TSS', 'QRVO'): 6.234948323089551,
('TSS', 'SWKS'): 8.900870412229509,
('TSS', 'TXN'): 1.1467625205748755,
('TSS', 'XLNX'): 3.167828834732221,
('V', 'ADI'): 2.517895089161541,
('V', 'AMAT'): 6.996183783066659,
('V', 'AMD'): 11.675134951725434,
('V', 'AVGO'): 2.4480915503312657,
('V', 'INTC'): 8.405913278598494,
('V', 'KLAC'): 1.6613098801675854,
('V', 'LRCX'): 19.694890290000835,
('V', 'MCHP'): 0.9116461786887379,
('V', 'MU'): 36.16409052659907,
('V', 'NVDA'): 33.86405899286408,
('V', 'QCOM'): 30.091350717807885,
('V', 'QRVO'): 3.423067256173825,
('V', 'SWKS'): 4.4877857185558785,
('V', 'TXN'): 1.2447344545367391,
('V', 'XLNX'): 2.652737851307526,
('VRSN', 'ADI'): 3.06579512064087,
('VRSN', 'AMAT'): 6.7414531805595095,
('VRSN', 'AMD'): 12.83430263635474,
('VRSN', 'AVGO'): 2.738503779686982,
('VRSN', 'INTC'): 8.449443787143437,
('VRSN', 'KLAC'): 2.1684262007897592,
('VRSN', 'LRCX'): 19.08250883933981,
('VRSN', 'MCHP'): 1.2180349493319742,
('VRSN', 'MU'): 35.06684092556682,
('VRSN', 'NVDA'): 32.907270434259715,
('VRSN', 'QCOM'): 30.26595065139195,
('VRSN', 'QRVO'): 4.0625107553148485,
('VRSN', 'SWKS'): 5.213495731194438,
('VRSN', 'TXN'): 1.5696648850894575,
('VRSN', 'XLNX'): 2.8965990255744365,
('WU', 'ADI'): 12.83749513294439,
('WU', 'AMAT'): 53.43619825071505,
('WU', 'AMD'): 33.18207365023986,
('WU', 'AVGO'): 37.30204098086463,
('WU', 'INTC'): 6.444176386626403,
('WU', 'KLAC'): 23.33664694696038,
('WU', 'LRCX'): 83.75062683089261,
('WU', 'MCHP'): 30.394019960623993,
('WU', 'MU'): 113.82915436034669,
('WU', 'NVDA'): 108.20750814688978,
('WU', 'QCOM'): 2.0307105617280126,
('WU', 'QRVO'): 19.073607208988577,
('WU', 'SWKS'): 34.76551732113953,
('WU', 'TXN'): 19.515134819031534,
('WU', 'XLNX'): 12.255954726885394\}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}344}]:} \PY{c+c1}{\PYZsh{} Sort dictionary by value}
\PY{n}{d} \PY{o}{=} \PY{n}{dict\PYZus{}pairs}
\PY{c+c1}{\PYZsh{} Note: dictionaries do not guarantee insertion order before Python 3.7,}
\PY{c+c1}{\PYZsh{} so return the pairs as a list of (key, value) tuples sorted by value.}
\PY{n}{list\PYZus{}minimized\PYZus{}sum\PYZus{}of\PYZus{}squared\PYZus{}deviations} \PY{o}{=} \PY{p}{[}\PY{p}{(}\PY{n}{k}\PY{p}{,} \PY{n}{d}\PY{p}{[}\PY{n}{k}\PY{p}{]}\PY{p}{)} \PY{k}{for} \PY{n}{k} \PY{o+ow}{in} \PY{n+nb}{sorted}\PY{p}{(}\PY{n}{d}\PY{p}{,} \PY{n}{key}\PY{o}{=}\PY{n}{d}\PY{o}{.}\PY{n}{get}\PY{p}{,} \PY{n}{reverse}\PY{o}{=}\PY{k+kc}{False}\PY{p}{)}\PY{p}{]}
\PY{n}{list\PYZus{}minimized\PYZus{}sum\PYZus{}of\PYZus{}squared\PYZus{}deviations}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}344}]:} [(('GOOGL', 'XLNX'), 0.5245130065173396),
(('MSFT', 'TXN'), 0.5759653537096523),
(('SNPS', 'MCHP'), 0.5986578446501867),
(('FB', 'MCHP'), 0.6189008071597084),
(('FIS', 'XLNX'), 0.6465886000734158),
(('EBAY', 'ADI'), 0.6669700061807349),
(('GOOG', 'XLNX'), 0.6938522819778221),
(('FISV', 'XLNX'), 0.7275039920196817),
(('FISV', 'ADI'), 0.8126773010290321),
(('V', 'MCHP'), 0.9116461786887379),
(('EBAY', 'XLNX'), 0.9172979723184711),
(('SNPS', 'AVGO'), 0.9195603825634844),
(('FIS', 'ADI'), 0.9721460842565901),
(('GOOGL', 'ADI'), 0.9991659273680565),
(('MA', 'TXN'), 1.0191323715355078),
(('INTU', 'TXN'), 1.0660962802519576),
(('CRM', 'TXN'), 1.0856977608231528),
(('GOOG', 'ADI'), 1.1283331094134528),
(('TSS', 'TXN'), 1.1467625205748755),
(('CRM', 'MCHP'), 1.20869169961224),
(('VRSN', 'MCHP'), 1.2180349493319742),
(('V', 'TXN'), 1.2447344545367391),
(('ADBE', 'AMAT'), 1.2800980451057407),
(('CRM', 'KLAC'), 1.3086781125145386),
(('FB', 'AVGO'), 1.3819220813233803),
(('RHT', 'AMAT'), 1.3902993520505271),
(('GOOG', 'TXN'), 1.399497980887719),
(('INTU', 'XLNX'), 1.4400592840000366),
(('CDNS', 'AMAT'), 1.4409675059472304),
(('NFLX', 'MCHP'), 1.4663045721183574),
(('PAYX', 'INTC'), 1.4694648674078978),
(('ADP', 'INTC'), 1.513227017157409),
(('ANSS', 'AVGO'), 1.5230308747745935),
(('VRSN', 'TXN'), 1.5696648850894575),
(('V', 'KLAC'), 1.6613098801675854),
(('GOOGL', 'TXN'), 1.6840029125995648),
(('CTXS', 'ADI'), 1.7111418190556513),
(('ADSK', 'MCHP'), 1.7208553397356319),
(('SNPS', 'KLAC'), 1.7330298854192816),
(('INTU', 'ADI'), 1.751296011542319),
(('MA', 'MCHP'), 1.7521076086846996),
(('MSFT', 'XLNX'), 1.75269278207508),
(('GOOG', 'KLAC'), 1.7678068660222688),
(('ANSS', 'AMAT'), 1.8386290662095512),
(('MSFT', 'ADI'), 1.8803364815253043),
(('ADSK', 'AVGO'), 1.8915725819144973),
(('NFLX', 'TXN'), 1.9400265053862158),
(('ORCL', 'KLAC'), 1.9666625510388887),
(('INTU', 'KLAC'), 1.9821576343612868),
(('CRM', 'ADI'), 1.997656016697197),
(('INTU', 'MCHP'), 2.022870270040184),
(('WU', 'QCOM'), 2.0307105617280126),
(('MSFT', 'KLAC'), 2.053888970457047),
(('GOOGL', 'KLAC'), 2.0612747556407918),
(('ORCL', 'QRVO'), 2.092417050465725),
(('VRSN', 'KLAC'), 2.1684262007897592),
(('CRM', 'XLNX'), 2.2146502472736547),
(('MSFT', 'MCHP'), 2.220780241314992),
(('EA', 'AVGO'), 2.226087984222831),
(('FB', 'KLAC'), 2.2368750663683565),
(('ORCL', 'ADI'), 2.237859080294177),
(('EBAY', 'QRVO'), 2.277695702694797),
(('SYMC', 'ADI'), 2.2853977924112274),
(('ORCL', 'XLNX'), 2.3265285796604918),
(('ORCL', 'MCHP'), 2.3649409870550215),
(('CTXS', 'XLNX'), 2.368944712068714),
(('EA', 'MCHP'), 2.381116610056907),
(('MA', 'XLNX'), 2.4274168830795517),
(('V', 'AVGO'), 2.4480915503312657),
(('NTAP', 'TXN'), 2.4964700617261997),
(('EBAY', 'KLAC'), 2.5157133544305084),
(('V', 'ADI'), 2.517895089161541),
(('CRM', 'AVGO'), 2.6179215903960937),
(('GOOGL', 'QRVO'), 2.6353344814422806),
(('GOOG', 'QRVO'), 2.6492552874095923),
(('V', 'XLNX'), 2.652737851307526),
(('CRM', 'QRVO'), 2.6604460084357497),
(('CTXS', 'INTC'), 2.7353165635824075),
(('VRSN', 'AVGO'), 2.738503779686982),
(('SYMC', 'XLNX'), 2.766619347766248),
(('SNPS', 'SWKS'), 2.766974459468903),
(('MA', 'KLAC'), 2.778926360724334),
(('CDNS', 'LRCX'), 2.786202384120178),
(('FB', 'TXN'), 2.877096600387234),
(('FISV', 'QRVO'), 2.8842466377820197),
(('VRSN', 'XLNX'), 2.8965990255744365),
(('TSS', 'MCHP'), 2.929578489308252),
(('MA', 'ADI'), 2.935080982147751),
(('SNPS', 'TXN'), 2.9403693350312694),
(('FIS', 'INTC'), 2.948774328347077),
(('ANSS', 'MCHP'), 2.971177160670713),
(('EA', 'SWKS'), 3.0018709650252138),
(('NFLX', 'KLAC'), 3.0468563044287507),
(('VRSN', 'ADI'), 3.06579512064087),
(('EBAY', 'TXN'), 3.0875436703530985),
(('SYMC', 'QRVO'), 3.1271746259948285),
(('ADSK', 'AMAT'), 3.1305312474565037),
(('ADP', 'XLNX'), 3.1367306779098536),
(('TSS', 'XLNX'), 3.167828834732221),
(('FISV', 'INTC'), 3.22238329364667),
(('ADBE', 'AVGO'), 3.229031816092198),
(('FIS', 'TXN'), 3.241467900080252),
(('GOOG', 'MCHP'), 3.263660244777946),
(('INTU', 'QRVO'), 3.299520483719693),
(('FISV', 'TXN'), 3.326089525756251),
(('ADP', 'ADI'), 3.3399481439441305),
(('FB', 'SWKS'), 3.4153550499609855),
(('V', 'QRVO'), 3.423067256173825),
(('CA', 'INTC'), 3.445512618519095),
(('ORCL', 'SWKS'), 3.4639988164450752),
(('ADSK', 'SWKS'), 3.47605743380632),
(('FIS', 'QRVO'), 3.4764696998930686),
(('NFLX', 'AVGO'), 3.4856634117804353),
(('FISV', 'KLAC'), 3.5291831245484273),
(('ORCL', 'AVGO'), 3.628280724780087),
(('NFLX', 'XLNX'), 3.6519196840191097),
(('TSS', 'ADI'), 3.6725300573839),
(('MSFT', 'QRVO'), 3.7170582173486455),
(('GOOGL', 'MCHP'), 3.865271615523935),
(('ORCL', 'TXN'), 3.8927966052404903),
(('RHT', 'LRCX'), 3.9347514482060673),
(('TSS', 'KLAC'), 3.96344097435693),
(('FB', 'QRVO'), 4.048544380735673),
(('GOOGL', 'INTC'), 4.051446129603078),
(('VRSN', 'QRVO'), 4.0625107553148485),
(('INTU', 'AVGO'), 4.071252478781896),
(('MA', 'AVGO'), 4.074457220782927),
(('SNPS', 'QRVO'), 4.085032654272124),
(('MSFT', 'AVGO'), 4.122631744688235),
(('NFLX', 'ADI'), 4.17577528613352),
(('FIS', 'KLAC'), 4.200469503274639),
(('SNPS', 'AMAT'), 4.2111509211811295),
(('FB', 'XLNX'), 4.22918299708272),
(('CTXS', 'QRVO'), 4.297245266774303),
(('FB', 'ADI'), 4.310549648198803),
(('CRM', 'SWKS'), 4.313964227859302),
(('EBAY', 'MCHP'), 4.4358271633060635),
(('V', 'SWKS'), 4.4877857185558785),
(('GOOG', 'INTC'), 4.572231565891804),
(('PAYX', 'QCOM'), 4.575606421432423),
(('SNPS', 'ADI'), 4.665030522378919),
(('SYMC', 'KLAC'), 4.687166171826333),
(('ADBE', 'MCHP'), 4.7534664617861475),
(('EBAY', 'INTC'), 4.844847353878416),
(('MA', 'QRVO'), 4.865818609764919),
(('CTXS', 'TXN'), 4.868710949397263),
(('ADBE', 'LRCX'), 4.947437744266467),
(('CA', 'XLNX'), 4.962389858707814),
(('ATVI', 'LRCX'), 4.964722388916429),
(('TSS', 'AVGO'), 5.100992735044828),
(('CDNS', 'AVGO'), 5.161811092239464),
(('ADSK', 'KLAC'), 5.179947441698206),
(('VRSN', 'SWKS'), 5.213495731194438),
(('CTXS', 'KLAC'), 5.217057967076028),
(('RHT', 'AVGO'), 5.235385460682404),
(('PAYX', 'XLNX'), 5.247483808508269),
(('PAYX', 'ADI'), 5.273245677432621),
(('FB', 'AMAT'), 5.355019798959194),
(('AKAM', 'QCOM'), 5.397042405948808),
(('SNPS', 'XLNX'), 5.42967222082347),
(('GOOG', 'AVGO'), 5.433618853208897),
(('CA', 'ADI'), 5.445553025342331),
(('NFLX', 'QRVO'), 5.465229138847734),
(('MSFT', 'INTC'), 5.500999430446283),
(('EA', 'KLAC'), 5.603884866043956),
(('NTAP', 'KLAC'), 5.622385025555119),
(('EA', 'AMAT'), 5.67038492363221),
(('ANSS', 'SWKS'), 5.708933420619753),
(('INTU', 'INTC'), 5.927393857176819),
(('NTAP', 'MCHP'), 5.934168240322908),
(('CA', 'QCOM'), 5.947465216980399),
(('ANSS', 'KLAC'), 5.995556859729725),
(('NFLX', 'AMAT'), 6.076876469767402),
(('NFLX', 'SWKS'), 6.08352444983263),
(('RHT', 'MCHP'), 6.109404247131482),
(('FIS', 'MCHP'), 6.127472044001941),
(('NTAP', 'AVGO'), 6.141731750548153),
(('INTU', 'SWKS'), 6.186094457908639),
(('GOOGL', 'AVGO'), 6.221321566606663),
(('TSS', 'QRVO'), 6.234948323089551),
(('FISV', 'MCHP'), 6.276913817659961),
(('WU', 'INTC'), 6.444176386626403),
(('MSFT', 'SWKS'), 6.482207717384746),
(('SYMC', 'INTC'), 6.557964616085342),
(('ADSK', 'QRVO'), 6.559853702580115),
(('ADP', 'TXN'), 6.608582231432001),
(('GOOG', 'SWKS'), 6.6086921281843525),
(('VRSN', 'AMAT'), 6.7414531805595095),
(('EBAY', 'SWKS'), 6.773082305246333),
(('TSS', 'INTC'), 6.8381829885920435),
(('NTAP', 'ADI'), 6.955848572814682),
(('MA', 'SWKS'), 6.9578593037274254),
(('V', 'AMAT'), 6.996183783066659),
(('CDNS', 'MCHP'), 7.015705353059896),
(('SYMC', 'TXN'), 7.075507223722063),
(('EA', 'QRVO'), 7.080377861068116),
(('ATVI', 'AMAT'), 7.122805862797942),
(('GOOGL', 'SWKS'), 7.124572235900465),
(('MA', 'INTC'), 7.175343403904266),
(('ANSS', 'TXN'), 7.2347660308521835),
(('EBAY', 'AVGO'), 7.3766072626476955),
(('CRM', 'INTC'), 7.446315317339748),
(('MA', 'AMAT'), 7.618397641012901),
(('CRM', 'AMAT'), 7.6193332473852555),
(('ADP', 'QCOM'), 7.667910811503187),
(('ORCL', 'AMD'), 7.744808769555508),
(('ANSS', 'LRCX'), 7.936326621368339),
(('ADP', 'QRVO'), 7.965328007054493),
(('ADBE', 'SWKS'), 8.013527852182303),
(('NTAP', 'XLNX'), 8.196438205872427),
(('ADSK', 'TXN'), 8.245649450023652),
(('SYMC', 'MCHP'), 8.299938487398158),
(('V', 'INTC'), 8.405913278598494),
(('VRSN', 'INTC'), 8.449443787143437),
(('ADSK', 'XLNX'), 8.688691070489895),
(('SYMC', 'SWKS'), 8.6933873801736),
(('TSS', 'SWKS'), 8.900870412229509),
(('ADBE', 'KLAC'), 8.983868763624907),
(('FISV', 'SWKS'), 8.990832337676242),
(('PAYX', 'TXN'), 9.008339362042072),
(('ORCL', 'INTC'), 9.127402312076551),
(('TSS', 'AMAT'), 9.131078389941319),
(('MSFT', 'AMAT'), 9.217085500885672),
(('FISV', 'AVGO'), 9.366093956502034),
(('SYMC', 'AMD'), 9.402337134118063),
(('EBAY', 'AMD'), 9.470492419121397),
(('NTAP', 'AMAT'), 9.534159756510446),
(('ADP', 'KLAC'), 9.546610453832688),
(('ADSK', 'ADI'), 9.550995428644642),
(('INTU', 'AMAT'), 9.586137473219715),
(('FIS', 'SWKS'), 9.631628850034886),
(('FIS', 'AVGO'), 9.65874003260073),
(('ADBE', 'TXN'), 9.743496085765168),
(('NFLX', 'INTC'), 9.802561115281504),
(('CDNS', 'SWKS'), 9.810187459426341),
(('CA', 'QRVO'), 9.818295962133433),
(('NTAP', 'QRVO'), 9.900726491281485),
(('ANSS', 'QRVO'), 9.963992834470323),
(('EA', 'XLNX'), 9.964181785582714),
(('CDNS', 'MU'), 10.003787537836825),
(('CTXS', 'MCHP'), 10.00839196776698),
(('EA', 'TXN'), 10.291513338129036),
(('ATVI', 'AVGO'), 10.584802766357736),
(('EA', 'ADI'), 10.632559611184648),
(('SNPS', 'AMD'), 10.63366920282201),
(('ADSK', 'LRCX'), 10.712682283806249),
(('CA', 'TXN'), 10.752124544886751),
(('RHT', 'SWKS'), 10.794234160554456),
(('PAYX', 'QRVO'), 10.843215219243742),
(('NTAP', 'SWKS'), 10.95204457640072),
(('RHT', 'MU'), 11.193967715622387),
(('NTAP', 'INTC'), 11.313983355275052),
(('ORCL', 'AMAT'), 11.36725985738814),
(('EA', 'AMD'), 11.369723342340796),
(('FB', 'AMD'), 11.381937533598368),
(('CDNS', 'NVDA'), 11.428964086685394),
(('RHT', 'KLAC'), 11.462190235741506),
(('RHT', 'TXN'), 11.531388438403074),
(('FB', 'INTC'), 11.62931972616743),
(('RHT', 'NVDA'), 11.629685197164665),
(('V', 'AMD'), 11.675134951725434),
(('ANSS', 'ADI'), 11.705766345503283),
(('CRM', 'AMD'), 11.724097423669896),
(('ANSS', 'XLNX'), 11.830091672421048),
(('CTXS', 'QCOM'), 11.870586808569865),
(('SYMC', 'AVGO'), 11.911479971425239),
(('CTXS', 'SWKS'), 11.917604880251496),
(('WU', 'XLNX'), 12.255954726885394),
(('FISV', 'AMD'), 12.297532416462612),
(('GOOG', 'AMAT'), 12.387602835021475),
(('ATVI', 'NVDA'), 12.450756584249177),
(('CDNS', 'KLAC'), 12.486160085666137),
(('CA', 'KLAC'), 12.489029290514786),
(('ATVI', 'MU'), 12.654621517069684),
(('PAYX', 'KLAC'), 12.674199363197296),
(('VRSN', 'AMD'), 12.83430263635474),
(('WU', 'ADI'), 12.83749513294439),
(('SNPS', 'INTC'), 13.039591357224342),
(('GOOG', 'AMD'), 13.05014283097869),
(('FIS', 'AMD'), 13.099156992395116),
(('CTXS', 'AVGO'), 13.109353439176544),
(('GOOGL', 'AMD'), 13.200857208214854),
(('ADP', 'MCHP'), 13.297060351310222),
(('INTU', 'AMD'), 13.539439140899896),
(('GOOGL', 'AMAT'), 13.6722252827118),
(('ADBE', 'QRVO'), 13.91965734214309),
(('EA', 'LRCX'), 13.922809590464553),
(('ADSK', 'AMD'), 13.965940041283234),
(('ADBE', 'MU'), 14.015114483544183),
(('ADBE', 'NVDA'), 14.090597686215157),
(('CTXS', 'AMD'), 14.091955561162326),
(('MSFT', 'AMD'), 14.129795736113532),
(('NFLX', 'AMD'), 14.343703256756674),
(('CDNS', 'TXN'), 14.4872169329341),
(('ATVI', 'MCHP'), 14.488160438511093),
(('SNPS', 'LRCX'), 14.612695806918683),
(('ATVI', 'SWKS'), 14.613960720782337),
(('FISV', 'QCOM'), 14.854789074063332),
(('FIS', 'QCOM'), 15.050310033931911),
(('MA', 'AMD'), 15.096972970089018),
(('ADBE', 'ADI'), 15.641497452290585),
(('ADBE', 'XLNX'), 15.995232558511132),
(('FB', 'LRCX'), 16.302038846448006),
(('EBAY', 'AMAT'), 16.369847413421255),
(('RHT', 'QRVO'), 17.13480950272999),
(('CA', 'MCHP'), 17.589221451485724),
(('NFLX', 'LRCX'), 17.660767634811247),
(('TSS', 'AMD'), 17.737619708919453),
(('ADP', 'SWKS'), 17.82225497072124),
(('CDNS', 'QRVO'), 17.888684971584798),
(('PAYX', 'MCHP'), 17.994460326854544),
(('RHT', 'ADI'), 18.009576455547833),
(('ANSS', 'AMD'), 18.06570273178554),
(('RHT', 'XLNX'), 18.163783871728363),
(('ADP', 'AVGO'), 18.21057535276634),
(('SYMC', 'QCOM'), 18.49612606499751),
(('ANSS', 'NVDA'), 18.617339876833864),
(('NTAP', 'AMD'), 18.791851847270415),
(('FIS', 'AMAT'), 18.883272204506873),
(('GOOGL', 'QCOM'), 19.031407198249152),
(('WU', 'QRVO'), 19.073607208988577),
(('VRSN', 'LRCX'), 19.08250883933981),
(('FISV', 'AMAT'), 19.123786755704163),
(('ANSS', 'MU'), 19.198469575621022),
(('EBAY', 'QCOM'), 19.3054523972676),
(('WU', 'TXN'), 19.515134819031534),
(('ADP', 'AMD'), 19.534769843996934),
(('V', 'LRCX'), 19.694890290000835),
(('ADSK', 'INTC'), 20.185431992966514),
(('MA', 'LRCX'), 20.448689367332328),
(('GOOG', 'QCOM'), 20.56211397569321),
(('CDNS', 'ADI'), 20.64316526927612),
(('CA', 'AMD'), 20.73605408420914),
(('CRM', 'LRCX'), 20.941885192615864),
(('CA', 'SWKS'), 21.009716682953986),
(('ADBE', 'AMD'), 21.141148492516677),
(('CDNS', 'XLNX'), 21.40032723887533),
(('ATVI', 'KLAC'), 21.551867317891215),
(('ANSS', 'INTC'), 21.74741223760421),
(('NTAP', 'LRCX'), 21.859420192094536),
(('TSS', 'LRCX'), 22.172386708097605),
(('AKAM', 'INTC'), 22.18151190769331),
(('ADSK', 'NVDA'), 22.735921675969003),
(('CA', 'AVGO'), 22.746763651316854),
(('CDNS', 'AMD'), 22.751348532462618),
(('SYMC', 'AMAT'), 22.871462838930405),
(('PAYX', 'SWKS'), 22.880415869794682),
(('EA', 'INTC'), 23.2365422708407),
(('WU', 'KLAC'), 23.33664694696038),
(('PAYX', 'AVGO'), 23.43063128659174),
(('MSFT', 'LRCX'), 23.43085212265243),
(('RHT', 'AMD'), 23.50427092505474),
(('INTU', 'LRCX'), 24.105139605998502),
(('PAYX', 'AMD'), 24.34207539586413),
(('CTXS', 'AMAT'), 24.355743725844047),
(('INTU', 'QCOM'), 24.550801037955857),
(('MSFT', 'QCOM'), 24.666532088192184),
(('ADSK', 'MU'), 26.27962743127616),
(('ORCL', 'LRCX'), 26.3380801544425),
(('ADBE', 'INTC'), 26.603769805898253),
(('EA', 'NVDA'), 26.652673080545902),
(('TSS', 'QCOM'), 26.827964552473546),
(('ATVI', 'QRVO'), 27.41276718406751),
(('ORCL', 'QCOM'), 27.748789920419252),
(('ATVI', 'TXN'), 27.81844331361608),
(('MA', 'QCOM'), 27.85879720917311),
(('ATVI', 'AMD'), 28.10278074203699),
(('CRM', 'QCOM'), 28.24636354741675),
(('GOOG', 'LRCX'), 28.827379638643123),
(('SNPS', 'MU'), 28.968260437077348),
(('RHT', 'INTC'), 28.99042315171751),
(('SNPS', 'NVDA'), 29.40447854573659),
(('ADP', 'AMAT'), 29.55695293245271),
(('FB', 'NVDA'), 29.91482373000176),
(('NFLX', 'NVDA'), 29.983751208738926),
(('V', 'QCOM'), 30.091350717807885),
(('VRSN', 'QCOM'), 30.26595065139195),
(('WU', 'MCHP'), 30.394019960623993),
(('GOOGL', 'LRCX'), 30.80466655990735),
(('EA', 'MU'), 31.16913827811288),
(('FB', 'MU'), 32.511727562217175),
(('NFLX', 'QCOM'), 32.61305961899776),
(('VRSN', 'NVDA'), 32.907270434259715),
(('WU', 'AMD'), 33.18207365023986),
(('NTAP', 'MU'), 33.359871632141434),
(('MA', 'NVDA'), 33.3619426658736),
(('ATVI', 'ADI'), 33.61372729988623),
(('V', 'NVDA'), 33.86405899286408),
(('ATVI', 'XLNX'), 33.92204891825767),
(('NFLX', 'MU'), 33.9442862602299),
(('NTAP', 'QCOM'), 34.06382149429426),
(('CDNS', 'INTC'), 34.19982518433737),
(('TSS', 'NVDA'), 34.25145354298562),
(('NTAP', 'NVDA'), 34.49513158308581),
(('EBAY', 'LRCX'), 34.66476798597868),
(('WU', 'SWKS'), 34.76551732113953),
(('VRSN', 'MU'), 35.06684092556682),
(('PAYX', 'AMAT'), 35.66660509503371),
(('FB', 'QCOM'), 35.70497897079473),
(('V', 'MU'), 36.16409052659907),
(('CRM', 'NVDA'), 36.19835380367905),
(('CA', 'AMAT'), 36.49507269746307),
(('MA', 'MU'), 36.66557007389974),
(('AKAM', 'XLNX'), 36.77129923927621),
(('AKAM', 'ADI'), 36.80614763565199),
(('WU', 'AVGO'), 37.30204098086463),
(('CRM', 'MU'), 37.57257377897888),
(('MSFT', 'NVDA'), 37.92775234110368),
(('FIS', 'LRCX'), 38.08838038691995),
(('TSS', 'MU'), 38.311821236585786),
(('INTU', 'NVDA'), 38.44431668259838),
(('SNPS', 'QCOM'), 38.778548066039626),
(('FISV', 'LRCX'), 38.787350179774954),
(('MSFT', 'MU'), 40.37932016642564),
(('INTU', 'MU'), 41.5971311228647),
(('SYMC', 'LRCX'), 43.40338461552502),
(('ORCL', 'NVDA'), 43.57909500699902),
(('AKAM', 'TXN'), 45.15239789357456),
(('GOOG', 'NVDA'), 45.37886778456493),
(('CTXS', 'LRCX'), 46.31669601666305),
(('ORCL', 'MU'), 46.32812837336788),
(('GOOGL', 'NVDA'), 47.857521548210244),
(('AKAM', 'QRVO'), 48.090892214536396),
(('GOOG', 'MU'), 48.13536028587864),
(('ADSK', 'QCOM'), 48.4727188419712),
(('GOOGL', 'MU'), 50.74060670955242),
(('EA', 'QCOM'), 51.25816088996295),
(('ANSS', 'QCOM'), 52.42964327275603),
(('ADP', 'LRCX'), 52.833379866900344),
(('EBAY', 'NVDA'), 53.122504526800135),
(('ATVI', 'INTC'), 53.19064463347061),
(('WU', 'AMAT'), 53.43619825071505),
(('AKAM', 'KLAC'), 53.65149329006273),
(('EBAY', 'MU'), 55.78390231749456),
(('FIS', 'NVDA'), 56.141610858453),
(('FISV', 'NVDA'), 58.00668154104851),
(('FIS', 'MU'), 60.3922441819914),
(('ADBE', 'QCOM'), 60.97321968260145),
(('FISV', 'MU'), 61.0502194903199),
(('PAYX', 'LRCX'), 61.19587107091768),
(('CA', 'LRCX'), 62.2387275194252),
(('AKAM', 'MCHP'), 64.97435841816045),
(('RHT', 'QCOM'), 65.23114304984585),
(('AKAM', 'AMD'), 65.99217019899946),
(('SYMC', 'NVDA'), 66.75494128562804),
(('SYMC', 'MU'), 67.68017770371281),
(('CTXS', 'NVDA'), 68.519632570599),
(('CTXS', 'MU'), 68.9365041143938),
(('AKAM', 'SWKS'), 71.71002966893637),
(('ADP', 'NVDA'), 72.71541042059124),
(('CDNS', 'QCOM'), 72.89573389114403),
(('AKAM', 'AVGO'), 74.57691760935943),
(('ADP', 'MU'), 77.29454456617162),
(('PAYX', 'NVDA'), 82.68973605434279),
(('WU', 'LRCX'), 83.75062683089261),
(('CA', 'NVDA'), 84.39443333912233),
(('PAYX', 'MU'), 85.7109809119928),
(('CA', 'MU'), 89.04651743061828),
(('AKAM', 'AMAT'), 95.54572746289244),
(('ATVI', 'QCOM'), 96.58766474067346),
(('WU', 'NVDA'), 108.20750814688978),
(('WU', 'MU'), 113.82915436034669),
(('AKAM', 'LRCX'), 134.67021174233633),
(('AKAM', 'NVDA'), 162.55528571903338),
(('AKAM', 'MU'), 168.02243780617792)]
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}347}]:} \PY{n+nb}{len}\PY{p}{(}\PY{n}{list\PYZus{}minimized\PYZus{}sum\PYZus{}of\PYZus{}squared\PYZus{}deviations}\PY{p}{)}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}347}]:} 465
\end{Verbatim}
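The list above appears to rank candidate ticker pairs by the sum of
squared deviations (SSD) between their normalized price series,
smallest (most similar) first. As a minimal illustrative sketch, the
function below shows one way such a ranking could be produced; the
function name and the assumption that each industry's normalized
closing prices live in their own DataFrame (as with
df\_ticker\_closing\_for\_software and
df\_ticker\_closing\_for\_semiconductors used elsewhere in this
notebook) are mine and not part of the original notebook.

\begin{Verbatim}
# Illustrative sketch (not from the original notebook): rank cross-industry
# ticker pairs by the sum of squared deviations of their normalized prices.
from itertools import product

import pandas as pd

def rank_pairs_by_ssd(df_left: pd.DataFrame, df_right: pd.DataFrame) -> list:
    """Return ((left, right), ssd) tuples sorted by SSD, smallest first."""
    ssd_by_pair = {}
    for left, right in product(df_left.columns, df_right.columns):
        deviations = df_left[left] - df_right[right]
        ssd_by_pair[(left, right)] = float((deviations ** 2).sum())
    return sorted(ssd_by_pair.items(), key=lambda item: item[1])

# Hypothetical usage:
# ranked = rank_pairs_by_ssd(df_ticker_closing_for_software,
#                            df_ticker_closing_for_semiconductors)
\end{Verbatim}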
\subsubsection{Excess Return
Computation}\label{excess-return-computation}
Because pairs may open and close at various points during the six-month
trading period, the calculation of the excess return on a portfolio of
pairs is a non-trivial issue. Pairs that open and converge during the
trading interval will have positive cash flows. Because pairs can reopen
after initial convergence, they can have multiple positive cash flows
during the trading interval. Pairs that open but do not converge will
only have cash flows on the last day of the trading interval when all
positions are closed out. Therefore, the payoffs to pairs trading
strategies are a set of positive cash flows that are randomly
distributed throughout the trading period, and a set of cash flows at
the end of the trading interval which can either be positive or
negative.
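As a concrete illustration of the bookkeeping this implies, the sketch
below aggregates per-pair cash flows into portfolio excess returns. It
is only a sketch under assumed conventions (the notebook does not show
this step): following a common convention in the pairs-trading
literature, the return is reported both on committed capital, where
every pair in the portfolio counts in the denominator, and on employed
capital, where only pairs that actually opened count.

\begin{Verbatim}
# Illustrative sketch (assumed conventions, not from the original notebook).
import pandas as pd

def excess_returns(cash_flows: pd.DataFrame, opened: pd.Series) -> pd.DataFrame:
    """Aggregate pair-level cash flows into portfolio excess returns.

    cash_flows: one column per pair, one row per trading day; positive
        entries on days a pair converges and is closed, and a final entry
        (positive or negative) when still-open pairs are closed out on the
        last day of the trading interval.
    opened: boolean Series indexed by pair, True if the pair ever opened.
    """
    committed = cash_flows.shape[1]       # every pair in the portfolio
    employed = max(int(opened.sum()), 1)  # only pairs that ever opened
    total = cash_flows.sum(axis=1)
    return pd.DataFrame({
        'return_on_committed_capital': total / committed,
        'return_on_employed_capital': total / employed,
    })
\end{Verbatim}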
\paragraph{Main Industry (Software)}\label{main-industry-software}

(('GOOG', 'GOOGL'), 0.04045301814067839)

\paragraph{Main Industry and Related Industry (Software and
Semiconductors)}\label{main-industry-and-related-industry-software-and-semiconductors}

(('GOOGL', 'XLNX'), 0.5245130065173396)
\subsubsection{Get the daily returns for these tickers (GOOG, GOOGL,
XLNX)}\label{get-the-daily-returns-for-these-industries-goog-googl-xlnx}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}116}]:} \PY{c+c1}{\PYZsh{} Get just the column for the normalized price (which is of type Pandas Series)}
\PY{c+c1}{\PYZsh{} and convert to Pandas DataFrame.}
\PY{n}{df\PYZus{}goog} \PY{o}{=} \PY{n}{df\PYZus{}ticker\PYZus{}closing\PYZus{}for\PYZus{}software}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{GOOG}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{o}{.}\PY{n}{to\PYZus{}frame}\PY{p}{(}\PY{p}{)}
\PY{n}{df\PYZus{}googl} \PY{o}{=} \PY{n}{df\PYZus{}ticker\PYZus{}closing\PYZus{}for\PYZus{}software}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{GOOGL}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{o}{.}\PY{n}{to\PYZus{}frame}\PY{p}{(}\PY{p}{)}
\PY{n}{df\PYZus{}xlnx} \PY{o}{=} \PY{n}{df\PYZus{}ticker\PYZus{}closing\PYZus{}for\PYZus{}semiconductors}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{XLNX}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{o}{.}\PY{n}{to\PYZus{}frame}\PY{p}{(}\PY{p}{)}
\PY{c+c1}{\PYZsh{} Change the names of the columns.}
\PY{n}{df\PYZus{}goog}\PY{o}{.}\PY{n}{columns} \PY{o}{=} \PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\PY{n}{df\PYZus{}googl}\PY{o}{.}\PY{n}{columns} \PY{o}{=} \PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\PY{n}{df\PYZus{}xlnx}\PY{o}{.}\PY{n}{columns} \PY{o}{=} \PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}
\PY{c+c1}{\PYZsh{} Calculate the mean of the normalized price for each ticker}
\PY{n}{df\PYZus{}goog\PYZus{}mean\PYZus{}average} \PY{o}{=} \PY{n}{df\PYZus{}goog}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{o}{.}\PY{n}{mean}\PY{p}{(}\PY{p}{)}
\PY{n}{df\PYZus{}googl\PYZus{}mean\PYZus{}average} \PY{o}{=} \PY{n}{df\PYZus{}googl}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{o}{.}\PY{n}{mean}\PY{p}{(}\PY{p}{)}
\PY{n}{df\PYZus{}xlnx\PYZus{}mean\PYZus{}average} \PY{o}{=} \PY{n}{df\PYZus{}xlnx}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{o}{.}\PY{n}{mean}\PY{p}{(}\PY{p}{)}
\PY{c+c1}{\PYZsh{} daily returns}
\PY{c+c1}{\PYZsh{} https://www.fool.com/knowledge\PYZhy{}center/how\PYZhy{}to\PYZhy{}convert\PYZhy{}daily\PYZhy{}returns\PYZhy{}to\PYZhy{}annual\PYZhy{}returns.aspx}
\PY{c+c1}{\PYZsh{} amount}
\PY{n}{df\PYZus{}goog}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{daily\PYZus{}return}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{p}{(}\PY{n}{df\PYZus{}goog}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{/} \PY{n}{df\PYZus{}goog}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{)} \PY{o}{\PYZhy{}} \PY{l+m+mf}{1.0}
\PY{n}{df\PYZus{}googl}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{daily\PYZus{}return}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{p}{(}\PY{n}{df\PYZus{}googl}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{/} \PY{n}{df\PYZus{}googl}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{)} \PY{o}{\PYZhy{}} \PY{l+m+mf}{1.0}
\PY{n}{df\PYZus{}xlnx}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{daily\PYZus{}return}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{p}{(}\PY{n}{df\PYZus{}xlnx}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{/} \PY{n}{df\PYZus{}xlnx}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{)} \PY{o}{\PYZhy{}} \PY{l+m+mf}{1.0}
\PY{c+c1}{\PYZsh{} Average return}
\PY{n}{df\PYZus{}goog\PYZus{}average\PYZus{}return} \PY{o}{=} \PY{n}{df\PYZus{}goog}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{daily\PYZus{}return}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{o}{.}\PY{n}{mean}\PY{p}{(}\PY{p}{)}
\PY{n}{df\PYZus{}googl\PYZus{}average\PYZus{}return} \PY{o}{=} \PY{n}{df\PYZus{}googl}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{daily\PYZus{}return}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{o}{.}\PY{n}{mean}\PY{p}{(}\PY{p}{)}
\PY{n}{df\PYZus{}xlnx\PYZus{}average\PYZus{}return} \PY{o}{=} \PY{n}{df\PYZus{}xlnx}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{daily\PYZus{}return}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{o}{.}\PY{n}{mean}\PY{p}{(}\PY{p}{)}
\PY{c+c1}{\PYZsh{} Standard Deviation of return}
\PY{n}{goog\PYZus{}std\PYZus{}return}\PY{o}{=} \PY{n}{df\PYZus{}goog}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{daily\PYZus{}return}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{o}{.}\PY{n}{std}\PY{p}{(}\PY{p}{)}
\PY{n}{googl\PYZus{}std\PYZus{}return} \PY{o}{=} \PY{n}{df\PYZus{}googl}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{daily\PYZus{}return}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{o}{.}\PY{n}{std}\PY{p}{(}\PY{p}{)}
\PY{n}{xlnx\PYZus{}std\PYZus{}return} \PY{o}{=} \PY{n}{df\PYZus{}xlnx}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{daily\PYZus{}return}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{o}{.}\PY{n}{std}\PY{p}{(}\PY{p}{)}
\PY{c+c1}{\PYZsh{} Get the mean of pairs}
\PY{c+c1}{\PYZsh{}}
\PY{c+c1}{\PYZsh{} Main Industry pairs}
\PY{n}{mean\PYZus{}pair\PYZus{}goog\PYZus{}googl} \PY{o}{=} \PY{p}{(}\PY{n}{df\PYZus{}goog}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{+} \PY{n}{df\PYZus{}googl}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{p}{)}\PY{o}{.}\PY{n}{mean}\PY{p}{(}\PY{p}{)}
\PY{n}{mean\PYZus{}pair\PYZus{}googl\PYZus{}googl} \PY{o}{=} \PY{p}{(}\PY{n}{df\PYZus{}googl}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{+} \PY{n}{df\PYZus{}goog}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{p}{)}\PY{o}{.}\PY{n}{mean}\PY{p}{(}\PY{p}{)}
\PY{c+c1}{\PYZsh{}}
\PY{c+c1}{\PYZsh{} Related Industry pairs}
\PY{n}{mean\PYZus{}pair\PYZus{}googl\PYZus{}xlnx} \PY{o}{=} \PY{p}{(}\PY{n}{df\PYZus{}googl}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{+} \PY{n}{df\PYZus{}xlnx}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{p}{)}\PY{o}{.}\PY{n}{mean}\PY{p}{(}\PY{p}{)}
\PY{n}{mean\PYZus{}pair\PYZus{}xlnx\PYZus{}googl} \PY{o}{=} \PY{p}{(}\PY{n}{df\PYZus{}xlnx}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{+} \PY{n}{df\PYZus{}googl}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{p}{)}\PY{o}{.}\PY{n}{mean}\PY{p}{(}\PY{p}{)}
\PY{c+c1}{\PYZsh{} Get the standard deviation between pairs}
\PY{c+c1}{\PYZsh{}}
\PY{c+c1}{\PYZsh{} Main Industry pairs}
\PY{n}{std\PYZus{}pair\PYZus{}goog\PYZus{}googl} \PY{o}{=} \PY{p}{(}\PY{n}{df\PYZus{}goog}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{+} \PY{n}{df\PYZus{}googl}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{p}{)}\PY{o}{.}\PY{n}{std}\PY{p}{(}\PY{p}{)}
\PY{n}{std\PYZus{}pair\PYZus{}googl\PYZus{}googl} \PY{o}{=} \PY{p}{(}\PY{n}{df\PYZus{}googl}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{+} \PY{n}{df\PYZus{}goog}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{p}{)}\PY{o}{.}\PY{n}{std}\PY{p}{(}\PY{p}{)}
\PY{c+c1}{\PYZsh{}}
\PY{c+c1}{\PYZsh{} Related Industry pairs}
\PY{n}{std\PYZus{}pair\PYZus{}googl\PYZus{}xlnx} \PY{o}{=} \PY{p}{(}\PY{n}{df\PYZus{}googl}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{+} \PY{n}{df\PYZus{}xlnx}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{p}{)}\PY{o}{.}\PY{n}{std}\PY{p}{(}\PY{p}{)}
\PY{n}{std\PYZus{}pair\PYZus{}xlnx\PYZus{}googl} \PY{o}{=} \PY{p}{(}\PY{n}{df\PYZus{}xlnx}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{+} \PY{n}{df\PYZus{}googl}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{normalized\PYZus{}price}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{p}{)}\PY{o}{.}\PY{n}{std}\PY{p}{(}\PY{p}{)}
\PY{c+c1}{\PYZsh{} Find whenever the normalized price was 2 times over the standard deviation.}
\PY{c+c1}{\PYZsh{}df\PYZus{}temp\PYZus{}g = df\PYZus{}xlnx[np.abs(df\PYZus{}xlnx[\PYZsq{}normalized\PYZus{}price\PYZsq{}] \PYZhy{} mean\PYZus{}pair\PYZus{}googl\PYZus{}xlnx) \PYZgt{} 2.0*std\PYZus{}pair\PYZus{}googl\PYZus{}xlnx]}
\PY{c+c1}{\PYZsh{} Holding period return}
\PY{n}{df\PYZus{}goog\PYZus{}holding\PYZus{}period\PYZus{}return} \PY{o}{=} \PY{p}{(}\PY{n}{df\PYZus{}goog}\PY{o}{.}\PY{n}{iloc}\PY{p}{[}\PY{o}{\PYZhy{}}\PY{l+m+mi}{1}\PY{p}{]} \PY{o}{/} \PY{n}{df\PYZus{}goog}\PY{o}{.}\PY{n}{iloc}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{)} \PY{o}{\PYZhy{}} \PY{l+m+mi}{1}
\PY{n}{df\PYZus{}googl\PYZus{}holding\PYZus{}period\PYZus{}return} \PY{o}{=} \PY{p}{(}\PY{n}{df\PYZus{}googl}\PY{o}{.}\PY{n}{iloc}\PY{p}{[}\PY{o}{\PYZhy{}}\PY{l+m+mi}{1}\PY{p}{]} \PY{o}{/} \PY{n}{df\PYZus{}googl}\PY{o}{.}\PY{n}{iloc}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{)} \PY{o}{\PYZhy{}} \PY{l+m+mi}{1}
\PY{n}{df\PYZus{}xlnx\PYZus{}holding\PYZus{}period\PYZus{}return} \PY{o}{=} \PY{p}{(}\PY{n}{df\PYZus{}xlnx}\PY{o}{.}\PY{n}{iloc}\PY{p}{[}\PY{o}{\PYZhy{}}\PY{l+m+mi}{1}\PY{p}{]} \PY{o}{/} \PY{n}{df\PYZus{}xlnx}\PY{o}{.}\PY{n}{iloc}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{)} \PY{o}{\PYZhy{}} \PY{l+m+mi}{1}
\PY{c+c1}{\PYZsh{} Sharpe measure}
\PY{c+c1}{\PYZsh{}}
\PY{c+c1}{\PYZsh{} Sharpe measure is on total risk.}
\PY{c+c1}{\PYZsh{} Sharpe Ratio = (Rp \PYZhy{} Rf)/std}
\PY{c+c1}{\PYZsh{} Rp =\PYZgt{} Average return}
\PY{c+c1}{\PYZsh{} Rf =\PYZgt{} Risk free rate}
\PY{c+c1}{\PYZsh{} std =\PYZgt{} standard deviation}
\PY{c+c1}{\PYZsh{}}
\PY{c+c1}{\PYZsh{} https://www.investopedia.com/terms/r/risk\PYZhy{}freerate.asp\PYZsh{}ixzz55B5Vc4ly}
\PY{c+c1}{\PYZsh{} the interest rate on a three\PYZhy{}month U.S. Treasury bill is often used as the risk\PYZhy{}free rate for U.S.\PYZhy{}based investors.}
\PY{c+c1}{\PYZsh{}}
\PY{c+c1}{\PYZsh{} Risk free rate: https://ycharts.com/indicators/3\PYZus{}month\PYZus{}t\PYZus{}bill}
\PY{n}{Rp} \PY{o}{=} \PY{n}{df\PYZus{}goog\PYZus{}average\PYZus{}return}
\PY{n}{Rf} \PY{o}{=} \PY{l+m+mf}{0.0133}
\PY{n}{std} \PY{o}{=} \PY{n}{goog\PYZus{}std\PYZus{}return}
\PY{n}{sharpe\PYZus{}goog} \PY{o}{=} \PY{p}{(}\PY{n}{Rp} \PY{o}{\PYZhy{}} \PY{n}{Rf}\PY{p}{)} \PY{o}{/} \PY{n}{std}
\PY{c+c1}{\PYZsh{}}
\PY{n}{Rp} \PY{o}{=} \PY{n}{df\PYZus{}googl\PYZus{}average\PYZus{}return}
\PY{n}{Rf} \PY{o}{=} \PY{l+m+mf}{0.0133}
\PY{n}{std} \PY{o}{=} \PY{n}{googl\PYZus{}std\PYZus{}return}
\PY{n}{sharpe\PYZus{}googl} \PY{o}{=} \PY{p}{(}\PY{n}{Rp} \PY{o}{\PYZhy{}} \PY{n}{Rf}\PY{p}{)} \PY{o}{/} \PY{n}{std}
\PY{c+c1}{\PYZsh{}}
\PY{n}{Rp} \PY{o}{=} \PY{n}{df\PYZus{}xlnx\PYZus{}average\PYZus{}return}
\PY{n}{Rf} \PY{o}{=} \PY{l+m+mf}{0.0133}
\PY{n}{std} \PY{o}{=} \PY{n}{xlnx\PYZus{}std\PYZus{}return}
\PY{n}{sharpe\PYZus{}xlnx} \PY{o}{=} \PY{p}{(}\PY{n}{Rp} \PY{o}{\PYZhy{}} \PY{n}{Rf}\PY{p}{)} \PY{o}{/} \PY{n}{std}
\PY{c+c1}{\PYZsh{} Treynor measure}
\PY{c+c1}{\PYZsh{}}
\PY{c+c1}{\PYZsh{} Similar to Sharpe except it only considers systematic risks}
\PY{c+c1}{\PYZsh{} not total risk.}
\PY{c+c1}{\PYZsh{}}
\PY{c+c1}{\PYZsh{} Treynor Ratio = (Rp \PYZhy{} Rf)/Beta}
\PY{c+c1}{\PYZsh{} Rp =\PYZgt{} Average return}
\PY{c+c1}{\PYZsh{} Rf =\PYZgt{} Risk free rate}
\PY{c+c1}{\PYZsh{} Beta =\PYZgt{} Beta}
\PY{c+c1}{\PYZsh{}}
\PY{c+c1}{\PYZsh{} https://www.investopedia.com/terms/r/risk\PYZhy{}freerate.asp\PYZsh{}ixzz55B5Vc4ly}
\PY{c+c1}{\PYZsh{} the interest rate on a three\PYZhy{}month U.S. Treasury bill is often used as the risk\PYZhy{}free rate for U.S.\PYZhy{}based investors.}
\PY{c+c1}{\PYZsh{}}
\PY{c+c1}{\PYZsh{} Risk free rate: https://ycharts.com/indicators/3\PYZus{}month\PYZus{}t\PYZus{}bill}
\PY{c+c1}{\PYZsh{}}
\PY{c+c1}{\PYZsh{} Beta \PYZhy{} https://finance.yahoo.com/quote/GOOG?p=GOOG}
\PY{c+c1}{\PYZsh{} https://finance.yahoo.com/quote/GOOGL?p=GOOGL }
\PY{n}{Rp} \PY{o}{=} \PY{n}{df\PYZus{}goog\PYZus{}average\PYZus{}return}
\PY{n}{Rf} \PY{o}{=} \PY{l+m+mf}{0.0133}
\PY{n}{Beta} \PY{o}{=} \PY{l+m+mf}{1.04}
\PY{n}{treynor\PYZus{}goog} \PY{o}{=} \PY{p}{(}\PY{n}{Rp} \PY{o}{\PYZhy{}} \PY{n}{Rf}\PY{p}{)} \PY{o}{/} \PY{n}{Beta}
\PY{c+c1}{\PYZsh{}}
\PY{n}{Rp} \PY{o}{=} \PY{n}{df\PYZus{}googl\PYZus{}average\PYZus{}return}
\PY{n}{Rf} \PY{o}{=} \PY{l+m+mf}{0.0133}
\PY{n}{Beta} \PY{o}{=} \PY{l+m+mf}{1.01}
\PY{n}{treynor\PYZus{}googl} \PY{o}{=} \PY{p}{(}\PY{n}{Rp} \PY{o}{\PYZhy{}} \PY{n}{Rf}\PY{p}{)} \PY{o}{/} \PY{n}{Beta}
\PY{c+c1}{\PYZsh{}}
\PY{n}{Rp} \PY{o}{=} \PY{n}{df\PYZus{}xlnx\PYZus{}average\PYZus{}return}
\PY{n}{Rf} \PY{o}{=} \PY{l+m+mf}{0.0133}
\PY{n}{Beta} \PY{o}{=} \PY{l+m+mf}{0.88}
\PY{n}{treynor\PYZus{}xlnx} \PY{o}{=} \PY{p}{(}\PY{n}{Rp} \PY{o}{\PYZhy{}} \PY{n}{Rf}\PY{p}{)} \PY{o}{/} \PY{n}{Beta}
\PY{c+c1}{\PYZsh{} Jensen\PYZsq{}s Measure}
\PY{c+c1}{\PYZsh{}}
\PY{c+c1}{\PYZsh{} Excess returns}
\PY{c+c1}{\PYZsh{} alpha = Rp \PYZhy{} [Rf \PYZhy{} Beta(Rm \PYZhy{} Rf)]}
\PY{c+c1}{\PYZsh{} Rm is Market return. The market is the S\PYZam{}P 500 so using ticker symbol }
\PY{c+c1}{\PYZsh{} SPX which follows the S\PYZam{}P 500.}
\PY{n}{df\PYZus{}spx}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{daily\PYZus{}return}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{p}{(}\PY{n}{df\PYZus{}spx}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Adj Close}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{/} \PY{p}{(}\PY{n}{df\PYZus{}spx}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Adj Close}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{p}{[}\PY{l+m+mi}{1}\PY{p}{]}\PY{p}{)}\PY{p}{)}
\PY{n}{df\PYZus{}spx\PYZus{}average\PYZus{}return} \PY{o}{=} \PY{n}{df\PYZus{}spx}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{daily\PYZus{}return}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{o}{.}\PY{n}{mean}\PY{p}{(}\PY{p}{)}
\PY{c+c1}{\PYZsh{}}
\PY{n}{Rp} \PY{o}{=} \PY{n}{df\PYZus{}goog\PYZus{}average\PYZus{}return}
\PY{n}{Rf} \PY{o}{=} \PY{l+m+mf}{0.0133}
\PY{n}{Beta} \PY{o}{=} \PY{l+m+mf}{1.04}
\PY{n}{Rm} \PY{o}{=} \PY{n}{df\PYZus{}spx\PYZus{}average\PYZus{}return}
\PY{n}{goog\PYZus{}jensen\PYZus{}alpha} \PY{o}{=} \PY{n}{Rp} \PY{o}{\PYZhy{}} \PY{p}{(}\PY{n}{Rf} \PY{o}{\PYZhy{}} \PY{n}{Beta}\PY{o}{*}\PY{p}{(}\PY{n}{Rm} \PY{o}{\PYZhy{}} \PY{n}{Rf}\PY{p}{)}\PY{p}{)}
\PY{c+c1}{\PYZsh{}}
\PY{n}{Rp} \PY{o}{=} \PY{n}{df\PYZus{}googl\PYZus{}average\PYZus{}return}
\PY{n}{Rf} \PY{o}{=} \PY{l+m+mf}{0.0133}
\PY{n}{Beta} \PY{o}{=} \PY{l+m+mf}{1.01}
\PY{n}{Rm} \PY{o}{=} \PY{n}{df\PYZus{}spx\PYZus{}average\PYZus{}return}
\PY{n}{googl\PYZus{}jensen\PYZus{}alpha} \PY{o}{=} \PY{n}{Rp} \PY{o}{\PYZhy{}} \PY{p}{(}\PY{n}{Rf} \PY{o}{\PYZhy{}} \PY{n}{Beta}\PY{o}{*}\PY{p}{(}\PY{n}{Rm} \PY{o}{\PYZhy{}} \PY{n}{Rf}\PY{p}{)}\PY{p}{)}
\PY{c+c1}{\PYZsh{}}
\PY{n}{Rp} \PY{o}{=} \PY{n}{df\PYZus{}xlnx\PYZus{}average\PYZus{}return}
\PY{n}{Rf} \PY{o}{=} \PY{l+m+mf}{0.0133}
\PY{n}{Beta} \PY{o}{=} \PY{l+m+mf}{0.88}
\PY{n}{Rm} \PY{o}{=} \PY{n}{df\PYZus{}spx\PYZus{}average\PYZus{}return}
\PY{n}{xlnx\PYZus{}jensen\PYZus{}alpha} \PY{o}{=} \PY{n}{Rp} \PY{o}{\PYZhy{}} \PY{p}{(}\PY{n}{Rf} \PY{o}{\PYZhy{}} \PY{n}{Beta}\PY{o}{*}\PY{p}{(}\PY{n}{Rm} \PY{o}{\PYZhy{}} \PY{n}{Rf}\PY{p}{)}\PY{p}{)}
\end{Verbatim}
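Two details in the cell above are worth flagging. First, the standard
definition of Jensen's alpha uses a plus sign inside the bracket,
$\alpha = R_p - (R_f + \beta (R_m - R_f))$, whereas the code above
subtracts the beta term. Second, the daily\_return columns defined
above are cumulative returns relative to the first day rather than
day-over-day returns; pandas' pct\_change() would give the latter. The
sketch below restates the three measures with the textbook Jensen
formula; it is illustrative only, and the risk-free rate and betas are
the same assumed constants used above.

\begin{Verbatim}
# Illustrative sketch (not from the original notebook): the same three
# performance measures, with the textbook form of Jensen's alpha.
RISK_FREE_RATE = 0.0133   # assumed 3-month T-bill rate, as in the cell above

def sharpe_ratio(avg_return, std_return, risk_free=RISK_FREE_RATE):
    # Excess return per unit of total risk.
    return (avg_return - risk_free) / std_return

def treynor_ratio(avg_return, beta, risk_free=RISK_FREE_RATE):
    # Excess return per unit of systematic risk.
    return (avg_return - risk_free) / beta

def jensen_alpha(avg_return, beta, market_return, risk_free=RISK_FREE_RATE):
    # Return in excess of the CAPM prediction:
    # alpha = Rp - (Rf + beta * (Rm - Rf))
    return avg_return - (risk_free + beta * (market_return - risk_free))

# Hypothetical usage, reusing names defined in the cell above:
# rm = df_spx['Adj Close'].pct_change().mean()
# print(sharpe_ratio(df_goog_average_return, goog_std_return))
# print(treynor_ratio(df_goog_average_return, beta=1.04))
# print(jensen_alpha(df_goog_average_return, beta=1.04, market_return=rm))
\end{Verbatim}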
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}117}]:} \PY{n}{goog\PYZus{}jensen\PYZus{}alpha}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}117}]:} 0.882301658875199
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}118}]:} \PY{n}{googl\PYZus{}jensen\PYZus{}alpha}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}118}]:} 0.8719089729169318
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}119}]:} \PY{n}{xlnx\PYZus{}jensen\PYZus{}alpha}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}119}]:} 0.8544309763096805
\end{Verbatim}
\subsubsection{DEBUG}\label{debug}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}59}]:} \PY{n}{df\PYZus{}goog\PYZus{}holding\PYZus{}period\PYZus{}return}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}59}]:} normalized\_price 0.418032
daily\_return inf
dtype: float64
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}53}]:} \PY{n}{df\PYZus{}googl\PYZus{}holding\PYZus{}period\PYZus{}return}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}53}]:} normalized\_price 0.38712
dtype: float64
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}54}]:} \PY{n}{df\PYZus{}xlnx\PYZus{}holding\PYZus{}period\PYZus{}return}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}54}]:} normalized\_price 0.32766
dtype: float64
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}66}]:} \PY{n}{sharpe\PYZus{}goog}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}66}]:} 1.4782398792731872
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}68}]:} \PY{n}{sharpe\PYZus{}googl}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}68}]:} 1.4620040368557
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}69}]:} \PY{n}{sharpe\PYZus{}xlnx}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}69}]:} 1.2894166002250353
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}71}]:} \PY{n}{treynor\PYZus{}goog}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}71}]:} 0.14717192208828575
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}72}]:} \PY{n}{treynor\PYZus{}googl}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}72}]:} 0.14125357724113863
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}73}]:} \PY{n}{treynor\PYZus{}xlnx}
\end{Verbatim}
\begin{Verbatim}[commandchars=\\\{\}]
{\color{outcolor}Out[{\color{outcolor}73}]:} 0.17393045337706498
\end{Verbatim}
% Add a bibliography block to the postdoc
\end{document}
| {
"alphanum_fraction": 0.5275895304,
"avg_line_length": 55.7297524129,
"ext": "tex",
"hexsha": "e6f0240a2fdf38263d4be0fcbca6519b42d97668",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "4a7896f3225cb84e2f15770409c1f18bfe529615",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "cilsya/coursera",
"max_forks_repo_path": "Trading_Strategies_in_Emerging_Markets/Design_Your_Own_Trading_Strategy/code_process_data/notebook.tex",
"max_issues_count": 5,
"max_issues_repo_head_hexsha": "4a7896f3225cb84e2f15770409c1f18bfe529615",
"max_issues_repo_issues_event_max_datetime": "2021-06-01T22:49:40.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-03-24T16:17:05.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "cilsya/coursera",
"max_issues_repo_path": "Trading_Strategies_in_Emerging_Markets/Design_Your_Own_Trading_Strategy/code_process_data/notebook.tex",
"max_line_length": 533,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "4a7896f3225cb84e2f15770409c1f18bfe529615",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "cilsya/coursera",
"max_stars_repo_path": "Trading_Strategies_in_Emerging_Markets/Design_Your_Own_Trading_Strategy/code_process_data/notebook.tex",
"max_stars_repo_stars_event_max_datetime": "2021-03-15T13:57:04.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-03-15T13:57:04.000Z",
"num_tokens": 107746,
"size": 265608
} |
\documentclass[11pt,a4paper,twocolumn]{article}
\usepackage{bibcheck}
\usepackage{graphicx}
\usepackage{wrapfig}
\usepackage{tabularx}
\usepackage[table]{xcolor}
\usepackage{multirow}
\usepackage{classlib}
\title{Monster Slayer}
\author{}
\date{}
\begin{document}
\maketitle
\section*{Protectors of Humanity}
The monster slayer is a warrior specialized in slaying vampires, lycanthropes, demons, and other supernatural creatures. Monster slayers train with clerics and paladins, learning their ways, but instead of taking a direct approach they prefer to track their prey and finish it with a hunter's strategy: plan traps, lure the target where they want it to be, and end the fight quickly with divine magic combined with swift attacks.
\section*{Independent adventurers}
A monster slayer always fears that innocent people will be turned to swell the armies of the undead, and tends to avoid rallying with companions who are not holy. Their sacred duty to protect humanity from these evil forces makes them paranoid about other humanoids being charmed or dominated by these creatures. \\
Slayers do not, however, sit quietly waiting for evil to come to their doors. They usually march out seeking contracts to finish these monsters, or accept charity when needed to protect villagers.
\section*{Creating a monster slayer}
As you create your slayer character, consider the nature of the training that gave you your particular capabilities. Did you train with a single mentor, wandering the lands together until you mastered the monster slayer’s ways? Did you leave your apprenticeship, or was your mentor slain— perhaps by the same kind of monster that became your favored enemy? Or perhaps you learned your skills as part of a band of slayers affiliated with a monastery, trained in mystic paths as well as sacred lore. You might be self-taught, a recluse who learned combat skills, tracking, and even a magical connection to the gods through experience slaying undead and other monsters. \\ \\
What’s the source of your particular hatred of a certain kind of enemy? Did a monster kill someone you loved or destroy your home village? Or did you see too much of the destruction these monsters cause and commit yourself to reining in their depredations? Is your adventuring career a continuation of your work in protecting the borderlands, or a significant change? What made you join up with a band of adventurers? Do you find it challenging to teach new allies the ways of the wild, or do you welcome the relief from solitude that they offer?
\section*{Quick Build}
You can make a monster slayer quickly by following these suggestions. First, make Dexterity your highest score, followed by Wisdom. (Some monster slayers who focus on two-weapon fighting make Strength higher than Dexterity.) Second, choose the soldier background.
\begin{table*}
\begin{classtable}
1st & +2 & Favored Enemy, Divine Hunter & - & - & - & - & - &- \\
2nd & +2 & Fighting Style, Spell Casting & 2 & 2 & - & - &- &- \\
3rd & +2 & Monster Slayer Archetype, Primeval Awareness & 3 & 3 & - & - &- &- \\
4th & +2 & Ability Score Improvement & 3 & 3 & - & - &- &- \\
5th & +3 & Extra Attack & 4 & 4 & 2 & - & - & - \\
6th & +3 & Favored Enemy and Divine Hunter Improvement & 4 & 4 & 2 & - & - & - \\
7th & +3 & Monster Slayer Archetype feature & 5 & 4 & 3 & - & - & - \\
8th & +3 & Ability Score Improvement & 5 & 4 & 3 & - & - & - \\
9th & +4 & Slayer of Magic Wielders & 6 & 4 & 3 & 2 & - & - \\
10th & +4 & Divine Hunter Improvement & 6 & 4 & 3 & 2 & - & - \\
11th & +4 & Monster Slayer Archetype feature & 7 & 4 & 3 & 3 & - & - \\
12th & +4 & Ability Score Improvement & 7 & 4 & 3 & 3 & - & - \\
13th & +5 & - & 8 & 4 & 3 & 3 & 1 & - \\
14th & +5 & Favored Enemy improvement, Vanish & 8 & 4 & 3 & 3 & 1 & - \\
15th & +5 & Monster Slayer Archetype feature & 9 & 4 & 3 & 3 & 2 & - \\
16th & +5 & Ability Score Improvement & 9 & 4 & 3 & 3 & 2 & - \\
17th & +6 & - & 10 & 4 & 3 & 3 & 3 & 1 \\
18th & +6 & Feral Senses & 10 & 4 & 3 & 3 & 3 & 1 \\
19th & +6 & Ability Score Improvement & 11 & 4 & 3 & 3 & 3 & 2 \\
20th & +6 & Foe Slayer & 11 & 4 & 3 & 3 & 3 & 2 \\
\end{classtable}
\end{table*}
\section*{Class Features}
As a monster slayer, you gain the following class features.
\subsection*{Hit Points}
\proficiency{Hit Dice}{1d10 per monster slayer level}
\proficiency{Hit Points at 1st level}{10 + your Constitution modifier}
\proficiency{Hit Points at Higher Levels}{1d10 (or 6) + your Constitution modifier per monster slayer level after 1st}
\subsection*{Proficiencies}
\proficiency{Armor}{light armor, medium armor, shields}
\proficiency{Weapons}{Simple weapons and martial weapons}
\proficiency{Tools}{none}
\proficiency{Saving Throws}{Dexterity, Wisdom}
\proficiency{Skills}{Choose three from Animal Handling, Athletics, Insight, Investigation, Religion, Perception, Stealth, Survival}
\subsection*{Equipment}
You start with the following equipment, in
addition to the equipment granted by your
background:
\begin{itemize}
\item (a) Scale mail or (b) leather armor.
\item (a) Two shortswords or (b) two simple weapons.
\item (a) a dungeoneer's pack or (b) an explorer's pack.
\item a longbow and a quiver of 20 arrows.
\item a holy symbol.
\end{itemize}
\subsection*{Favored Enemy}
Beginning at 1st level, you have significant experience studying, tracking, hunting, and even talking to a certain type of enemy. \\ \\
Choose a type of favored enemy: aberrations, celestials, dragons, elementals, fey, fiends, giants, monstrosities, oozes, or undead.
You have advantage on Wisdom (Survival) checks to track your favored enemies, as well as on Intelligence checks to recall information about them.
When you gain this feature, you also learn one language of your choice that is spoken by your favored enemies, if they speak one at all. \\ \\
You choose one additional favored enemy, as well as an associated language, at 6th and 14th level. As you gain levels, your choices should reflect the types of monsters you have encountered on your adventures.
\subsection*{Divine Hunter}
Beginning at 1st level, you have significant training in dealing with unnatural creatures, and your god has provided you with the means to deal with them efficiently. \\ \\
You can spend an hour in prayer to your god to infuse your attacks with divine energy. Until your next short rest, your weapon attacks deal an additional 1d4 points of radiant damage. \\ \\
The damage you cause increases to 1d6 at 6th level and to 2d4 at 10th level.
\subsection*{Fighting Style}
At 2nd level, you adopt a particular style of fighting as your specialty. Choose one of the following options. \\
You can’t take a Fighting Style option more than once, even if you later get to choose again.
\subsubsection*{Archery}
You gain a +2 bonus to attack rolls you make with ranged weapons.
\subsubsection*{Defense}
While you are wearing armor, you gain a +1 bonus to AC.
\subsubsection*{Dueling}
When you are wielding a melee weapon in one hand and no other weapons, you gain a +2 bonus to damage rolls with that weapon.
\subsubsection*{Two-Weapon Fighting}
When you engage in two-weapon fighting, you can add your ability modifier to the damage of the second attack.
\subsection*{Spellcasting}
By the time you reach 2nd level, you have learned to use your divine connection to cast spells, much as a paladin or a cleric does. See chapter 10 for the general rules of spellcasting and at the end of this document for the monster slayer spell list.
\subsubsection*{Spell Slots}
The monster slayer table shows how many spell slots you have to cast your monster slayer spells of 1st level and higher. To cast one of these spells, you must expend a slot of the spell’s level or higher. You regain all expended spell slots when you finish a long rest. \\
For example, if you know the 1st-level spell heroism and have a 1st-level and a
2nd-level spell slot available, you can cast heroism using either slot. \\
\subsubsection*{Spells Known of 1st Level and Higher}
You know two 1st-level spells of your choice
from the monster slayer spell list. \\
The Spells Known column of the monster slayer table shows when you learn more monster slayer spells of your choice. Each of these spells must be of a level for which you have spell slots. For instance, when you reach 5th level in this class, you can learn one new spell of 1st or 2nd level. \\
Additionally, when you gain a level in this class, you can choose one of the monster slayer spells you know and replace it with another spell from the monster slayer spell list, which also must be of a level for which you have spell slots.
\subsubsection*{Spellcasting Ability}
Wisdom is your spellcasting ability for your monster slayer spells, since your magic draws on your attunement to the divine. You use your Wisdom whenever a spell refers to your spellcasting ability. In addition, you use your Wisdom modifier when setting the saving throw DC for a monster slayer spell you cast and when making an attack roll with one. \\
\\
Spell save DC = 8 + your proficiency bonus + your Wisdom modifier \\
\\
Spell attack modifier = your proficiency bonus + your Wisdom modifier \\
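For example, a 5th-level monster slayer with a proficiency bonus of +3 and Wisdom 16 (a +3 modifier) has a spell save DC of 8 + 3 + 3 = 14 and a spell attack modifier of +3 + 3 = +6.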
\subsection*{Primeval Awareness}
Beginning at 3rd level, you can use your action and expend one monster slayer spell slot to focus your awareness on the region around you. For 1 minute per level of the spell slot you expend, you can sense whether the following types of
creatures are present within 1 mile of you: aberrations, celestials, dragons,
elementals, fey, fiends, and undead. \\
This feature doesn’t reveal the creatures’ location or number.
\subsection*{Ability Score Improvement}
When you reach 4th level, and again at 8th, 12th, 16th, and 19th level, you can increase one ability score of your choice by 2, or you can increase two ability scores of your choice by 1. \\
As normal, you can’t increase an ability score above 20 using this feature.
\subsection*{Extra Attack}
Beginning at 5th level, you can attack twice, instead of once, whenever you take the Attack action on your turn.
\subsection*{Slayer of Magic Wielders}
Beginning at 9th level, you have practiced techniques for quickly slaying spellcasters, gaining the following benefits:
\begin{itemize}
\item When a creature that you can see attempts to cast a spell, you can use your reaction to make a weapon attack against that creature.
\item When you damage a creature that is concentrating on a spell, that creature has disadvantage on the saving throw it makes to maintain its concentration.
\item You have advantage on saving throws against charm, possession, or domination spells from creatures that you can perceive.
\end{itemize}
\subsection*{Vanish}
Starting at 14th level, you can use the Hide action as a bonus action on your turn. Also, you can’t be tracked by nonmagical means, unless you choose to leave a trail.
\subsection*{Feral Senses}
At 18th level, you gain preternatural senses that help you fight creatures you can’t see. When you attack a creature you can’t see, your inability to see it doesn’t impose disadvantage on your attack rolls against it. \\
You are also aware of the location of any invisible creature within 30 feet of you, provided that the creature isn’t hidden from you and you aren’t blinded or deafened.
\subsection*{Foe Slayer}
At 20th level, you become an unparalleled hunter of your enemies. Once on each of your turns, you can add your Wisdom modifier to the attack roll or the damage roll of an attack you make against one of your favored enemies. You can choose to use this feature before or after the roll, but before any effects of the roll are applied.
\section*{Monster Slayer Archetype}
The monster slayer usually specializes against a given type of creature, gaining spells and features helpful for finishing these creatures.
%% The slayer should have feats matching the following types:
%% Lvl 3: Attack (2 spells lvl1)
%% Lvl 7: Defense (2 spells lvl2)
%% Lvl 11: Superior Attack (2 spells lvl3)
%% Lvl 15: Superior Defense (2 spells lvl4)
\input{vampireslayer}
\input{giantslayer}
\newpage
\section*{Monster Slayer Spell List}
\subsubsection*{1st level}
Bless \\
Command \\
Cure Wounds \\
Detect Evil and Good \\
Detect Magic \\
Detect Poison and Disease \\
Divine Favor \\
Heroism \\
Hunter's mark \\
Jump \\
Longstrider \\
Protection from Evil and Good \\
Purify Food and Drink \\
Searing Smite \\
Thunderous Smite \\
Wrathful Smite \\
\subsubsection*{2nd level}
Aid \\
Barkskin \\
Darkvision \\
Find Traps \\
Lesser Restoration \\
Locate Object \\
Protection from Poison \\
Silence \\
\subsubsection*{3rd level}
Aura of vitality \\
Blinding smite \\
Crusader's Mantle \\
Dispel Magic \\
Elemental Weapon \\
Magic Circle \\
Nondetection \\
Protection from energy \\
Remove Curse \\
Revivify \\
Water breathing \\
Water walk \\
\subsubsection*{4th level}
Aura of life \\
Death Ward \\
Locate Creature \\
Staggering Smite \\
Stoneskin \\
\subsubsection*{5th level}
Banishing Smite \\
Circle of Power \\
Destructive wave \\
Dispel Evil and Good \\
Raise Dead \\
\end{document} | {
"alphanum_fraction": 0.7400841346,
"avg_line_length": 62.7924528302,
"ext": "tex",
"hexsha": "d1624b541f7b5c80b1f492f05daac7a9a1d2bdcb",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "9b36e0ffc93187c5b2fece72e4c52c5b24bb47ed",
"max_forks_repo_licenses": [
"BSD-2-Clause"
],
"max_forks_repo_name": "LaraFerCue/dungeons_and_dragons",
"max_forks_repo_path": "monsterslayer.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "9b36e0ffc93187c5b2fece72e4c52c5b24bb47ed",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-2-Clause"
],
"max_issues_repo_name": "LaraFerCue/dungeons_and_dragons",
"max_issues_repo_path": "monsterslayer.tex",
"max_line_length": 681,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "9b36e0ffc93187c5b2fece72e4c52c5b24bb47ed",
"max_stars_repo_licenses": [
"BSD-2-Clause"
],
"max_stars_repo_name": "LaraFerCue/dungeons_and_dragons",
"max_stars_repo_path": "monsterslayer.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 3507,
"size": 13312
} |
\documentclass[bigger]{beamer}
\input{header-beam} % change to header-handout for handouts
% ====================
\title[Lecture 1]{Philosophy 279: Logic I}
\date{September 10, 2013}
% ====================
\logo{\includegraphics[height=2cm]{UC-vert-phil}}
\include{header}
\beamertemplatenavigationsymbolsempty
\frame{
\titlepage
}
\logo{}
\frame{\frametitle{\insertshorttitle\ (\insertdate)}
\tableofcontents[hidesubsections]
}
\section{Course Info}
\subsection{}
\subsec{Instructors}{
\bit
\item Richard Zach (Prof)
\item Jared Houston (TA)
\item Dan Kary (TA)
\item Samara Burns (PASS Leader)
\bit
\item Find us on 12th floor of Social Sciences
\eit\eit
}
\subsec{Textbook and Software}{
\bit
\item Dave Barker-Plummer, Jon Barwise and John Etchemendy,\\
\textit{Language, Proof, and
Logic}, CSLI Press (2nd ed)
\item OpenProof software (included)
\bit
\item Tarski's World
\item Boole
\item Fitch
\eit
\item GradeGrinder grading service
\bit
\item Must buy \emph{new} copy, but
\item Electronic-only (cheaper) option available
\item Textbook and software available on computers in AFCL (018 SS)
\eit
\eit
}
\subsec{Evaluation}{
\bit
\item 6 Homework Assignments (50\%)
\bit
\item Lowest mark dropped
\item Must turn in all 6 within a week of due date
\eit
\item 3 Tests (45\%)
\bit
\item Thursday, Oct 17 (15\%)
\item Thursday, Nov 14 (20\%)
\item Thursday, Dec 5 (10\%)
\item In class, 75 minutes, closed book
\eit
\item Participation (5\%)
\bit
\item 5 posts on BlackBoard discussion board or in lecture/tutorial/lab
\eit
\eit
}
\subsec{Course Components}{
\bit
\item Lectures
\item Tutorials
\item Lab Workshop (SS 018)
\item PASS Sessions
\bit
\item Tutorials start next week
\item You have choice between lab and tutorial starting third week
\item Attendance not mandatory but strongly suggested
\item PASS sessions start third week
\eit
\item BlackBoard website discussion forum
\bit
\item Be civil and behave like adults
\item Don't give away answers
\eit
\eit
}
\subsec{Read the Outline and FAQ!}{
\bit
\item Official outline covers all policy questions
\item Syllabus (in outline) has all the dates on it
\item Outline is binding agreement and you are responsible for knowing policies
\item FAQ on \href{http://ucalgary.ca/rzach/279}{Logic I website}
will answer most of your questions, including:
\bit
\item Why do you make me take this course?
\item Why is the grade scale set the way it is?
\item How hard is this course?
\item Why do CPSC and PHIL need at least a C--?
\eit
\eit
}
\section{What is Logic?}
\subsec{What is Logic?}{
\bit
\item \emph{Logic is the science of what follows from what.}
\item Valid and invalid inference
\bit
\item Socrates is human. All humans are mortal.\\
Therefore, Socrates is mortal.
\item Socrates is human. All Greeks are human.\\
Therefore, Socrates is Greek.
\eit
\item Logic investigates what makes the first inference \emph{valid} and the
second \emph{invalid}.
\eit }
\subsec{What is Formal Logic?}{
\bit
\item Studies logical properties of formal languages (not English)
\bit
\item Logical consequence (what follows from what?)
\item Logical consistency (when do sentences contradict one another?)
\eit
\item Expressive power (what can be expressed in a given formal
language, and how?)
\item Formal models (mathematical structures described by formal language)
\item Inference and proof systems (how can it be proved that something
follows from something else?)
\item (Metalogical properties of logical systems)
\eit
}
\subsec{What is Logic Good For? (Philosophy)}{
\bit
\item (Informal) logic originates in philosophy (Aristotle)
\item Valid inference cornerstone of philosophical research
\item Logic itself a subdiscipline of philosophy
\item Formal tools of logic useful to make intuitive
philosophical notions precise
\bit
\item Possibility and necessity
\item Time
\item Moral obligation and permissibility
\item Belief and knowledge
\eit
\eit
}
\subsec{What is Logic Good For? (Mathematics)}{
\bit
\item Formal logic developed in the quest for foundations of mathematics (19th C.)
\item Logical systems provide precise foundational framework for mathematics
\bit
\item Axiomatic systems (e.g, geometry)
\item Algebraic structures (e.g., groups)
\item Set theory (e.g, Zermelo-Fraenkel with Choice)
\eit
\item Formal methods make mathematics more precise
\bit
\item Formal language can make mathematical claims more precise
\item Formal structures can point to alternatives, unveil gaps in proofs
\item Formal proof systems make proofs rigorous
\item Formal proofs make mechanical \emph{proof checking} and \emph{proof search} possible
\eit
\item Logical tools can be applied to mathematical problems
\eit
}
\subsec{What is Logic Good For? (Computer Science)}{
\bit
\item Combinational logic circuits
\item Database query languages
\item Logic programming
\item Knowledge representation
\item Automated reasoning
\item Formal specification and verification (of programs, of hardware designs)
\item Theoretical computer science (theory of computational
complexity, semantics of programming languages)
\eit
}
\section{The Formal Language(s) of FOL}
\subsec{Functions of Language}{
\bit
\item Pick out objects in the ``world''
\item Say things about these objects
\item Different categories of words play different roles
\bit
\item Names (``Alice''), pronouns (``she''), demonstratives
(``this''), certain phrases (``the Queen of England in 2011''):\\ pick
out things (``singular terms'')
\item Verbs, adjectives, common nouns (in combination with other
words): \\ used to say things about the objects picked out by singular
terms
\eit
\item \emph{Predicating:} ascribing a property or relation to object(s)
\eit
}
\subsec{Simple Examples of Predication}{
\bit
\item Name + ``is'' + adjective\\
``Mars is red''\\
``Jupiter is large''
\item Name + ``is'' + comparative + ``than'' + Name\\
``Mars is smaller than Jupiter''
\item Name + ``is a'' + common noun\\
``Venus is a planet''
\item Name + verb (+ Name)
``The Earth moves''\\
``Phobos orbits Mars''\\
\eit
}
\subsec{First-order Languages}{
\bit
\item \emph{Individual constants} pick out things
\[
\sf a, b, c, \dots, mars, jupiter, \dots
\]
\item \emph{Predicate symbols} express properties and relations
\[
\sf Red, Large, Larger, Planet, Orbits, \le, =, \dots
\]
\item To say something about something, combine predicate symbols
and individual constants
\[
\sf Red(mars), Planet(mars),
Orbits(phobos, mars),
a = b
\]
(atomic sentences)
\eit
}
\section{The Blocks Language}
\subsec{The Blocks Language}{
\begin{tabular}{@{}ll@{}}
Tet(a) & a is a tetrahedron \\
Cube(a) & a is a cube \\
Dodec(a) & a is a dodecahedron \\
Small(a) & a is small \\
Medium(a) & a is medium \\
Large(a) & a is large \\[2ex]
SameSize(a, b) & a is the same size as b \\
SameShape(a, b) & a is the same shape as b\\
Larger(a, b) & a is larger than b \\
Smaller(a, b) & a is smaller than b\\
\end{tabular}
}
\subsec{The Blocks Language}{
\begin{tabular}{@{}ll@{}}
SameCol(a, b) & a is in the same column as b \\
SameRow(a, b) & a is in the same row as b\\
Adjoins(a, b) & a and b are located on adjacent\\
& (but not diagonally) squares \\
LeftOf(a, b) & a is located nearer to the left edge\\ & of the grid than b \\
RightOf(a, b) & a is located nearer to the right edge\\ & of the grid than b\\
FrontOf(a, b) & a is located nearer to the front\\ & of the grid than b\\
BackOf(a, b) & a is located nearer to the back\\ & of the grid than b\\
Between(a, b, c) & a, b and c are in the same row, column,\\
& or diagonal, and a is between b and c
\end{tabular}
}
\end{document}
| {
"alphanum_fraction": 0.7292211187,
"avg_line_length": 24.6838709677,
"ext": "tex",
"hexsha": "bba3d7592eaa0b7d297675a8c83b471767a97709",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "722ec82ae7a4593d40c72083d830c4e3e4864dc0",
"max_forks_repo_licenses": [
"CC0-1.0"
],
"max_forks_repo_name": "rzach/phil279",
"max_forks_repo_path": "279-lec01.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "722ec82ae7a4593d40c72083d830c4e3e4864dc0",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC0-1.0"
],
"max_issues_repo_name": "rzach/phil279",
"max_issues_repo_path": "279-lec01.tex",
"max_line_length": 90,
"max_stars_count": 5,
"max_stars_repo_head_hexsha": "722ec82ae7a4593d40c72083d830c4e3e4864dc0",
"max_stars_repo_licenses": [
"CC0-1.0"
],
"max_stars_repo_name": "rzach/phil279",
"max_stars_repo_path": "279-lec01.tex",
"max_stars_repo_stars_event_max_datetime": "2020-06-21T10:48:55.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-09-23T13:42:54.000Z",
"num_tokens": 2175,
"size": 7652
} |
\begin{enumerate}
\item Lazy Evaluation
\item Express Functions for relations with Functional Dependencies
\item Curried Form
\item Extensible Aggregators, Function Composition On Aggregator Level
\item Strings as lists
\end{enumerate}
\paragraph{Validity Checks}\NL
All usages/partial defs of a predicate must use the same number of terms.
\paragraph{Sanity Checks}\NL
\begin{itemize}
\item Unused predicate warning
\item Unused EDB warning
\end{itemize}
\paragraph{Safety Checks}\NL
All facts must be ground.
All variable terms in a head must be bound in the body.
\subsubsection{Evaluation}
Evaluation can be done in a number of different ways. Either iterative or recursive, bottom-up or top-down.
\paragraph{Recursive Top-Down Query Proofs}\NL
\begin{enumerate}
\item Collect all (ground) facts.
\item Let predicate under evaluation be P(x1, ..., xn).
\item Collect all rules that define P.
\item Do a Memoized Backtracking Search (Top-Down).
\item Construct and AND-OR tree. Unify free-variables at AND-nodes.
\item The OR-Branches are the different rules that can be used to prove the (x1, ..., xn) in P,
i.e. all the rules that define P.
\end{enumerate}
Thus we need to keep a set of ground facts and to
associate each predicate with the collection of rules that define it.
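A minimal illustrative sketch of that bookkeeping (in Python; the names
and representation are assumptions, not taken from the actual
implementation): ground facts go into one set, and every predicate is
associated with the rules that define it.
\begin{verbatim}
# Illustrative sketch only; representation and names are assumptions.
from collections import defaultdict
from dataclasses import dataclass

@dataclass(frozen=True)
class Atom:
    """A predicate applied to terms, e.g. P(x1, ..., xn)."""
    predicate: str
    terms: tuple

@dataclass(frozen=True)
class Rule:
    """head :- body1, ..., bodyn.  A rule with an empty body is a fact."""
    head: Atom
    body: tuple = ()

class Program:
    def __init__(self, rules):
        self.facts = set()                  # ground facts
        self.rules_for = defaultdict(list)  # predicate name -> defining rules
        for rule in rules:
            if rule.body:
                self.rules_for[rule.head.predicate].append(rule)
            else:
                self.facts.add(rule.head)
\end{verbatim}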
\paragraph{Cross-Compilation Evaluation}\NL
Enable compilation to a target Datalog implementation X.
| {
"alphanum_fraction": 0.7804693414,
"avg_line_length": 33.025,
"ext": "tex",
"hexsha": "5f05a2326f7a177418b7e251ab05a0f3a5cf0093",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f288d96b2b7923a717617412358fb40d07d6ee20",
"max_forks_repo_licenses": [
"BSD-2-Clause"
],
"max_forks_repo_name": "HampusBalldin/EDAN70Datalog",
"max_forks_repo_path": "report/ideas.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f288d96b2b7923a717617412358fb40d07d6ee20",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-2-Clause"
],
"max_issues_repo_name": "HampusBalldin/EDAN70Datalog",
"max_issues_repo_path": "report/ideas.tex",
"max_line_length": 107,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f288d96b2b7923a717617412358fb40d07d6ee20",
"max_stars_repo_licenses": [
"BSD-2-Clause"
],
"max_stars_repo_name": "HampusBalldin/EDAN70Datalog",
"max_stars_repo_path": "report/ideas.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 316,
"size": 1321
} |
\section{Formalisation of model transformations}
\label{sec:introduction:formalisation_of_model_transformations}
As explained earlier, model transformations are an automated way of modifying and creating models by transforming existing models. Model transformations can be used in a variety of scenarios, from simple modifications within the same domain and language (an endogenous transformation) to conversions between different domains and languages (an exogenous transformation). Furthermore, model transformations can be unidirectional, meaning that a model can only be transformed one way, or bidirectional, meaning that the model can be transformed in both directions. Unidirectional transformations are particularly useful in situations where the output model is meant to be used as a final result, such as code generation. Bidirectional transformations are necessary for situations where the models must be kept consistent. In that case, a change to one model might necessitate a change to the other model, which then can be automated using model transformations.
Since this thesis focuses on model transformations between EMF/Ecore and GROOVE, it deals with bidirectional exogenous transformations. The transformations between EMF/Ecore and GROOVE are exogenous by definition, since the languages of EMF/Ecore and GROOVE are different, as will be shown later. The bidirectionality of the transformations is beneficial to ensure consistency, which is a useful property to have in software verification.
In order to prove any property on these model transformations, the transformations need to be formalised. The formalisation of a model transformation consists of mathematical definitions and functions that describe the behaviour of the transformation, making it possible to mathematically translate an input model into an output model as described by the model transformation. These definitions and functions directly depend on the formalisations of the input and output models themselves, as these are needed to describe the input and output models of the transformations. Because of this dependency, the formalisations of EMF/Ecore and GROOVE must be established as well.
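For illustration only (this is a generic sketch, not the notation introduced later in this thesis), a unidirectional transformation between two languages $L_1$ and $L_2$ can be viewed as a function between their sets of models $\mathcal{M}(L_1)$ and $\mathcal{M}(L_2)$, and a bidirectional transformation as a pair of such functions,
\[
f : \mathcal{M}(L_1) \rightarrow \mathcal{M}(L_2)
\qquad\mbox{and}\qquad
g : \mathcal{M}(L_2) \rightarrow \mathcal{M}(L_1),
\]
where consistency can be expressed by requirements such as $g(f(m)) = m$ for every model $m \in \mathcal{M}(L_1)$.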
The main disadvantage of formalising model transformations is the direct relationship between a transformation and its input and output languages. As a consequence, the formalisation of a model transformation directly depends on the formalisations of its input and output languages. Therefore, it is not possible to give an abstract formalisation for model transformations between arbitrary languages. Creating such a formalisation would mean making the formalisations of the input and output languages more abstract, which might result in loss of information, which is undesirable, or in an increase in complexity. Within this thesis, this disadvantage is dealt with by focusing only on the model transformations between EMF/Ecore and GROOVE.
"alphanum_fraction": 0.839418526,
"avg_line_length": 328.6666666667,
"ext": "tex",
"hexsha": "0a2730c917c0d5b7850d1cc8e4f518402b5def06",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a0e860c4b60deb2f3798ae2ffc09f18a98cf42ca",
"max_forks_repo_licenses": [
"AFL-3.0"
],
"max_forks_repo_name": "RemcodM/thesis-ecore-groove-formalisation",
"max_forks_repo_path": "thesis/tex/01_introduction/01_formalisation_of_model_transformations.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a0e860c4b60deb2f3798ae2ffc09f18a98cf42ca",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"AFL-3.0"
],
"max_issues_repo_name": "RemcodM/thesis-ecore-groove-formalisation",
"max_issues_repo_path": "thesis/tex/01_introduction/01_formalisation_of_model_transformations.tex",
"max_line_length": 959,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a0e860c4b60deb2f3798ae2ffc09f18a98cf42ca",
"max_stars_repo_licenses": [
"AFL-3.0"
],
"max_stars_repo_name": "RemcodM/thesis-ecore-groove-formalisation",
"max_stars_repo_path": "thesis/tex/01_introduction/01_formalisation_of_model_transformations.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 530,
"size": 2958
} |
\chapter{Requirements}
% Specify all of the software requirements to a level of detail sufficient to enable designers to design a software
% system to satisfy those requirements.
% Specify all of the software requirements to a level of detail sufficient to enable testers to test that the software
% system satisfies those requirements.
% At a minimum, describe every input (stimulus) into the software system, every output (response) from the software
% system, and all functions performed by the software system in response to an input or in support of an output.
% The specific requirements should:
% a) Be stated in conformance with all the characteristics described in subclause 5.2 of this International
% Standard.
% b) Be cross-referenced to earlier documents that relate.
% c) Be uniquely identifiable.
\section{Functions}
% Define the fundamental actions that have to take place in the software in accepting and processing the inputs and in processing and generating the outputs, including:
% a) validity checks on the inputs;
% b) exact sequence of operations;
% c) responses to abnormal situations, including:
% 1) overflow;
% 2) communication facilities;
% 3) hardware faults and failures; and
% 4) error handling and recovery;
% d) effect of parameters;
% e) relationship of outputs to inputs, including:
% 1) input/output sequences; and
% 2) formulas for input to output conversion.
% It may be appropriate to partition the functional requirements into sub-functions or sub-processes.
% This does not imply that the software design will also be partitioned that way.
\section{Performance Requirements}
% Specify both the static and the dynamic numerical requirements placed on the software or on human interaction with the software as a whole.
% Static numerical requirements may include the following:
% a) the number of terminals to be supported;
% b) the number of simultaneous users to be supported; and
% c) the amount and type of information to be handled.
% Static numerical requirements are sometimes identified under a separate section entitled Capacity.
% Dynamic numerical requirements may include, for example, the numbers of transactions and tasks and the amount of data to be processed within certain time periods for both normal and peak workload conditions.
% The performance requirements should be stated in measurable terms.
% For example, "95 \% of the transactions shall be processed in less than 1 s."
% rather than "An operator shall not have to wait for the transaction to complete."
% NOTE Numerical limits applied to one specific function are normally specified as part of the processing subparagraph description of that function.
\section{Usability Requirements}
% Define usability and quality in use requirements and objectives for the software system that can include measurable effectiveness, efficiency, satisfaction criteria and avoidance of harm that could arise from use in specific contexts of use.
% NOTE Additional guidance on usability requirements can be found in ISO/IEC TR 25060.
\section{Interface Requirements}
% Define all inputs into and outputs from the software system. The description should complement the interface descriptions in 9.6.4.1 through 9.6.4.5, and should not repeat information there.
% Each interface defined should include the following content:
% a) name of item;
% b) description of purpose;
% c) source of input or destination of output;
% d) valid range, accuracy and/or tolerance;
% e) units of measure;
% f) timing;
% g) relationships to other inputs/outputs;
% h) data formats;
% i) command formats; and
% j) data items or information included in the input and output.
\section{Logical Database Requirements}
% Specify the logical requirements for any information that is to be placed into a database, including:
% a) types of information used by various functions;
% b) frequency of use;
% c) accessing capabilities;
% d) data entities and their relationships;
% e) integrity constraints;
% f) security; and
% g) data retention requirements
\section{Design Constraints}
% Specify constraints on the system design imposed by external standards, regulatory requirements, or project
% limitations.
\section{Software System Attributes}
% Specify the required attributes of the software product. The following is a partial list of examples:
% a) Reliability - Specify the factors required to establish the required reliability of the software system
% at time of delivery.
% b) Availability - Specify the factors required to guarantee a defined availability level for the entire
% system such as checkpoint, recovery, and restart.
% c) Security - Specify the requirements to protect the software from accidental or malicious access,
% use, modification, destruction, or disclosure. Specific requirements in this area could include the
% need to:
% 1) Utilize certain cryptographic techniques;
% 2) Keep specific log or history data sets;
% 3) Assign certain functions to different modules;
% 4) Restrict communications between some areas of the program;
% 5) Check data integrity for critical variables;
% 6) Assure data privacy.
% d) Maintainability - Specify attributes of software that relate to the ease of maintenance of the
% software itself. These may include requirements for certain modularity, interfaces, or complexity
% limitation. Requirements should not be placed here just because they are thought to be good design
% practices.
% e) Portability - Specify attributes of software that relate to the ease of porting the software to other
% host machines and/or operating systems, including:
% 1) Percentage of elements with host-dependent code;
% 2) Percentage of code that is host dependent;
% 3) Use of a proven portable language;
% 4) Use of a particular compiler or language subset;
% 5) Use of a particular operating system.
\section{Supporting Information}
% Additional supporting information to be considered includes:
% a) sample input/output formats, descriptions of cost analysis studies or results of user surveys;
% b) supporting or background information that can help the readers of the SRS;
% c) a description of the problems to be solved by the software; and
% d) special packaging instructions for the code and the media to meet security, export, initial loading or other requirements.
% The SRS should explicitly state whether or not these information items are to be considered part of the requirements.
"alphanum_fraction": 0.758682367,
"avg_line_length": 54.1048387097,
"ext": "tex",
"hexsha": "4f902cb9d599d4fff106638ce464ec1009851dd8",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-04-10T15:41:46.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-04-10T15:41:46.000Z",
"max_forks_repo_head_hexsha": "3e11af717099a0856193af2e152d884e0974a968",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "bonellia/srs-ieee-latex",
"max_forks_repo_path": "chapters/requirements.tex",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "3e11af717099a0856193af2e152d884e0974a968",
"max_issues_repo_issues_event_max_datetime": "2021-04-10T15:47:49.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-04-10T15:47:49.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "bonellia/srs-ieee-latex",
"max_issues_repo_path": "chapters/requirements.tex",
"max_line_length": 243,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "3e11af717099a0856193af2e152d884e0974a968",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "bonellia/srs-ieee-latex",
"max_stars_repo_path": "chapters/requirements.tex",
"max_stars_repo_stars_event_max_datetime": "2021-04-11T19:57:09.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-04-10T17:15:50.000Z",
"num_tokens": 1388,
"size": 6709
} |
%%%%%%%%%%%%%%%%%%%%% chapter.tex %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% sample chapter
%
% Use this file as a template for your own input.
%
%%%%%%%%%%%%%%%%%%%%%%%% Springer-Verlag %%%%%%%%%%%%%%%%%%%%%%%%%%
\chapter{Chapter Heading}
\label{intro} % Always give a unique label
% use \chaptermark{}
% to alter or adjust the chapter heading in the running head
Your text goes here. Separate text sections with the standard \LaTeX\
sectioning commands.
\section{Section Heading}
\label{sec:1}
% Always give a unique label
% and use \ref{<label>} for cross-references
% and \cite{<label>} for bibliographic references
% use \sectionmark{}
% to alter or adjust the section heading in the running head
Your text goes here. Use the \LaTeX\ automatism for your citations
\cite{monograph}.
\subsection{Subsection Heading}
\label{sec:2}
Your text goes here.
\begin{equation}
\vec{a}\times\vec{b}=\vec{c}
\end{equation}
\subsubsection{Subsubsection Heading}
Your text goes here. Use the \LaTeX\ automatism for cross-references as
well as for your citations, see Sect.~\ref{sec:1}.
\paragraph{Paragraph Heading} %
Your text goes here.
\subparagraph{Subparagraph Heading.} Your text goes here.%
%
\index{paragraph}
% Use the \index{} command to code your index words
%
% For tables use
%
\begin{table}
\centering
\caption{Please write your table caption here}
\label{tab:1} % Give a unique label
%
% For LaTeX tables use
%
\begin{tabular}{lll}
\hline\noalign{\smallskip}
first & second & third \\
\noalign{\smallskip}\hline\noalign{\smallskip}
number & number & number \\
number & number & number \\
\noalign{\smallskip}\hline
\end{tabular}
\end{table}
%
%
% For figures use
%
\begin{figure}
\centering
% Use the relevant command for your figure-insertion program
% to insert the figure file.
% For example, with the option graphics use
\includegraphics[height=4cm]{figure.eps}
%
% If not, use
%\picplace{5cm}{2cm} % Give the correct figure height and width in cm
%
\caption{Please write your figure caption here}
\label{fig:1} % Give a unique label
\end{figure}
%
% For built-in environments use
%
\begin{theorem}
Theorem text goes here.
\end{theorem}
%
% or
%
\begin{lemma}
Lemma text goes here.
\end{lemma}
%
%
% Problems or Exercises should be sorted chapterwise
\section*{Problems}
\addcontentsline{toc}{section}{Problems}
%
% Use the following environment.
% Don't forget to label each problem;
% the label is needed for the solutions' environment
\begin{prob}
\label{prob1}
The problem\footnote{Footnote} is described here. The
problem is described here. The problem is described here.
\end{prob}
\begin{prob}
\label{prob2}
\textbf{Problem Heading}\\
(a) The first part of the problem is described here.\\
(b) The second part of the problem is described here.
\end{prob}
%
% !TeX root = ../main.tex
\section{Conclusion \& Outlook}
\begin{frame}
\frametitle{Agenda}
\tableofcontents[currentsection,hideallsubsections]
\end{frame}
\subsection{Conclusion}
\begin{frame}
\frametitle{Conclusion}
\begin{itemize}[<+->]
\item Design lifetime extension
\begin{itemize}
\item Lifetime parameters
\item Lifetime substitution
\item Lifetime argument inference
\item Subtyping
\end{itemize}
\item Implementation
\begin{itemize}
\item 2388 added and 464 removed lines
\item additional: new test cases with 549 lines
\item independent bug fixes: 665 added and 96 removed lines
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}
\frametitle{Goals}
\begin{itemize}[<+->]
\item Backwards compatibility (as much as possible)
\begin{itemize}
\item[\checkmark] Yes, except for new keyword \whileyinline{this}
\end{itemize}
\item Keep the language simple
\begin{itemize}
\item[\checkmark] Yes, introduced only necessary concepts
\end{itemize}
\item Develop a basis for memory management without garbage collection
\begin{itemize}
\item[\checkmark] Next slide!
\end{itemize}
\end{itemize}
\end{frame}
\subsection{Outlook}
\begin{frame}
\frametitle{Outlook: Memory Management}
\begin{tikzpicture}[scale=.8]
\stacktop{}
\separator
\cell{} \cellcomL{\texttt{allocations:}} \coordinate (A) at (currentcell.east);
\separator
\cell{} \cellcomL{\texttt{x:}} \coordinate (B) at (currentcell.east);
\separator
\stackbottom{}
\cell[draw=none]{Stack}
\drawstruct{(5,1)}
\structcell{\texttt{length = 1}} \coordinate (C) at (currentcell.north west);
\structcell{} \coordinate (D) at (currentcell.east);
\drawstruct{(8,-2)}
\structcell{\textit{dynamic object}} \coordinate (E) at (currentcell.north west);
\draw[->] (A) -- ($(C)-(2mm,1mm)$);
\draw[->] (B) -- ($(E)-(2mm,2mm)$);
\draw[->] (D) -- ($(E)+(2mm,2mm)$);
\draw (6,-4) node {Heap};
\end{tikzpicture}
\end{frame}
\begin{frame}
\begin{center}
\LARGE
Thank you for your attention!
\vspace*{1cm}
\pause
Questions?
\end{center}
\end{frame}
\documentclass[]{article}
\usepackage{lmodern}
\usepackage{amssymb,amsmath}
\usepackage{ifxetex,ifluatex}
\usepackage{fixltx2e} % provides \textsubscript
\ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\else % if luatex or xelatex
\ifxetex
\usepackage{mathspec}
\else
\usepackage{fontspec}
\fi
\defaultfontfeatures{Ligatures=TeX,Scale=MatchLowercase}
\fi
% use upquote if available, for straight quotes in verbatim environments
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
% use microtype if available
\IfFileExists{microtype.sty}{%
\usepackage{microtype}
\UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts
}{}
\usepackage[margin=1in]{geometry}
\usepackage{hyperref}
\hypersetup{unicode=true,
pdftitle={Principal Component Analysis},
pdfauthor={Dataset new\_data},
pdfborder={0 0 0},
breaklinks=true}
\urlstyle{same} % don't use monospace font for urls
\usepackage{color}
\usepackage{fancyvrb}
\newcommand{\VerbBar}{|}
\newcommand{\VERB}{\Verb[commandchars=\\\{\}]}
\DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}}
% Add ',fontsize=\small' for more characters per line
\usepackage{framed}
\definecolor{shadecolor}{RGB}{248,248,248}
\newenvironment{Shaded}{\begin{snugshade}}{\end{snugshade}}
\newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}}
\newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{#1}}
\newcommand{\DecValTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}}
\newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}}
\newcommand{\FloatTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}}
\newcommand{\ConstantTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\CharTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\SpecialCharTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\StringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\VerbatimStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\SpecialStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\ImportTok}[1]{#1}
\newcommand{\CommentTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}}
\newcommand{\DocumentationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\AnnotationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\CommentVarTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\OtherTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{#1}}
\newcommand{\FunctionTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\VariableTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\ControlFlowTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}}
\newcommand{\OperatorTok}[1]{\textcolor[rgb]{0.81,0.36,0.00}{\textbf{#1}}}
\newcommand{\BuiltInTok}[1]{#1}
\newcommand{\ExtensionTok}[1]{#1}
\newcommand{\PreprocessorTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}}
\newcommand{\AttributeTok}[1]{\textcolor[rgb]{0.77,0.63,0.00}{#1}}
\newcommand{\RegionMarkerTok}[1]{#1}
\newcommand{\InformationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\WarningTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\AlertTok}[1]{\textcolor[rgb]{0.94,0.16,0.16}{#1}}
\newcommand{\ErrorTok}[1]{\textcolor[rgb]{0.64,0.00,0.00}{\textbf{#1}}}
\newcommand{\NormalTok}[1]{#1}
\usepackage{graphicx,grffile}
\makeatletter
\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi}
\def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi}
\makeatother
% Scale images if necessary, so that they will not overflow the page
% margins by default, and it is still possible to overwrite the defaults
% using explicit options in \includegraphics[width, height, ...]{}
\setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio}
\IfFileExists{parskip.sty}{%
\usepackage{parskip}
}{% else
\setlength{\parindent}{0pt}
\setlength{\parskip}{6pt plus 2pt minus 1pt}
}
\setlength{\emergencystretch}{3em} % prevent overfull lines
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\setcounter{secnumdepth}{0}
% Redefines (sub)paragraphs to behave more like sections
\ifx\paragraph\undefined\else
\let\oldparagraph\paragraph
\renewcommand{\paragraph}[1]{\oldparagraph{#1}\mbox{}}
\fi
\ifx\subparagraph\undefined\else
\let\oldsubparagraph\subparagraph
\renewcommand{\subparagraph}[1]{\oldsubparagraph{#1}\mbox{}}
\fi
%%% Use protect on footnotes to avoid problems with footnotes in titles
\let\rmarkdownfootnote\footnote%
\def\footnote{\protect\rmarkdownfootnote}
%%% Change title format to be more compact
\usepackage{titling}
% Create subtitle command for use in maketitle
\newcommand{\subtitle}[1]{
\posttitle{
\begin{center}\large#1\end{center}
}
}
\setlength{\droptitle}{-2em}
\title{Principal Component Analysis}
\pretitle{\vspace{\droptitle}\centering\huge}
\posttitle{\par}
\author{Dataset new\_data}
\preauthor{\centering\large\emph}
\postauthor{\par}
\date{}
\predate{}\postdate{}
\begin{document}
\maketitle
This dataset contains 150 individuals and 2 variables.
\begin{center}\rule{0.5\linewidth}{\linethickness}\end{center}
\subsubsection{1. Study of the outliers}\label{study-of-the-outliers}
The analysis of the graphs does not detect any outlier.
\begin{center}\rule{0.5\linewidth}{\linethickness}\end{center}
\subsubsection{2. Inertia distribution}\label{inertia-distribution}
The inertia of the first dimensions shows whether there are strong
relationships between the variables and suggests the number of dimensions
that should be studied.
The first two dimensions of the PCA express \textbf{100\%} of the total
dataset inertia; that means that 100\% of the total variability of the
cloud of individuals (or variables) is explained by this plane. The
inertia observed on the first plane is smaller than the reference value,
which equals \textbf{100\%}, and is therefore low in comparison (the
reference value is the 0.95-quantile of the distribution of inertia
percentages obtained by simulating 1429 data tables of equivalent size
on the basis of a normal distribution). However, the inertia related to
the first dimension is greater than its reference value of
\textbf{57.75\%}. Even though the inertia projected on the first plane is
not significant, the inertia explained by the first dimension is significant.
\begin{center}\includegraphics{Investigate_files/figure-latex/unnamed-chunk-3-1} \end{center}
\textbf{Figure 2 - Decomposition of the total inertia on the components
of the PCA} \emph{The first factor is largely dominant: by itself it
expresses 98.14\% of the data variability.} \emph{Note that in such a case,
the variability related to the other components might be meaningless,
despite a high percentage.}
An estimation of the right number of axes to interpret suggests
restricting the analysis to the description of the first axis only. This
axis presents an amount of inertia greater than that obtained from the
0.95-quantile of random distributions (98.14\% against 57.75\%). This
observation suggests that only this axis carries real information.
As a consequence, the description is restricted to this axis.
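For reference, the inertia percentages quoted above correspond to the eigenvalues of the correlation matrix expressed as percentages of their sum. The following Python sketch is illustrative only: the data are simulated and this is not the FactoMineR code that generated this report.
\begin{verbatim}
# Illustrative only: inertia percentages from the eigenvalues of the
# correlation matrix of two highly correlated, simulated variables.
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=150)
y = x + 0.1 * rng.normal(size=150)
X = np.column_stack([x, y])

corr = np.corrcoef(X, rowvar=False)          # 2 x 2 correlation matrix
eigenvalues = np.sort(np.linalg.eigvalsh(corr))[::-1]
inertia_pct = 100 * eigenvalues / eigenvalues.sum()
print(inertia_pct)                           # first axis close to 100
\end{verbatim}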
\begin{center}\rule{0.5\linewidth}{\linethickness}\end{center}
\subsubsection{3. Description of the dimension
1}\label{description-of-the-dimension-1}
\begin{center}\includegraphics{Investigate_files/figure-latex/unnamed-chunk-4-1} \end{center}
\textbf{Figure 3.1 - Individuals factor map (PCA)} \emph{The labeled
individuals are those with the highest contribution to the construction
of the plane.}
\begin{center}\includegraphics{Investigate_files/figure-latex/unnamed-chunk-5-1} \end{center}
\textbf{Figure 3.2 - Variables factor map (PCA)} \emph{The labeled
variables are those best represented on the plane.}
\begin{center}\rule{0.5\linewidth}{\linethickness}\end{center}
\textbf{Dimension 1} opposes individuals such as \emph{108},
\emph{115}, \emph{116}, \emph{123}, \emph{130}, \emph{135} and
\emph{142} (to the right of the graph, characterized by a strongly
positive coordinate on the axis) to individuals characterized by a
strongly negative coordinate on the axis (to the left of the graph).
The group in which the individuals \emph{108}, \emph{115}, \emph{116},
\emph{123}, \emph{130}, \emph{135} and \emph{142} stand (characterized
by a positive coordinate on the axis) shares:
\begin{itemize}
\tightlist
\item
high values for the variables \emph{petal\_width} and
\emph{petal\_length} (variables are sorted from the strongest).
\end{itemize}
Group 2 (characterized by a negative coordinate on the axis) shares:
\begin{itemize}
\tightlist
\item
low values for the variables \emph{petal\_length} and
\emph{petal\_width} (variables are sorted from the weakest).
\end{itemize}
Note that the variables \emph{petal\_length} and \emph{petal\_width} are
highly correlated with this dimension (correlations of 0.98 and 0.98,
respectively). These variables could therefore, by themselves, summarize
dimension 1.
\begin{center}\rule{0.5\linewidth}{\linethickness}\end{center}
\subsubsection{4. Classification}\label{classification}
\begin{center}\includegraphics{Investigate_files/figure-latex/unnamed-chunk-7-1} \end{center}
\textbf{Figure 4 - Ascending Hierarchical Classification of the
individuals.} \emph{The classification made on individuals reveals 3
clusters.}
\textbf{Cluster 1} is made of individuals sharing:
\begin{itemize}
\tightlist
\item
low values for the variables \emph{petal\_length} and
\emph{petal\_width} (variables are sorted from the weakest).
\end{itemize}
\textbf{Cluster 2} is made of individuals such as \emph{135}. This
group is characterized by:
\begin{itemize}
\tightlist
\item
high values for the variable \emph{petal\_length}.
\end{itemize}
\textbf{Cluster 3} is made of individuals such as \emph{108},
\emph{115}, \emph{116}, \emph{123}, \emph{130}, \emph{137}, \emph{142},
\emph{145} and \emph{146}. This group is characterized by:
\begin{itemize}
\tightlist
\item
high values for the variables \emph{petal\_width} and
\emph{petal\_length} (variables are sorted from the strongest).
\end{itemize}
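A classification of this kind can be approximated outside FactoMineR, for instance by hierarchical clustering of the principal component scores. The following Python sketch is illustrative only: it uses simulated data and scipy's Ward linkage, not the HCPC call behind this report.
\begin{verbatim}
# Illustrative only: Ward hierarchical clustering on PCA scores, 3 clusters.
import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster

rng = np.random.default_rng(0)
x = rng.normal(size=150)
y = x + 0.1 * rng.normal(size=150)
Z = np.column_stack([x, y])
Z = (Z - Z.mean(axis=0)) / Z.std(axis=0)     # standardise the two variables

U, s, Vt = np.linalg.svd(Z, full_matrices=False)
scores = U * s                               # principal component scores

tree = linkage(scores, method="ward")        # ascending hierarchical classification
clusters = fcluster(tree, t=3, criterion="maxclust")
print(np.bincount(clusters)[1:])             # sizes of the 3 clusters
\end{verbatim}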
\begin{center}\rule{0.5\linewidth}{\linethickness}\end{center}
\subsection{Annexes}\label{annexes}
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{dimdesc}\NormalTok{(res, }\DataTypeTok{axes =} \DecValTok{1}\OperatorTok{:}\DecValTok{1}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
$Dim.1
$Dim.1$quanti
correlation p.value
petal_width 0.9906455 6.325627e-130
petal_length 0.9906455 6.325627e-130
\end{verbatim}
\textbf{Figure 5 - List of variables characterizing the dimensions of
the analysis.}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{res.hcpc}\OperatorTok{$}\NormalTok{desc.var}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
Link between the cluster variable and the quantitative variables
================================================================
Eta2 P-value
petal_width 0.9414745 2.511036e-91
petal_length 0.9382122 1.353088e-89
Description of each cluster by quantitative variables
=====================================================
$`1`
v.test Mean in category Overall mean sd in category
petal_width -10.83344 0.244 1.198667 0.1061320
petal_length -11.26285 1.464 3.758667 0.1717673
Overall sd p.value
petal_width 0.7606126 2.390051e-27
petal_length 1.7585292 2.001863e-29
$`2`
v.test Mean in category Overall mean sd in category
petal_length 2.717692 4.296154 3.758667 0.5003697
Overall sd p.value
petal_length 1.758529 0.006573892
$`3`
v.test Mean in category Overall mean sd in category
petal_width 9.441184 2.056250 1.198667 0.2397101
petal_length 8.609194 5.566667 3.758667 0.5432669
Overall sd p.value
petal_width 0.7606126 3.685897e-21
petal_length 1.7585292 7.357580e-18
\end{verbatim}
\textbf{Figure 6 - List of variables characterizing the clusters of the
classification.}
\end{document}
\documentclass[12pt,a4paper]{article}
\setlength\parindent{0pt}
\thispagestyle{empty}
\begin{document}
\setcounter{secnumdepth}{0}
\section{Back Propagation}
$w_{ji}^l$: weight from neuron $i$ in layer $l-1$ to neuron $j$ in layer $l$. \\
$b_j^l$: bias of neuron $j$ in layer $l$. \\
$a_j^l$: activation of neuron $j$ in layer $l$. \\
$z_j^l = \sum_{i} w_{ji}^l a_i^{l-1} + b_j^l$, \quad $a_j^l = \sigma(z_j^l)$ \\
$z^l = w^l a^{l-1} + b^l$, \quad $a^l = \sigma(z^l)$ \\
Cost function (for a single training example; in practice averaged over the $n$ examples): $C = \frac{1}{2} \sum_{i} (target_i - a_i^L)^2$ \\
Cost function derivative: $\frac{\partial C}{\partial a_i^L} = a_i^L - target_i$ \\
$\frac{\partial C}{\partial w}$ and $\frac{\partial C}{\partial b}$ represent how quickly the cost function changes with respect to weight $w$ and bias $b$. Back propagation relates the errors $\delta^l_i$ to $\frac{\partial C}{\partial w}$ and $\frac{\partial C}{\partial b}$. \\
$\delta^l_i$: error of neuron $i$ in layer $l$. \\
BP1: $\delta _i^L = \frac{\partial C}{\partial a_i^L} \sigma'(z_i^L) = (a_i^L - target_i) \sigma'(z_i^L)$ \\
Matrix form: $\delta ^L = \nabla _a C \odot \sigma'(z^L) = (a^L - target) \odot \sigma'(z^L)$ \\
BP2: $\delta _i^l = \frac{\partial C}{\partial z_i^l} = \sum_{j} \frac{\partial C}{\partial z_j^{l+1}} \frac{\partial z_j^{l+1}}{\partial z_i^l} = \sum_{j} \delta _j^{l+1} \frac{\partial z_j^{l+1}}{\partial z_i^l} = \sum_{j} \delta _j^{l+1} w_{ji}^{l+1} \sigma '(z_i^l)$ \\
$( \frac{\partial z_j^{l+1}}{\partial z_i^l} = w_{ji}^{l+1} \sigma '(z_i^l) )$ \\
BP1 computes the error in layer $L$; BP2 is then applied repeatedly to propagate the error back through the earlier layers. \\
BP3: $\frac{\partial C}{\partial b_i^l} = \delta _i^l$ \\
Matrix form: $\frac{\partial C}{\partial b} = \delta$ \\
BP4: $\frac{\partial C}{\partial w_{ji}^l} = a_i^{l-1} \delta _j^l$ \\
Matrix form: $\frac{\partial C}{\partial w} = a_{in} \delta _{out}$ \\
$a_{in}$ is the activation of the neuron input to the weight, and $\delta _{out}$ is the error of the neuron output from the weight. \\
Update weights according to $w_{ji}^l \leftarrow w_{ji}^l - \eta \frac{\partial C}{\partial w_{ji}^l}$, where $\eta$ is the learning rate. \\
Update biases according to $b_j^l \leftarrow b_j^l - \eta \frac{\partial C}{\partial b_j^l}$. \\
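The following is a minimal numpy sketch of BP1--BP4 together with the update step. The network shape, the sigmoid activation, and the learning rate value are illustrative assumptions, not part of the notes above.
\begin{verbatim}
# Minimal numpy sketch of BP1-BP4 for a fully connected network with
# sigmoid activations and quadratic cost (illustrative shapes and names).
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def sigmoid_prime(z):
    s = sigmoid(z)
    return s * (1.0 - s)

rng = np.random.default_rng(0)
sizes = [2, 3, 1]                                   # layer sizes
W = [rng.normal(size=(m, n)) for n, m in zip(sizes[:-1], sizes[1:])]
b = [rng.normal(size=(m, 1)) for m in sizes[1:]]

def backprop(x, target):
    a, activations, zs = x, [x], []
    for Wl, bl in zip(W, b):                        # forward pass, store z, a
        z = Wl @ a + bl
        zs.append(z)
        a = sigmoid(z)
        activations.append(a)
    delta = (activations[-1] - target) * sigmoid_prime(zs[-1])   # BP1
    grad_W, grad_b = [None] * len(W), [None] * len(b)
    grad_W[-1] = delta @ activations[-2].T                       # BP4
    grad_b[-1] = delta                                           # BP3
    for l in range(2, len(sizes)):                               # BP2, backwards
        delta = (W[-l + 1].T @ delta) * sigmoid_prime(zs[-l])
        grad_W[-l] = delta @ activations[-l - 1].T               # BP4
        grad_b[-l] = delta                                       # BP3
    return grad_W, grad_b

x = np.array([[0.5], [0.2]])
target = np.array([[1.0]])
gW, gb = backprop(x, target)
eta = 0.5                                           # learning rate
W = [Wl - eta * g for Wl, g in zip(W, gW)]          # weight update
b = [bl - eta * g for bl, g in zip(b, gb)]          # bias update
\end{verbatim}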
\end{document}
\documentclass{article}
\usepackage[tbtags]{amsmath}
\usepackage{authblk}
\usepackage{bookmark}
\newcommand{\DF}{\ensuremath{=_{df}}}
\begin{document}
\title{BFO-FOL: A First-Order Logic Formalization of Basic Formal Ontology 2.0}
\author[A]{Thomas Bittner}
\author[B]{Mathias Brochhausen}
\author[A]{Randall R. Dipert}
\author[C]{Pierre Grenon}
\author[B]{Bill Hogan}
\author[D]{Leonard Jacuzzo}
\author[E]{Chris Mungall}
\author[F,G]{Fabian Neuhaus}
\author[A]{Mark Ressler}
\author[A]{Alan Ruttenberg}
\affil[A]{The University at Buffalo, Buffalo, NY, USA}
\affil[B]{University of Arkansas for Medical Sciences, Little Rock, AR, USA}
\affil[C]{European Bioinformatics Institute, Hinxton, UK}
\affil[D]{CTG, Buffalo, NY, USA}
\affil[E]{Lawrence Berkeley National Laboratory, Berkeley, CA, USA}
\affil[F]{National Institute of Standards and Technology, Gaithersburg, MD, USA}
\affil[G]{University of Maryland Baltimore County, MD, USA}
\date{July 20, 2012}
\maketitle
\begin{abstract}
This article presents a first-order logic formalization of the revised 2.0 version of Basic Formal Ontology (BFO).
\end{abstract}
BFO-FOL is a formal system specifying the axioms and definitions for expressing Basic Formal Ontology version 2.0 in classical first-order formal logic.
Basic Formal Ontology (BFO) is an upper level ontology initially developed by Barry Smith and Pierre Grenon. The BFO specification is currently undergoing a major revision to version 2.0, which will be supported by a number of formal implementations, including implementations using OWL and CLIF, among others. The first-order logic formalization in BFO-FOL will serve as a foundation for all such implementations.
The BFO 2.0 specification is currently under development, so the formalization presented here represents the state of the specification at the time of writing. The bracketed references of the form [\emph{nnn-nnn}] are to the correspondingly identified definitions, elucidations, axioms, and theorems in the BFO 2.0 specification document \cite{BFO2}.
\section{Formalization}
BFO-FOL is an extension of classical first-order formal logic with identity. It can be represented using any standard axiomatization of the logical calculus. The formalization presented here uses the following symbols for negation, conjunction, disjunction, material implication, biconditional implication, universal and particular quantification, respectively: ${\neg,}\ {\wedge,}\ {\vee,}\ {\supset,}\ {\equiv,}\ {\forall,}\ {\exists}$.
\section{Predicates}
The predicates of BFO-FOL are divided into categorial predicates, which are intended to represent categories or universals, and relational predicates, which are intended to represent relations that hold between individuals within those categories.
According to the meta-theory of BFO, categorial predicates are interpreted as expressing the instantiation of the universal indicated by the categorial predicate name. For example, $Object(a)$ signifies the instantiation of the universal $Object$ by the particular $a$.
Where feasible, predicates have been defined in terms of more primitive predicates. While it is preferable to minimize the number of primitive predicates, some predicates that would seem to be definable needed to be taken as primitive. One reason is that the likely definitions for these predicates would rely on more primitive predicates that are not asserted as categories or relations in BFO. For example, given the primitive category SpatialRegion, it would seem that the category OneDimensionalSpatialRegion should be definable in terms of that primitive category. However, such a definition would need to rely on dimensions, and Dimension is not asserted as a category of BFO.
\subsection{Primitive Categorial Predicates}
The following categorial predicates are taken as primitive:
\begin{description}
\item[Entity(a)] --- Intended interpretation: ``$a$ is an entity''. [001-001]
\item[Continuant(a)] --- ``$a$ is a continuant''. [008-002]
\item[MaterialEntity(a)] --- ``$a$ is a material entity''. [019-002]
\item[Object(a)] --- ``$a$ is an object''. [024-001]
\item[ObjectAggregate(a)] --- ``$a$ is an object aggregate''. [025-004]
\item[FiatObjectPart(a)] --- ``$a$ is a fiat object part''. [027-004]
\item[Site(a)] --- ``$a$ is a site''. [034-002]
\item[SpatialRegion(a)] --- ``$a$ is a spatial region''. [035-001]
\item[ZeroDimensionalSpatialRegion(a)] --- ``$a$ is a zero-dimensional spatial region''. [037-001]
\item[OneDimensionalSpatialRegion(a)] --- ``$a$ is a one-dimensional spatial region''. [038-001]
\item[TwoDimensionalSpatialRegion(a)] --- ``$a$ is a two-dimensional spatial region''. [039-001]
\item[ThreeDimensionalSpatialRegion(a)] --- ``$a$ is a three-dimensional spatial region''. [040-001]
\item[Quality(a)] --- ``$a$ is a quality''. [055-001]
\item[RealizableEntity(a)] --- ``$a$ is a realizable entity''. [058-002]
\item[Role(a)] --- ``$a$ is a role''. [061-001]
\item[Disposition(a)] --- ``$a$ is a disposition''. [062-002]
\item[Function(a)] --- ``$a$ is a function''. [064-001]
\item[Occurrent(a)] --- ``$a$ is an occurrent''. [077-002]
\item[History(a)] --- ``$a$ is a history''. [138-001]
\item[SpatioTemporalRegion(a)] --- ``$a$ is a spatio-temporal region''. [095-001]
\item[TemporalRegion(a)] --- ``$a$ is a temporal region''. [100-001]
\item[ZeroDimensionalTemporalRegion(a)] --- ``$a$ is a zero-dimensional temporal region''. [102-001]
\item[OneDimensionalTemporalRegion(a)] --- ``$a$ is a one-dimensional temporal region''. [103-001]
\end{description}
% NOTE: CLIF does not seem to have specific support for defined predicates, so
% for the defined predicates below, '\equiv' should be replaced by the
% defined '\DF' command for the final LaTeX compilation, but only in the
% defined predicates sections. Further, the letter arguments should be
% replaced by their Greek equivalents, e.g. 'a' replaced by '$\alpha$'
% to make clear that these arguments are used in definitional schemata,
% not as individual names in the system.
\subsection{Defined Categorial Predicates}
The following categorial predicates are defined as indicated:
\begin{description}
\item[IndependentContinuant(a)] --- ``$a$ is an independent continuant''. [017-002]
\begin{equation}
\begin{split}
IndependentContinuant(a) \DF \\
(Continuant(a) \wedge {\neg}{\exists}(b, t)specificallyDependsOnAt(a, b, t))
\end{split}
\end{equation}
\item[ImmaterialEntity(a)] --- ``$a$ is an immaterial entity''. [028-001]
\begin{equation}
\begin{split}
ImmaterialEntity(a) \DF \\
(IndependentContinuant(a) \wedge {\neg}{\exists}(b, t)(MaterialEntity(b) \wedge \\
continuantPartOfAt(b, a, t)))
\end{split}
\end{equation}
\item[ContinuantFiatBoundary(a)] --- ``$a$ is a continuant fiat boundary''. [029-001]
\begin{equation}
\begin{split}
ContinuantFiatBoundary(a) \DF (ImmaterialEntity(a) \wedge \\
{\exists}(b)((ZeroDimensionalSpatialRegion(b) \vee \\
OneDimensionalSpatialRegion(b) \vee \\
TwoDimensionalSpatialRegion(b)) \wedge \\
{\forall}(t)locatedInAt(a, b, t)) \wedge \\
{\neg}{\exists}(c, t)(SpatialRegion(c) \wedge continuantPartOfAt(c, a, t)))
\end{split}
\end{equation}
\item[ZeroDimensionalContinuantFiatBoundary(a)] --- ``$a$ is a zero-dimensional continuant fiat boundary''. [031-001]
\begin{equation}
\begin{split}
ZeroDimensionalContinuantFiatBoundary(a) \DF \\
(ContinuantFiatBoundary(a) \wedge \\
{\exists}(b)(ZeroDimensionalSpatialRegion(b) \wedge \\
{\forall}(t)locatedInAt(a, b, t)))
\end{split}
\end{equation}
\item[OneDimensionalContinuantFiatBoundary(a)] --- ``$a$ is a one-dimensional continuant fiat boundary''. [032-001]
\begin{equation}
\begin{split}
OneDimensionalContinuantFiatBoundary(a) \DF \\
(ContinuantFiatBoundary(a) \wedge \\
{\exists}(b)(OneDimensionalSpatialRegion(b) \wedge \\
{\forall}(t)locatedInAt(a, b, t)))
\end{split}
\end{equation}
\item[TwoDimensionalContinuantFiatBoundary(a)] --- ``$a$ is a two-dimensional continuant fiat boundary''. [033-001]
\begin{equation}
\begin{split}
TwoDimensionalContinuantFiatBoundary(a) \DF \\
(ContinuantFiatBoundary(a) \wedge \\
{\exists}(b)(TwoDimensionalSpatialRegion(b) \wedge \\
{\forall}(t)locatedInAt(a, b, t)))
\end{split}
\end{equation}
\item[SpecificallyDependentContinuant(a)] --- ``$a$ is a specifically dependent continuant''. [050-003]
\begin{equation}
\begin{split}
SpecificallyDependentContinuant(a) \DF (Continuant(a) \wedge \\
{\forall}(t)(existsAt(a, t) \supset {\exists}(b)(IndependentContinuant(b) \wedge \\
{\neg}SpatialRegion(b) \wedge specificallyDependsOnAt(a, b, t))))
\end{split}
\end{equation}
\item[RelationalSpecificallyDependentContinuant(a)] --- ``$a$ is a relational specifically dependent continuant''. [131-004]
\begin{equation}
\begin{split}
RelationalSpecificallyDependentContinuant(a) \DF \\
(SpecificallyDependentContinuant(a) \wedge \\
{\forall}(t){\exists}(b, c)({\neg}SpatialRegion(b) \wedge {\neg}SpatialRegion(c) \wedge {\neg}(b = c) \wedge \\
{\neg}{\exists}(d)(continuantPartOfAt(d, b, t) \wedge continuantPartOfAt(d, c, t)) \\
\wedge specificallyDependsOnAt(a, b, t) \wedge \\
specificallyDependsOnAt(a, c, t)))
\end{split}
\end{equation}
\item[RelationalQuality(a)] --- ``$a$ is a relational quality''. [057-001]
\begin{equation}
\begin{split}
RelationalQuality(a) \DF {\exists}(b, c, t)(IndependentContinuant(b) \wedge \\
IndependentContinuant(c) \wedge \\
qualityOfAt(a, b, t) \wedge qualityOfAt(a, c, t))
\end{split}
\end{equation}
\item[GenericallyDependentContinuant(a)] --- ``$a$ is a generically dependent continuant''. [074-001]
\begin{equation}
\begin{split}
GenericallyDependentContinuant(a) \DF \\
(Continuant(a) \wedge {\exists}(b, t)genericallyDependsOnAt(a, b, t))
\end{split}
\end{equation}
\item[Process(a)] --- ``$a$ is a process''. [083-003]
\begin{equation}
\begin{split}
Process(a) \DF (Occurrent(a) \wedge \\
{\exists}(b)properTemporalPartOf(b, a) \wedge \\
{\exists}(c, t)(MaterialEntity(c) \wedge specificallyDependsOnAt(a, c, t)))
\end{split}
\end{equation}
\item[ProcessBoundary(a)] --- ``$a$ is a process boundary''. [084-001]
\begin{equation}
\begin{split}
ProcessBoundary(a) \DF {\exists}(p)(Process(p) \wedge \\
temporalPartOf(a, p) \wedge {\neg}{\exists}(b)properTemporalPartOf(b, a))
\end{split}
\end{equation}
\item[ProcessProfile(a)] --- ``$a$ is a process profile''. [093-002]
\begin{equation}
\begin{split}
ProcessProfile(a) \DF \\
{\exists}(b)(Process(b) \wedge processProfileOf(a, b))
\end{split}
\end{equation}
\end{description}
\subsection{Primitive Relational Predicates}
The following relational predicates are taken as primitive:
\begin{description}
\item[existsAt(a, t)] --- ``$a$ exists at temporal region $t$''. [118-002]
\item[continuantPartOfAt(a, b, t)] --- ``$a$ is a part of $b$ at temporal region $t$'', where $a$ and $b$ are continuants. [002-001]
\item[occurrentPartOf(a, b)] --- ``$a$ is a part of $b$'', where $a$ and $b$ are occurrents. [003-002]
\item[specificallyDependsOnAt(a, b, t)] --- ``$a$ specifically depends on $b$ at temporal region $t$''. [012-002]
\item[memberPartOfAt(a, b, t)] --- ``$a$ is a member of $b$ at temporal region $t$''. [026-004]
\item[occupiesSpatialRegionAt(a, r, t)] --- ``$a$ occupies spatial region $r$ at temporal region $t$''. [041-002]
\item[realizesAt(a, b, t)] --- ``$a$ realizes $b$ at temporal region $t$''. [059-003]
\item[hasMaterialBasisAt(a, b, t)] --- ``$a$ has the material basis $b$ at temporal region $t$''. [071-002]
\item[genericallyDependsOnAt(a, b, t)] --- ``$a$ generically depends on $b$ at temporal region $t$''. [072-002]
\item[concretizesAt(a, b, t)] --- ``$a$ concretizes $b$ at temporal region $t$'' where $a$ is a specifically dependent continuant and $b$ is a generically dependent continuant. [075-002]
\item[temporallyProjectsOnto(a, b)] --- ``$a$ projects onto $b$'', where $a$ is a spatiotemporal region, and $b$ is a temporal region. [080-003]
\item[spatiallyProjectsOntoAt(a, b, t)] --- ``$a$ projects onto $b$ at temporal region $t$'', where $a$ is a spatiotemporal region and $b$ is a spatial region. [081-003]
\item[occupiesSpatioTemporalRegion(a, r)] --- ``$a$ occupies spatio-temporal region $r$'', where $a$ is an occurrent, and $r$ is a spatiotemporal region. [082-003]
\item[occupiesTemporalRegion(a, t)] --- ``$a$ occupies temporal region $t$'', where $a$ is an occurrent, and $t$ is a temporal region. [132-001]
\item[hasParticipantAt(a, b, t)] --- ``$a$ has participant $b$ at temporal region $t$''. [086-003]
\item[processProfileOf(a, b)] --- ``$a$ is a process profile of $b$''. [094-005]
\item[historyOf(a, b)] --- ``$a$ is the history of $b$'', where $a$ is a history and $b$ is a material entity. [XXX-001]
\end{description}
\subsection{Defined Relational Predicates}
The following relational predicates are defined as indicated:
\begin{description}
\item[properContinuantPartOfAt(a, b, t)] --- ``$a$ is a proper part of $b$ at temporal region $t$'', where $a$ and $b$ are continuants. [004-001]
\begin{equation}
\begin{split}
properContinuantPartOfAt(a, b, t) \DF \\
(continuantPartOfAt(a, b, t) \wedge {\neg}(a = b))
\end{split}
\end{equation}
\item[properOccurrentPartOf(a, b)] --- ``$a$ is a proper part of $b$'', where $a$ and $b$ are occurrents. [005-001]
\begin{equation}
\begin{split}
properOccurrentPartOf(a, b) \DF \\
(occurrentPartOf(a, b) \wedge {\neg}(a = b))
\end{split}
\end{equation}
\item[hasContinuantPartAt(a, b, t)] --- ``$a$ has $b$ as a part at temporal region $t$'', where $a$ and $b$ are continuants. [006-001]
\begin{equation}
\begin{split}
hasContinuantPartAt(a, b, t) \DF continuantPartOfAt(b, a, t)
\end{split}
\end{equation}
\item[hasProperContinuantPartAt(a, b, t)] --- ``$a$ has $b$ as a proper part at temporal region $t$'', where $a$ and $b$ are continuants. [XXX-001]
\begin{equation}
\begin{split}
hasProperContinuantPartAt(a, b, t) \DF \\
properContinuantPartOfAt(b, a, t)
\end{split}
\end{equation}
\item[hasOccurrentPart(a, b)] --- ``$a$ has $b$ as a part'', where $a$ and $b$ are occurrents. [007-001]
\begin{equation}
\begin{split}
hasOccurrentPart(a, b) \DF occurrentPartOf(b, a)
\end{split}
\end{equation}
\item[hasProperOccurrentPart(a, b)] --- ``$a$ has $b$ as a proper part'', where $a$ and $b$ are occurrents. [XXX-001]
\begin{equation}
\begin{split}
hasProperOccurrentPart(a, b) \DF properOccurrentPartOf(b, a)
\end{split}
\end{equation}
\item[locatedInAt(a, b, t)] --- ``$a$ is located in $b$ at temporal region $t$''. [045-001]
\begin{equation}
\begin{split}
locatedInAt(a, b, t) \DF \\
(IndependentContinuant(a) \wedge IndependentContinuant(b) \wedge \\
{\exists}(r_1, r_2)(occupiesSpatialRegionAt(a, r_1, t) \wedge \\
occupiesSpatialRegionAt(b, r_2, t) \wedge \\
continuantPartOfAt(r_1, r_2, t)))
\end{split}
\end{equation}
\item[inheresInAt(a, b, t)] --- ``$a$ inheres in $b$ at temporal region $t$''. [051-002]
\begin{equation}
\begin{split}
inheresInAt(a, b, t) \DF \\
(DependentContinuant(a) \wedge IndependentContinuant(b) \wedge \\
{\neg}SpatialRegion(b) \wedge specificallyDependsOnAt(a, b, t))
\end{split}
\end{equation}
\item[bearerOfAt(a, b, t)] --- ``$a$ is the bearer of $b$ at temporal region $t$''. [053-004]
\begin{equation}
\begin{split}
bearerOfAt(a, b, t) \DF (specificallyDependsOnAt(b, a, t) \wedge \\
IndependentContinuant(a) \wedge {\neg}SpatialRegion(a) \wedge existsAt(b, t))
\end{split}
\end{equation}
\item[qualityOfAt(a, b, t)] --- ``$a$ is a quality of $b$ at temporal region $t$''. [056-002]
\begin{equation}
\begin{split}
qualityOfAt(a, b, t) \DF \\
(Quality(a) \wedge IndependentContinuant(b) \wedge \\
{\neg}SpatialRegion(b) \wedge specificallyDependsOnAt(a, b, t))
\end{split}
\end{equation}
\item[roleOfAt(a, b, t)] --- ``$a$ is a role of $b$ at temporal region $t$''. [065-001]
\begin{equation}
\begin{split}
roleOfAt(a, b, t) \DF (Role(a) \wedge inheresInAt(a, b, t))
\end{split}
\end{equation}
\item[dispositionOf(a, b, t)] --- ``$a$ is a disposition of $b$ at temporal region $t$''. [066-001]
\begin{equation}
\begin{split}
dispositionOf(a, b, t) \DF (Disposition(a) \wedge inheresInAt(a, b, t))
\end{split}
\end{equation}
\item[functionOf(a, b, t)] --- ``$a$ is a function of $b$ at temporal region $t$''. [067-001]
\begin{equation}
\begin{split}
functionOf(a, b, t) \DF (Function(a) \wedge inheresInAt(a, b, t))
\end{split}
\end{equation}
\item[hasRoleAt(a, b, t)] --- ``$a$ has the role $b$ at temporal region $t$''. [068-001]
\begin{equation}
\begin{split}
hasRoleAt(a, b, t) \DF roleOfAt(b, a, t)
\end{split}
\end{equation}
\item[hasDispositionAt(a, b, t)] --- ``$a$ has the disposition $b$ at temporal region $t$''. [069-001]
\begin{equation}
\begin{split}
hasDispositionAt(a, b, t) \DF dispositionOf(b, a, t)
\end{split}
\end{equation}
\item[hasFunctionAt(a, b, t)] --- ``$a$ has the function $b$ at temporal region $t$''. [070-001]
\begin{equation}
\begin{split}
hasFunctionAt(a, b, t) \DF functionOf(b, a, t)
\end{split}
\end{equation}
\item[temporalPartOf(a, b)] --- ``$a$ is a temporal part of $b$'', where $a$ and $b$ are occurrents. [078-003]
\begin{equation}
\begin{split}
temporalPartOf(a, b) \DF (occurrentPartOf(a, b) \wedge \\
{\exists}(t)(TemporalRegion(t) \wedge occupiesSpatioTemporalRegion(a, t)) \wedge \\
{\forall}(c, t_1)((Occurrent(c) \wedge occupiesSpatioTemporalRegion(c, t_1) \wedge \\
occurrentPartOf(t_1, r)) \supset \\
(occurrentPartOf(c, a) \equiv occurrentPartOf(c, b))))
\end{split}
\end{equation}
\item[properTemporalPartOf(a, b)] --- ``$a$ is a proper temporal part of $b$''. [116-001]
\begin{equation}
\begin{split}
properTemporalPartOf(a, b) \DF \\
(temporalPartOf(a, b) \wedge {\neg}(a = b))
\end{split}
\end{equation}
\item[occursIn(a, b)] --- ``$a$ occurs in $b$'', where $a$ is a process and $b$ is a material or immaterial entity. [XXX-001]
\begin{equation}
\begin{split}
occursIn(a, b) \DF (Process(a) \wedge \\
(MaterialEntity(b) \vee ImmaterialEntity(b)) \wedge \\
{\exists}(r)(SpatioTemporalRegion(r) \wedge \\
occupiesSpatioTemporalRegion(a, r)) \wedge \\
{\forall}(t)(TemporalRegion(t) \supset ((existsAt(a, t) \supset existsAt(b, t)) \wedge \\
{\exists}(s, s_1)(SpatialRegion(s) \wedge SpatialRegion(s_1) \wedge \\
spatiallyProjectsOntoAt(a, s, t) \wedge \\
occupiesSpatialRegionAt(b, s_1, t) \wedge \\
properContinuantPartOfAt(s, s_1, t)))))
\end{split}
\end{equation}
\item[hasHistory(a, b)] --- ``$a$ has $b$ as its history''. [XXX-001]
\begin{equation}
\begin{split}
hasHistory(a, b) \DF historyOf(b, a)
\end{split}
\end{equation}
\end{description}
\section{Axioms}
The following formulas are asserted as axioms in the system:
\begin{flushright}
\begin{equation}
\begin{split}
{\forall}(x, y, t)((continuantPartOfAt(x, y, t) \wedge \\
continuantPartOfAt(y, x, t)) \supset (x = y))
\end{split}
\end{equation}
[120-001]
\begin{equation}
\begin{split}
{\forall}(x, y, z, t)((continuantPartOfAt(x, y, t) \wedge \\
continuantPartOfAt(y, z, t)) \supset continuantPartOfAt(x, z, t))
\end{split}
\end{equation}
[110-001]
\begin{equation}
\begin{split}
{\forall}(x, y, t)((continuantPartOfAt(x, y, t) \wedge {\neg}(x = y)) \supset \\
{\exists}(z)(continuantPartOfAt(z, y, t) \wedge \\
{\neg}{\exists}(w)(continuantPartOfAt(w, x, t) \wedge continuantPartOfAt(w, z, t))))
\end{split}
\end{equation}
[121-001]
\begin{equation}
\begin{split}
{\forall}(x, y, t)({\exists}(v)(continuantPartOfAt(v, x, t) \wedge \\
continuantPartOfAt(v, y, t)) \supset \\
{\exists}(z){\forall}(u, w)((continuantPartOfAt(w, u, t) \equiv \\
(continuantPartOfAt(w, x, t) \wedge continuantPartOfAt(w, y, t))) \equiv \\
(z = u)))
\end{split}
\end{equation}
[122-001]
\begin{equation}
\begin{split}
{\forall}(x, y, t)((occurrentPartOf(x, y, t) \wedge occurrentPartOf(y, x, t)) \supset \\
(x = y))
\end{split}
\end{equation}
[123-001]
\begin{equation}
\begin{split}
{\forall}(x, y, z)((occurrentPartOf(x, y) \wedge occurrentPartOf(y, z)) \supset \\
occurrentPartOf(x, z))
\end{split}
\end{equation}
[112-001]
\begin{equation}
\begin{split}
{\forall}(x, y, t)((occurrentPartOf(x, y, t) \wedge {\neg}(x = y)) \supset \\
{\exists}(z)(occurrentPartOf(z, y, t) \wedge \\
{\neg}{\exists}(w)(occurrentPartOf(w, x, t) \wedge occurrentPartOf(w, z, t))))
\end{split}
\end{equation}
[124-001]
\begin{equation}
\begin{split}
{\forall}(x, y, t)({\exists}(v)(occurrentPartOf(v, x, t) \wedge occurrentPartOf(v, y, t)) \supset \\
{\exists}(z){\forall}(u, w)((occurrentPartOf(w, u, t) \equiv \\
(occurrentPartOf(w, x, t) \wedge occurrentPartOf(w, y, t))) \equiv \\
(z = u)))
\end{split}
\end{equation}
[125-001]
\begin{equation}
\begin{split}
{\forall}(x)(Continuant(x) \supset Entity(x))
\end{split}
\end{equation}
[008-002]
\begin{equation}
\begin{split}
{\forall}(x, y, t)(specificallyDependsOnAt(x, y, t) \supset \\
{\neg}{\exists}(z)(continuantPartOfAt(z, x, t) \wedge continuantPartOfAt(z, y, t)))
\end{split}
\end{equation}
[012-002]
\begin{equation}
\begin{split}
{\forall}(x, y)((Continuant(x) \wedge {\exists}(t)continuantPartOfAt(y, x, t)) \supset \\
Continuant(y))
\end{split}
\end{equation}
[009-002]
\begin{equation}
\begin{split}
{\forall}(x, y)((Continuant(x) \wedge {\exists}(t)hasContinuantPartAt(y, x, t)) \supset \\
Continuant(y))
\end{split}
\end{equation}
[126-001]
\begin{equation}
\begin{split}
{\forall}(x)(MaterialEntity(x) \supset {\exists}(t)(TemporalRegion(t) \wedge existsAt(x, t)))
\end{split}
\end{equation}
[011-002]
\begin{equation}
\begin{split}
{\forall}(x, y, t)((Occurrent(x) \wedge IndependentContinuant(y) \wedge \\
specificallyDependsOnAt(x, y, t)) \supset \\
{\forall}(t_1)(existsAt(x, t_1) \supset specificallyDependsOnAt(x, y, t_1)))
\end{split}
\end{equation}
[015-002]
\begin{equation}
\begin{split}
{\forall}(x, y, t)((Continuant(x) \wedge specificallyDependsOnAt(x, y, t)) \supset \\
{\forall}(t_1)(existsAt(x, t_1) \supset specificallyDependsOnAt(x, y, t_1)))
\end{split}
\end{equation}
[016-001]
\begin{equation}
\begin{split}
{\forall}(x, y, t)((Continuant(x) \wedge specificallyDependsOnAt(x, y, t)) \supset \\
existsAt(x, t))
\end{split}
\end{equation}
[127-001]
\begin{equation}
\begin{split}
{\forall}(x, y, t)((Continuant(x) \wedge specificallyDependsOnAt(x, y, t)) \supset \\
existsAt(y, t))
\end{split}
\end{equation}
[128-001]
\begin{equation}
\begin{split}
{\forall}(x, y, t)((Occurrent(x) \wedge Continuant(y) \wedge \\
specificallyDependsOnAt(x, y, t)) \supset \\
{\forall}(t_1)(existsAt(y, t_1) \supset existsAt(x, t_1)))
\end{split}
\end{equation}
[129-001]
\begin{equation}
\begin{split}
{\forall}(x, y, t)((Occurrent(x) \wedge Occurrent(y) \wedge \\
specificallyDependsOnAt(x, y, t)) \supset \\
existsAt(y, t))
\end{split}
\end{equation}
[130-001]
\begin{equation}
\begin{split}
{\forall}(x, t)((IndependentContinuant(x) \wedge existsAt(x, t)) \supset \\
{\exists}(y)(Entity(y) \wedge specificallyDependsOnAt(y, x, t)))
\end{split}
\end{equation}
[018-002]
\begin{equation}
\begin{split}
{\forall}(x)(MaterialEntity(x) \supset IndependentContinuant(x))
\end{split}
\end{equation}
[019-002]
\begin{equation}
\begin{split}
{\forall}(x)((Entity(x) \wedge \\
{\exists}(y, t)(MaterialEntity(y) \wedge continuantPartOfAt(y, x, t))) \supset \\
MaterialEntity(x))
\end{split}
\end{equation}
[020-002]
\begin{equation}
\begin{split}
{\forall}(x)(ObjectAggregate(x) \supset \\
(MaterialEntity(x) \wedge {\forall}(t)(existsAt(x, t) \supset \\
{\exists}(y, z)(Object(y) \wedge Object(z) \wedge \\
memberPartOfAt(y, x, t) \wedge memberPartOfAt(z, x, t) \wedge {\neg}(y = z))) \wedge \\
{\neg}{\exists}(w, t_1)(memberPartOfAt(w, x, t_1) \wedge {\neg}Object(w))))
\end{split}
\end{equation}
[025-004]
\begin{equation}
\begin{split}
{\forall}(x)(FiatObjectPart(x) \supset (MaterialEntity(x) \wedge {\forall}(t)(existsAt(x, t) \supset \\
{\exists}(y)(Object(y) \wedge properContinuantPartOfAt(x, y, t)))))
\end{split}
\end{equation}
[027-004]
\begin{equation}
\begin{split}
{\forall}(x, t)((ContinuantFiatBoundary(x) \wedge existsAt(x, t)) \supset \\
{\exists}(y)(SpatialRegion(y) \wedge occupiesSpatialRegionAt(x, y, t)))
\end{split}
\end{equation}
[XXX-001]
\begin{equation}
\begin{split}
{\forall}(x)(Site(x) \supset ImmaterialEntity(x))
\end{split}
\end{equation}
[034-002]
\begin{equation}
\begin{split}
{\forall}(x, t)((Site(x) \wedge existsAt(x, t)) \supset \\
{\exists}(y)(ThreeDimensionalSpatialRegion(y) \wedge \\
occupiesSpatialRegionAt(x, y, t)))
\end{split}
\end{equation}
[153-001]
\begin{equation}
\begin{split}
{\forall}(x)(SpatialRegion(x) \supset Continuant(x))
\end{split}
\end{equation}
[035-001]
\begin{equation}
\begin{split}
{\forall}(x, y, t)((SpatialRegion(x) \wedge continuantPartOfAt(y, x, t)) \supset \\
SpatialRegion(y))
\end{split}
\end{equation}
[036-001]
\begin{equation}
\begin{split}
{\forall}(x, t)((MaterialEntity(x) \wedge existsAt(x, t)) \supset \\
{\exists}(y)(ThreeDimensionalSpatialRegion(y) \wedge \\
occupiesSpatialRegionAt(x, y, t)))
\end{split}
\end{equation}
[XXX-001]
\begin{equation}
\begin{split}
{\forall}(x)(ZeroDimensionalSpatialRegion(x) \supset SpatialRegion(x))
\end{split}
\end{equation}
[037-001]
\begin{equation}
\begin{split}
{\forall}(x)(OneDimensionalSpatialRegion(x) \supset SpatialRegion(x))
\end{split}
\end{equation}
[038-001]
\begin{equation}
\begin{split}
{\forall}(x)(TwoDimensionalSpatialRegion(x) \supset SpatialRegion(x))
\end{split}
\end{equation}
[039-001]
\begin{equation}
\begin{split}
{\forall}(x)(ThreeDimensionalSpatialRegion(x) \supset SpatialRegion(x))
\end{split}
\end{equation}
[040-001]
\begin{equation}
\begin{split}
{\forall}(x, r, t)(occupiesSpatialRegionAt(x, r, t) \supset \\
(SpatialRegion(r) \wedge IndependentContinuant(x)))
\end{split}
\end{equation}
[041-002]
\begin{equation}
\begin{split}
{\forall}(r, t)(SpatialRegion(r) \supset occupiesSpatialRegionAt(r, r, t))
\end{split}
\end{equation}
[042-002]
\begin{equation}
\begin{split}
{\forall}(x, y, r_1, t)((occupiesSpatialRegionAt(x, r_1, t) \wedge \\
continuantPartOfAt(y, x, t)) \supset \\
{\exists}(r_2)(continuantPartOfAt(r_2, r_1, t) \wedge \\
occupiesSpatialRegionAt(y, r_2, t)))
\end{split}
\end{equation}
[043-001]
\begin{equation}
\begin{split}
{\forall}(x, y, z, t)((locatedInAt(x, y, t) \wedge locatedInAt(y, z, t)) \supset \\
locatedInAt(x, z, t))
\end{split}
\end{equation}
[046-001]
\begin{equation}
\begin{split}
{\forall}(x, t)(IndependentContinuant(x) \supset \\
{\exists}(r)(SpatialRegion(r) \wedge locatedInAt(x, r, t)))
\end{split}
\end{equation}
[134-001]
\begin{equation}
\begin{split}
{\forall}(x, r, t)((IndependentContinuant(x) \wedge locatedInAt(x, r, t)) \supset \\
{\exists}(r_1)(continuantPartOfAt(r_1, r, t) \wedge \\
occupiesSpatialRegionAt(x, r_1, t)))
\end{split}
\end{equation}
[135-001]
\begin{equation}
\begin{split}
{\forall}(x, y, t)((continuantPartOfAt(x, y, t) \wedge \\
IndependentContinuant(x)) \supset locatedInAt(x, y, t))
\end{split}
\end{equation}
[047-002]
\begin{equation}
\begin{split}
{\forall}(x, y, z, t)((IndependentContinuant(x) \wedge IndependentContinuant(y) \wedge \\
IndependentContinuant(z) \wedge continuantPartOfAt(x, y, t) \wedge \\
locatedInAt(y, z, t)) \supset locatedInAt(x, z, t))
\end{split}
\end{equation}
[048-001]
\begin{equation}
\begin{split}
{\forall}(x, y, z, t)((IndependentContinuant(x) \wedge IndependentContinuant(y) \wedge \\
IndependentContinuant(z) \wedge locatedInAt(x, y, t) \wedge \\
continuantPartOfAt(y, z, t)) \supset locatedInAt(x, z, t))
\end{split}
\end{equation}
[049-001]
\begin{equation}
\begin{split}
{\forall}(x)({\exists}(y, t)specificallyDependsOnAt(x, y, t) \supset {\neg}MaterialEntity(x))
\end{split}
\end{equation}
[052-001]
\begin{equation}
\begin{split}
{\forall}(x, y, z, t)((specificallyDependsOnAt(x, y, t) \wedge \\
specificallyDependsOnAt(y, z, t)) \supset \\
specificallyDependsOnAt(x, z, t))
\end{split}
\end{equation}
[054-002]
\begin{equation}
\begin{split}
{\forall}(x)(Quality(x) \supset SpecificallyDependentContinuant(x))
\end{split}
\end{equation}
[055-001]
\begin{equation}
\begin{split}
{\forall}(x)({\exists}(t)(existsAt(x, t) \wedge Quality(x)) \supset \\
{\forall}(t_1)(existsAt(x, t_1) \supset Quality(x)))
\end{split}
\end{equation}
[105-001]
\begin{equation}
\begin{split}
{\forall}(x)(RealizableEntity(x) \supset \\
(SpecificallyDependentContinuant(x) \wedge \\
{\exists}(y)(IndependentContinuant(y) \wedge {\neg}SpatialRegion(y) \wedge \\
inheresIn(x, y))))
\end{split}
\end{equation}
[058-002]
\begin{equation}
\begin{split}
{\forall}(x, y, t)(realizesAt(x, y, t) \supset \\
(Process(x) \wedge (Disposition(y) \vee Role(y)) \wedge \\
{\exists}(z)(MaterialEntity(z) \wedge hasParticipantAt(x, z, t) \wedge \\
bearerOfAt(z, y, t))))
\end{split}
\end{equation}
[059-003]
\begin{equation}
\begin{split}
{\forall}(x, t)(RealizableEntity(x) \supset {\exists}(y)(IndependentContinuant(y) \wedge \\
{\neg}SpatialRegion(y) \wedge bearerOfAt(y, x, t)))
\end{split}
\end{equation}
[060-002]
\begin{equation}
\begin{split}
{\forall}(x)(Role(x) \supset RealizableEntity(x))
\end{split}
\end{equation}
[061-001]
\begin{equation}
\begin{split}
{\forall}(x)(Disposition(x) \supset (RealizableEntity(x) \wedge \\
{\exists}(y, t)(MaterialEntity(y) \wedge bearerOfAt(y, x, t))))
\end{split}
\end{equation}
[062-002]
\begin{equation}
\begin{split}
{\forall}(x, t)((RealizableEntity(x) \wedge existsAt(x, t)) \supset \\
{\exists}(y)(MaterialEntity(y) \wedge specificallyDependsOnAt(x, y, t)))
\end{split}
\end{equation}
[063-002]
\begin{equation}
\begin{split}
{\forall}(x)(Function(x) \supset Disposition(x))
\end{split}
\end{equation}
[064-001]
\begin{equation}
\begin{split}
{\forall}(x, y, t)(hasMaterialBasisAt(x, y, t) \supset \\
(Disposition(x) \wedge MaterialEntity(y) \wedge \\
{\exists}(z)(bearerOfAt(z, x, t) \wedge continuantPartOfAt(y, z, t) \wedge \\
{\exists}(w)(Disposition(w) \wedge (hasDisposition(z, w) \supset \\
continuantPartOfAt(y, z, t))))))
\end{split}
\end{equation}
[071-002]
\begin{equation}
\begin{split}
{\forall}(x, y)({\exists}(t)genericallyDependsOnAt(x, y, t) \supset \\
{\forall}(t_1)(existsAt(x, t_1) \supset {\exists}(z)genericallyDependsOnAt(x, z, t_1)))
\end{split}
\end{equation}
[073-001]
\begin{equation}
\begin{split}
{\forall}(x, y, t)(concretizesAt(x, y, t) \supset \\
(SpecificallyDependentContinuant(x) \wedge \\
GenericallyDependentContinuant(y) \wedge \\
{\exists}(z)(IndependentContinuant(z) \wedge specificallyDependsOnAt(x, z, t) \wedge \\
genericallyDependsOnAt(y, z, t))))
\end{split}
\end{equation}
[075-002]
\begin{equation}
\begin{split}
{\forall}(x, y, t)(genericallyDependsOnAt(x, y, t) \supset \\
{\exists}(z)(concretizesAt(z, x, t) \wedge specificallyDependsOnAt(z, y, t)))
\end{split}
\end{equation}
[076-001]
\begin{equation}
\begin{split}
{\forall}(x)(Occurrent(x) \equiv (Entity(x) \wedge {\exists}(y)temporalPartOf(y, x)))
\end{split}
\end{equation}
[079-001]
\begin{equation}
\begin{split}
{\forall}(x)(TemporalRegion(x) \supset occupiesTemporalRegion(x, x))
\end{split}
\end{equation}
[137-001]
\begin{equation}
\begin{split}
{\forall}(x)(ProcessBoundary(x) \supset \\
{\exists}(y)(ZeroDimensionalTemporalRegion(y) \wedge \\
occupiesTemporalRegion(x, y)))
\end{split}
\end{equation}
[085-002]
\begin{equation}
\begin{split}
{\forall}(x, y, t)(hasParticipantAt(x, y, t) \supset Occurrent(x))
\end{split}
\end{equation}
[087-001]
\begin{equation}
\begin{split}
{\forall}(x, y, t)(hasParticipantAt(x, y, t) \supset Continuant(y))
\end{split}
\end{equation}
[088-001]
\begin{equation}
\begin{split}
{\forall}(x, y, t)(hasParticipantAt(x, y, t) \supset existsAt(y, t))
\end{split}
\end{equation}
[089-001]
\begin{equation}
\begin{split}
{\forall}(x, y, t)((hasParticipantAt(x, y, t) \wedge \\
SpecificallyDependentContinuant(y)) \supset \\
{\exists}(z)(IndependentContinuant(z) \wedge {\neg}SpatialRegion(z) \wedge \\
specificallyDependsOnAt(x, z, t) \wedge specificallyDependsOnAt(y, z, t)))
\end{split}
\end{equation}
[090-003]
\begin{equation}
\begin{split}
{\forall}(x, y, t)((hasParticipantAt(x, y, t) \wedge \\
GenericallyDependentContinuant(y)) \supset \\
{\exists}(z)(IndependentContinuant(z) \wedge {\neg}SpatialRegion(z) \wedge \\
genericallyDependsOnAt(y, z, t) \wedge specificallyDependsOnAt(x, z, t)))
\end{split}
\end{equation}
[091-003]
\begin{equation}
\begin{split}
{\forall}(x, y)(processProfileOf(x, y) \supset (properContinuantPartOf(x, y) \wedge \\
{\exists}(z, t)(properOccurrentPartOf(z, y) \wedge TemporalRegion(t) \wedge \\
occupiesSpatioTemporalRegion(x, t) \wedge \\
occupiesSpatioTemporalRegion(y, t) \wedge \\
occupiesSpatioTemporalRegion(z, t) \wedge \\
{\neg}{\exists}(w)(occurrentPartOf(w, x) \wedge occurrentPartOf(w, z)))))
\end{split}
\end{equation}
[094-005]
\begin{equation}
\begin{split}
{\forall}(x)(SpatioTemporalRegion(x) \supset Occurrent(x))
\end{split}
\end{equation}
[095-001]
\begin{equation}
\begin{split}
{\forall}(x, y)((SpatioTemporalRegion(x) \wedge occurrentPartOf(y, x)) \supset \\
SpatioTemporalRegion(y))
\end{split}
\end{equation}
[096-001]
\begin{equation}
\begin{split}
{\forall}(x)(SpatioTemporalRegion(x) \supset \\
{\exists}(y)(TemporalRegion(y) \wedge temporallyProjectsOnto(x, y)))
\end{split}
\end{equation}
[098-001]
\begin{equation}
\begin{split}
{\forall}(x, t)(SpatioTemporalRegion(x) \supset \\
{\exists}(y)(SpatialRegion(y) \wedge spatiallyProjectsOntoAt(x, y, t)))
\end{split}
\end{equation}
[099-001]
\begin{equation}
\begin{split}
{\forall}(r)(SpatioTemporalRegion(r) \supset \\
occupiesSpatioTemporalRegion(r, r))
\end{split}
\end{equation}
[107-002]
\begin{equation}
\begin{split}
{\forall}(x)(Occurrent(x) \supset {\exists}(r)(SpatioTemporalRegion(r) \wedge \\
occupiesSpatioTemporalRegion(x, r)))
\end{split}
\end{equation}
[108-001]
\begin{equation}
\begin{split}
{\forall}(x)(TemporalRegion(x) \supset Occurrent(x))
\end{split}
\end{equation}
[100-001]
\begin{equation}
\begin{split}
{\forall}(r)(TemporalRegion(r) \supset occupiesTemporalRegion(r, r))
\end{split}
\end{equation}
[119-002]
\begin{equation}
\begin{split}
{\forall}(x, y)((TemporalRegion(x) \wedge occurrentPartOf(y, x)) \supset \\
TemporalRegion(y))
\end{split}
\end{equation}
[101-001]
\begin{equation}
\begin{split}
{\forall}(x)(ZeroDimensionalTemporalRegion(x) \supset TemporalRegion(x))
\end{split}
\end{equation}
[102-001]
\begin{equation}
\begin{split}
{\forall}(x)(OneDimensionalTemporalRegion(x) \supset TemporalRegion(x))
\end{split}
\end{equation}
[103-001]
\begin{equation}
\begin{split}
{\forall}(x, y, z)((historyOf(x, y) \wedge historyOf(x, z)) \supset (y = z))
\end{split}
\end{equation}
[XXX-001]
\end{flushright}
\section{Theorems}
The following formulas are noted as theorems in the \emph{BFO 2.0 Draft Specification and User's Guide} and are derivable from the definitions and axioms of the system. Of course, these explicitly noted theorems are only a small subset of what is derivable within BFO-FOL.
\begin{flushright}
\begin{equation}
\begin{split}
{\forall}(x, t)(Continuant(x) \supset continuantPartOfAt(x, x, t))
\end{split}
\end{equation}
[111-002]
\begin{equation}
\begin{split}
{\forall}(x)(Occurrent(x) \supset occurrentPartOf(x, x))
\end{split}
\end{equation}
[113-002]
\begin{equation}
\begin{split}
{\forall}(x, y, t)((Entity(x) \wedge \\
(continuantPartOfAt(y, x, t) \vee continuantPartOfAt(x, y, t) \vee \\
occurrentPartOf(x, y) \vee occurrentPartOf(y, x))) \supset \\
{\neg}specificallyDependsOnAt(x, y, t))
\end{split}
\end{equation}
[013-002]
\begin{equation}
\begin{split}
{\forall}(x)((Entity(x) \wedge \\
{\exists}(y, t)(MaterialEntity(y) \wedge continuantPartOfAt(x, y, t))) \supset \\
MaterialEntity(x))
\end{split}
\end{equation}
[021-002]
\begin{equation}
\begin{split}
{\forall}(x, y, t)(memberPartOfAt(x, y, t) \supset continuantPartOfAt(x, y, t))
\end{split}
\end{equation}
[104-001]
\begin{equation}
\begin{split}
{\forall}(x, y, t)(specificallyDependsOnAt(x, y, t) \supset \\
{\exists}(z)(IndependentContinuant(z) \wedge {\neg}SpatialRegion(z) \wedge \\
specificallyDependsOnAt(x, z, t)))
\end{split}
\end{equation}
[136-001]
\begin{equation}
\begin{split}
{\forall}(x, y)(properTemporalPartOf(x, y) \supset \\
{\exists}(z)(properTemporalPartOf(z, y) \wedge \\
{\neg}{\exists}(w)(temporalPartOf(w, x) \wedge temporalPartOf(w, z))))
\end{split}
\end{equation}
[117-002]
\begin{equation}
\begin{split}
{\forall}(x, y, z, t)((RealizableEntity(x) \wedge Process(y) \wedge \\
realizesAt(y, x, t) \wedge bearerOfAt(z, x, t)) \supset \\
hasParticipantAt(y, z, t))
\end{split}
\end{equation}
[106-002]
\end{flushright}
\section{Conclusion}
As noted above, the BFO 2.0 specification is currently under development, and thus the axiomatization of the specification in BFO-FOL is accordingly subject to modification and refinement. Of particular interest is the question of what consequences can be derived from these definitions and axioms, both with regard to the formal consistency of BFO-FOL and with regard to whether these consequences would run counter to the basic principles and intentions of BFO. Since BFO-FOL contains a large number of definitions and axioms, the working group is investigating formal tools capable of automating the investigation into these consequences.
\begin{thebibliography}{99}
\bibitem{BFO2}
Barry Smith, et al. \emph{Basic Formal Ontology 2.0: Draft Specification and User's Guide}. Manuscript.
\end{thebibliography}
\end{document}
| {
"alphanum_fraction": 0.6706096685,
"avg_line_length": 28.6138032305,
"ext": "tex",
"hexsha": "710723e7016b92a5f693cfe49f31615e17709e43",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "bf988c5747ff1ac517eeb55a534cab2f62ee52e2",
"max_forks_repo_licenses": [
"CC-BY-4.0"
],
"max_forks_repo_name": "zhengj2007/bfo",
"max_forks_repo_path": "src/ontology/fol-ressler/2012-07-20/BFO-FOL.tex",
"max_issues_count": 193,
"max_issues_repo_head_hexsha": "bf988c5747ff1ac517eeb55a534cab2f62ee52e2",
"max_issues_repo_issues_event_max_datetime": "2015-07-24T02:41:37.000Z",
"max_issues_repo_issues_event_min_datetime": "2015-07-23T20:52:43.000Z",
"max_issues_repo_licenses": [
"CC-BY-4.0"
],
"max_issues_repo_name": "zhengj2007/bfo",
"max_issues_repo_path": "src/ontology/fol-ressler/2012-07-20/BFO-FOL.tex",
"max_line_length": 687,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "bf988c5747ff1ac517eeb55a534cab2f62ee52e2",
"max_stars_repo_licenses": [
"CC-BY-4.0"
],
"max_stars_repo_name": "zhengj2007/bfo",
"max_stars_repo_path": "src/ontology/fol-ressler/2012-07-20/BFO-FOL.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 12865,
"size": 38972
} |
%!TEX root = ../thesis.tex
%*******************************************************************************
%****************************** Second Chapter *********************************
%*******************************************************************************
\chapter{The LHC and the CMS experiment}
\label{sec:det}
\section{The Large Hadron Collider}
\label{det:LHC}
The LHC, situated at CERN in the Geneva area, is a circular hadron accelerator with a circumference of $27 \; \mathrm{km}$, designed for a collision energy of $\sqrt{s} = 14 \TeV$~\cite{1748-0221-3-08-S08001}.
This analysis uses data from proton-proton collisions taken in 2016, when the LHC reached a center of mass energy of $\sqrt{s}= 13 \TeV$.
The protons are assembled in bunches and accelerated to an energy of $450 \; \GeV$ by various pre-accelerators before being injected into the LHC.
The two beams in the LHC run in opposite directions and are kept on their path by 1232 superconducting dipole magnets.
Collisions are induced at four points along the ring of the LHC, as shown in Figure~\ref{fig:det_LHC}. The four main experiments are situated at these interaction points.
ALICE (A Large Ion Collider Experiment) is designed for heavy ion collisions, which result in events with a very high track multiplicity.
LHCb (Large Hadron Collider beauty) focuses on heavy flavor physics.
The two multi-purpose experiments ATLAS (A Toroidal LHC Apparatus) and CMS (Compact Muon Solenoid) are designed to measure and search for processes with low cross sections.
\begin{figure}[htbp!]
\begin{center}
\resizebox{0.62 \textwidth}{!}{\includegraphics{Detector/Figures/LHC.png}}
\caption{Schematic of the LHC ring showing the two beams as well as the four major experiments. It also shows the injection points as well as the cleaning point and the beam dump~\cite{1748-0221-3-08-S08001}.
\label{fig:det_LHC}}
\end{center}
\end{figure}
The instantaneous luminosity $\mathcal{L}$ is a measure of the rate of pp collisions.
It is related to the rate of events $\dot N$ of a process $k$ through the cross section $\sigma_k$:
\begin{equation}
\dot N_k = \mathcal{L} \cdot \sigma_k.
\end{equation}
The luminosity itself can be calculated from the beam properties:
\begin{equation}
\mathcal{L} = \frac{N_b \cdot N_p^2 \cdot v}{\Sigma_x \Sigma_y}.
\end{equation}
Here, $N_b$ stands for the number of bunches, $N_p$ for the number of protons per bunch and $v$ for the revolution frequency of the LHC.
$\Sigma_x$ and $\Sigma_y$ are the effective widths of the overlap of the two beam profiles in the x and y directions, respectively. The luminosity is determined by measuring these beam profiles.
Special LHC conditions are used to calibrate the relevant parts of the detector for this measurement of the beam profiles. In a Van-der-Meer scan~\cite{Zanetti:1357856} the two beams are shifted against each other, which allows the detector to be calibrated for known configurations of $\Sigma_{x,y}$.
The calibrated detectors are then used to determine the instantaneous luminosity during data taking.
In this analysis the data corresponds to an integrated luminosity of \lumivwunc taken at a center of mass energy of $\sqrt{s} = 13 \TeV$.
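As an illustrative aside (not part of the measurement itself), the following Python sketch evaluates the two relations above. All beam parameters and the cross section are placeholder values of roughly the right order of magnitude, not the actual 2016 LHC configuration.
\begin{verbatim}
def luminosity(n_bunches, protons_per_bunch, rev_freq_hz, sigma_x_cm, sigma_y_cm):
    """L = N_b * N_p^2 * v / (Sigma_x * Sigma_y), widths in cm -> L in cm^-2 s^-1."""
    return n_bunches * protons_per_bunch**2 * rev_freq_hz / (sigma_x_cm * sigma_y_cm)

def event_rate(lumi_cm2_s, sigma_pb):
    """Rate = L * sigma, with 1 pb = 1e-36 cm^2."""
    return lumi_cm2_s * sigma_pb * 1e-36

# Placeholder beam parameters (illustrative only):
L = luminosity(n_bunches=2000, protons_per_bunch=1.1e11,
               rev_freq_hz=11245.0, sigma_x_cm=3e-3, sigma_y_cm=3e-3)
print(L)                              # O(1e34) cm^-2 s^-1
print(event_rate(L, sigma_pb=100.0))  # a 100 pb process -> O(1) events per second
\end{verbatim}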
\section{The CMS detector}
The CMS detector is a multi-purpose detector for the study of particles from proton-proton, proton-lead and lead-lead collisions.
With a length of $21.6 \;\si{\meter}$ and a diameter of $15 \;\si{\meter}$ it is relatively small and dense for its weight of 14000 tons~\cite{Bayatian:922757}, hence the name 'compact'.
The detector is built of multiple radial layers ("onion structure") and split into a barrel region in the middle and two endcaps closing the detector structure, as shown in the overview in Figure~\ref{fig:det_CMS}.
The innermost part of the detector is the tracker, followed by the electromagnetic and then the hadronic calorimeter.
These parts of the detector are surrounded by the superconducting solenoid. The muon system is the last part of the detector and is situated outside the solenoid. It is interleaved with the iron return yoke of the magnet.
\begin{figure}[htbp!]
\begin{center}
\resizebox{0.89 \textwidth}{!}{\includegraphics{Detector/Figures/cms_complete_labelled}}
\caption{The CMS detector and its relevant subsystems.~\cite{Collaboration:1433717}
\label{fig:det_CMS}}
\end{center}
\end{figure}
The solenoid provides a magnetic field of $3.8 \;\si{\tesla}$ for the inner part of the detector (the tracker and calorimeters) and a field of about $2 \;\si{\tesla}$ in the muon system.
A total energy of $2.6 \;\si{\giga \joule}$ is stored in 2168 loops of superconducting cables.
This strong magnetic field leads to strongly bent tracks for charged particles, allowing a precise measurement of their momenta.
The tracks of particles outside the magnet are bent in the opposite direction of those in the inner part.
The detector is described in a right-handed coordinate system with the origin in the interaction point.
The $z$-axis points in the direction of the counterclockwise beam, the $y$-axis points vertically upwards and the $x$-axis radially points towards the center of the LHC ring.
The angle $\varphi$ is defined in the $x$-$y$ plane, while the polar angle $\theta$ is measured with respect to the $z$-axis. Instead of $\theta$ the pseudorapidity $\eta$ is used, since differences in $\eta$ are invariant under Lorentz boosts along the beam axis as long
as the momentum of a particle is large compared to its mass ($|\vec{\mathrm{p}}|\gg \mathrm{m}$):
\begin{equation}
\eta = -\ln{\tan{\frac{\theta}{2}}}.
\end{equation}
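As a small illustration of this coordinate convention, the following Python snippet converts between $\theta$ and $\eta$; the numerical examples are generic and not tied to any specific detector element.
\begin{verbatim}
import math

def eta_from_theta(theta_rad):
    """Pseudorapidity eta = -ln(tan(theta/2)) for a polar angle theta in radians."""
    return -math.log(math.tan(theta_rad / 2.0))

def theta_from_eta(eta):
    """Inverse relation: theta = 2*atan(exp(-eta))."""
    return 2.0 * math.atan(math.exp(-eta))

print(eta_from_theta(math.pi / 2.0))                 # ~0   (perpendicular to the beam)
print(round(eta_from_theta(math.radians(10.0)), 2))  # ~2.44 (close to the beam line)
print(round(math.degrees(theta_from_eta(2.5)), 1))   # ~9.4 degrees
\end{verbatim}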
The subsequent sections describe the different parts of the detector including the triggering system that is used to select the collisions for which the data is stored for offline analysis.
\subsection{The tracking system}
\label{set:det_tracker}
The tracking system~\cite{Bayatian:922757} is designed to measure both tracks and vertices with the highest possible precision.
This requires high granularity and, in the LHC environment, a fast response.
The tracking system consists of two parts: an inner pixel detector and an outer strip detector.
Its structure in the barrel as well as the endcaps is shown in Figure~\ref{fig:det_Tracker}.
\begin{figure}[htbp!]
\begin{center}
\resizebox{0.75 \textwidth}{!}{\includegraphics{Detector/Figures/TrackerLayout}}
\caption{The tracking system of the CMS detector in the barrel and endcap regions. The sketch shows the pixel as well as the strip tracker. The strip tracker consists of the inner barrel (TIB), outer barrel (TOB), inner disks (TID) and endcaps (TEC) parts~\cite{Dominguez:1481838}.
\label{fig:det_Tracker}}
\end{center}
\end{figure}
The first pixel layer is located at a radius of $4.4 \;\si{\centi \meter}$ in the barrel and at $34.5 \;\si{\centi \meter}$ in the endcaps.
Both parts cover a range of $|\eta| < 2.5$.
The barrel region includes three layers of pixel detectors and ten layers of strip detectors, while the endcap includes two layers of pixels and twelve layers of strips.
The 66 million pixels each measure $150 \times 100 \; \si{\micro \meter}$. The 9.6 million strips have a width of $80-180 \;\si{\micro \meter}$; together with the pixel detector, this allows even closely spaced
particle trajectories to be separated.
The position of each tracker part is precisely known from alignment analyses using tracks from collision events and cosmic muons~\cite{Chatrchyan:2014wfa}.
The momenta of charged hadrons with $\pt < 20 \GeV$ are measured with a resolution of $1\%$ at an incidence of ninety degrees~\cite{Sirunyan:2017ulk}. The relative resolution degrades with increasing \pt, becoming comparable to
the energy resolution of the calorimeter at several hundred \GeV.
At low \pt, the resolution is dominated by multiple scattering of the particle in the tracker material~\cite{1748-0221-9-10-P10009}. At high \pt, the resolution worsens as the bending of the track is
reduced, making the \pt determination more difficult. The resolution in \pt is shown in Figure~\ref{fig:det_trackeffs} for muons and charged pions.
Since high \pt partons usually produce multiple charged hadrons of lower \pt through fragmentation, the tracker can still contribute significantly
to the measurement of high-\pt jets.
\begin{figure}[htbp!]
\begin{center}
\resizebox{0.49 \textwidth}{!}{\includegraphics{Detector/Figures/trackpipt}}
\resizebox{0.49 \textwidth}{!}{\includegraphics{Detector/Figures/trackmupt}}
\caption{Resolution of the track \pt as a function of the track \pt for simulated charged pions (left) and muons (right)~\cite{1748-0221-9-10-P10009}.
\label{fig:det_trackeffs}}
\end{center}
\end{figure}
\subsection{The electromagnetic calorimeter}
The electromagnetic calorimeter (ECAL)~\cite{Bayatian:922757} measures the energy of electrons and photons, which produce electromagnetic showers in its crystals.
These electromagnetic showers should be contained within the ECAL. Additionally, the high granularity of the ECAL helps separate the signals from different particles.
A sketch of the structure of the ECAL is shown in Figure~\ref{fig:det_ECAL}.
\begin{figure}[htbp!]
\begin{center}
\resizebox{0.75 \textwidth}{!}{\includegraphics{Detector/Figures/ECAL}}
\caption{A sketch of the electromagnetic calorimeter showing the structure in the barrel and the endcaps as well as the preshower detectors~\cite{Chatrchyan:2009qm}.
\label{fig:det_ECAL}}
\end{center}
\end{figure}
The ECAL is made of lead tungstate with the barrel region covering about $|\eta| < 1.5$ and the endcaps covering $1.5 < |\eta|<3.0$. The crystals are $23 (22)\;\si{\centi \meter}$ deep in the barrel (endcap) corresponding to about 26(25) radiation lengths.
For electrons and photons up to an energy of $1 \TeV$, more than $98 \%$ of the energy of each particle is contained in the ECAL.
The crystal depth also corresponds to roughly one interaction length. This implies that approximately two thirds of hadrons start their shower in the ECAL.
The front of the crystals has a size of $2.2 \times 2.2 \;\si{\centi \meter \squared}$ in the barrel and $2.9 \times 2.9 \;\si{\centi \meter \squared}$ in the endcaps. This size corresponds to the Moliere radius of lead tungstate of $2.2 \;\si{\centi \meter}$.
The electronic noise in the ECAL is measured to be about $40 \MeV$ per crystal in the barrel and about $100 \MeV$ in the endcaps.
The energy resolution for an electron measured in the ECAL can be parameterized depending on the electron energy as follows~\cite{Sirunyan:2017ulk}:
\begin{equation}
\frac{\sigma}{\mathrm{E}} = \frac{2.8\%}{\sqrt{\mathrm{E}/\GeV}} \oplus \frac{12\%}{\mathrm{E} / \GeV} \oplus 0.3\%.
\end{equation}
The first, stochastic term is caused by fluctuations such as the intrinsic variations of the shower development itself. The second term is caused by noise from the electronics, and the constant third term is related
to calibration uncertainties, detector non-uniformity and radiation damage.
Photons in jets have a typical energy range of $1-50 \GeV$ where the resolution of the ECAL is excellent.
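To make the size of the individual terms explicit, the following Python sketch (illustrative only) evaluates this parameterization for a few energies, adding the three contributions in quadrature.
\begin{verbatim}
import math

def ecal_relative_resolution(energy_gev, stoch=0.028, noise=0.12, const=0.003):
    """sigma/E from the parameterization above: the stochastic, noise and
    constant terms are added in quadrature."""
    return math.sqrt((stoch / math.sqrt(energy_gev)) ** 2
                     + (noise / energy_gev) ** 2
                     + const ** 2)

for energy in (1.0, 10.0, 50.0, 500.0):
    print(energy, "GeV:", round(100.0 * ecal_relative_resolution(energy), 2), "%")
# The noise term dominates around 1 GeV, the constant term at very high energies.
\end{verbatim}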
The so-called preshower detector is situated in front of the two ECAL disks in the endcaps.
The preshower consists of two layers: a lead radiator followed by a layer of silicon strip sensors.
The detector has a much higher granularity than the ECAL, which allows measuring the initial position of a shower from an electron or photon with a high precision.
The purpose of the preshower is to discriminate between neutral pions decaying into two photons and prompt single photons.
Additionally, a coincidence between ECAL and preshower can be used to identify electrons and photons.
The performance of the preshower is degraded by the large number of neutral pions produced by hadrons interacting with the tracker material.
\subsection{The hadronic calorimeter}
The hadronic calorimeter (HCAL)~\cite{Bayatian:922757} is a sampling calorimeter consisting of layers of brass absorbers and plastic scintillators.
Its purpose is to measure the energy of hadronic showers with a high precision. Additionally, it prevents the hadronic showers from leaking into the muon system.
In the barrel it covers a range of $|\eta|< 1.3$ with a corresponding endcap coverage of $1.3 <|\eta|< 3.0$.
In the barrel the HCAL is about six interaction lengths thick at normal incidence, increasing to over ten interaction lengths at lower incidence angles.
The whole HCAL is shown as a sketch in Figure~\ref{fig:det_HCAL}.
\begin{figure}[htbp!]
\begin{center}
\resizebox{0.75 \textwidth}{!}{\includegraphics{Detector/Figures/hcaldisplay.eps}}
\caption{A quarter-view schematic of the hadronic calorimeter showing the HCAL in the barrel (HB) and the endcaps (HE), the outer HCAL (HO) and the forward HCAL (HF)~\cite{2010JInst...5T3014C}.
\label{fig:det_HCAL}}
\end{center}
\end{figure}
The outer hadronic calorimeter (HO) is situated outside the solenoid coil, which acts as an additional absorber and thereby increases the effective interaction length.
In the very central region, the interaction length is further increased by additional layers of steel.
Including the ECAL, the total calorimeter system has a thickness corresponding to a minimum of about twelve interaction lengths in the barrel and ten interaction lengths in the endcaps.
The individual towers of the HCAL have a cross section of $\Delta \eta \times \Delta \varphi = 0.087 \times 0.087$ in the central region of $|\eta|< 1.6$ and a cross section of $\Delta \eta \times \Delta \varphi = 0.17 \times 0.17$
in the more forward region~\cite{Bayatian:922757}.
The electronic noise is measured to be about $200 \MeV$ per tower~\cite{Sirunyan:2017ulk}.
The combined energy resolution for ECAL and HCAL has been measured with a pion beam to be
\begin{equation}
\frac{\sigma}{\mathrm{E}} = \frac{110\%}{\sqrt{\mathrm{E}/\GeV}} \oplus 9\%.
\end{equation}
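For comparison with the electromagnetic case, the following short Python sketch (again purely illustrative) evaluates this combined hadronic parameterization:
\begin{verbatim}
import math

def hadronic_relative_resolution(energy_gev, stoch=1.10, const=0.09):
    """sigma/E for single pions from the combined ECAL+HCAL parameterization above."""
    return math.sqrt((stoch / math.sqrt(energy_gev)) ** 2 + const ** 2)

for energy in (10.0, 100.0, 1000.0):
    print(energy, "GeV:", round(100.0 * hadronic_relative_resolution(energy), 1), "%")
# ~36% at 10 GeV, ~14% at 100 GeV, ~10% at 1 TeV
\end{verbatim}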
The two parts of the hadron forward calorimeter (HF) are positioned in the very forward (backward) regions of the detector at a distance of $11\;\si{\meter}$ from the interaction point.
They cover the region up to $|\eta| \approx 5$. The HF consists of steel absorbers and quartz fibers of two different lengths. The long quartz fibers correspond to roughly ten interaction lengths. The difference in the signal from the short and the long fibers is used to estimate the hadronic and electromagnetic components of the shower.
\subsection{The muon system}
High energy muons mostly interact with matter through ionization.
They are generally neither stopped by, nor decay within, the detector, so their momenta can only be measured by reconstructing their tracks.
The purpose of the muon system as the outermost part of the detector is to identify muons and measure their momentum.
The muon system~\cite{Bayatian:922757} consists of four layers with three steel layers of the return yoke of the solenoid between them.
The central region is covered by Drift Tubes (DT) in the region of $|\eta|< 1.2$. The outer region is covered by Cathode Strip Chambers (CSC) in the region of $0.9<|\eta|<2.4$.
Additionally, Resistive Plate Chambers (RPC) cover the range of $|\eta|<1.6$.
A sketch of the whole muon system is shown in Figure~\ref{fig:det_muon}.
\begin{figure}[htbp!]
\begin{center}
\resizebox{0.75 \textwidth}{!}{\includegraphics{Detector/Figures/Muon}}
\caption{A quarter-view schematic of the muon system showing the Drift Tubes (DT) in the central part, the Cathode Strip Chambers (CSC) in the outer part and the Resistive Plate Chambers (RPC)~\cite{Bayatian:922757}.
\label{fig:det_muon}}
\end{center}
\end{figure}
The DT chambers are filled with a mixture of argon and carbon dioxide. The chambers themselves contain layers which are rotated with respect to each other,
which allows both the $\varphi$ and the $\eta$ projections of the track to be measured. The DTs are also used for triggering.
The CSCs are positioned at a right angle with respect to the DTs. The single chambers consist of six gas gaps each. The two coordinates of the track are determined by radial cathode strips and perpendicular anode wires.
The spatial resolution of both the CSCs and the DTs is in the range of $100-200 \; \si{\micro \meter}$, depending on $\eta$.
The RPC system consists of 480 chambers. Their very high time resolution in the sub-nanosecond range allows them to be used for triggering and for the association of muon tracks with a specific bunch crossing.
\subsection{Triggering}
\label{set:det_trigger}
The LHC delivers collisions at a rate of $40 \;\si{\mega \hertz}$. In order to record the data as it is measured by the detector, this data volume has to be reduced to a manageable size. This reduction is achieved by
keeping only those events for further study in which a potentially relevant signature is observed. A two-tiered triggering system is used to reduce the final rate of recorded events to $100 \;\si{\hertz}$.
First, the hardware-based Level-1 (L1) trigger is used to reduce the number of events passed to the software-based high-level trigger (HLT)~\cite{Bayatyan:706847,Tapper:2013yva}.
The L1 trigger consists of programmable electronics using information from the calorimeters as well as the muon system as shown in Figure~\ref{fig:det_Trigger}. The L1 trigger is separated into the muon trigger and the calorimeter trigger. The tracking system is not used at this stage of triggering.
The muon trigger combines track information from the CSCs, DTs and RPCs. It uses information from the calorimeters to measure the isolation of the muons.
The calorimeter trigger combines the ECAL, HCAL and HF. Besides muon triggers, algorithms targeting electrons/photons, taus, jets and the overall energy deposition in the detector are used. Requiring a muon to be contained within a jet allows targeting jets originating from a b quark in which the B hadron decays further into a final state containing a muon.
The L1 trigger has a maximal latency of $3.8 \;\si{\micro \second}$ in which a trigger decision has to be delivered to the HLT.
\begin{figure}[htbp!]
\begin{center}
\resizebox{0.75 \textwidth}{!}{\includegraphics{Detector/Figures/Trigger}}
\caption{ Data flow of the L1 trigger system showing the combination of information from the different subsystems~\cite{Tapper:2013yva}.
\label{fig:det_Trigger}}
\end{center}
\end{figure}
The HLT is the second step of the triggering system. It reduces the number of events from $100 \; \si{\kilo \hertz}$ to $100 \; \si{\hertz}$.
Compared to the L1 trigger, it uses more sophisticated reconstruction techniques, which are generally close to the final reconstruction that is used for analysis.
The HLT starts from the L1 decision. It then combines information from the complete detector, including tracks, to reconstruct the respective particle in the path.
Finally, further thresholds on the kinematics, the isolation or the reconstruction quality are required.
These steps are performed sequentially; if one step fails, the sequence is stopped.
In order to reduce the rate of certain trigger paths for both the L1 trigger and the HLT, some trigger paths only consider a fraction of the events. This technique is called prescaling: if a trigger path is prescaled by two, it only considers every second event.
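The prescaling bookkeeping can be illustrated with a minimal Python sketch (a toy model, not the actual trigger firmware or reconstruction software):
\begin{verbatim}
class PrescaledPath:
    """Toy model of a prescaled trigger path: a prescale of N means that only
    every N-th event reaching the path is actually considered (N=1: no prescale)."""

    def __init__(self, prescale):
        self.prescale = prescale
        self.counter = 0

    def considers(self, event):
        self.counter += 1
        return self.counter % self.prescale == 0

path = PrescaledPath(prescale=2)
print([path.considers(e) for e in range(6)])
# [False, True, False, True, False, True] -> every second event is considered
\end{verbatim}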
A detailed study of the lepton triggers which are used in this analysis is presented in Chapter~\ref{sec:Trigger}. The measurement of the trigger efficiency is
discussed as well.
The next chapter describes the reconstruction of particles from the detector information.
| {
"alphanum_fraction": 0.7654541739,
"avg_line_length": 71.9632352941,
"ext": "tex",
"hexsha": "a491d0c5ab5e76935525db73da8240c7e89e1b42",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "7e43db2b39998bee437879a5b45dcff6c3ebbb57",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "TillArndt/PhD-Thesis",
"max_forks_repo_path": "Detector/Detector.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "7e43db2b39998bee437879a5b45dcff6c3ebbb57",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "TillArndt/PhD-Thesis",
"max_issues_repo_path": "Detector/Detector.tex",
"max_line_length": 357,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "7e43db2b39998bee437879a5b45dcff6c3ebbb57",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "TillArndt/PhD-Thesis",
"max_stars_repo_path": "Detector/Detector.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 4893,
"size": 19574
} |
\documentclass{article}
\usepackage{arxiv}
\usepackage[utf8]{inputenc} % allow utf-8 input
\usepackage[T1]{fontenc} % use 8-bit T1 fonts
\usepackage{hyperref} % hyperlinks
\usepackage{url} % simple URL typesetting
\usepackage{booktabs} % professional-quality tables
\usepackage{amsfonts} % blackboard math symbols
\usepackage{nicefrac} % compact symbols for 1/2, etc.
\usepackage{microtype} % microtypography
\usepackage{lipsum}
%%%% Start: Added by Sanam
\usepackage{tcolorbox}
\usepackage{graphicx}
\usepackage{makecell}
\usepackage{float}
%%%% End: Added by Sanam
\title{Designing the Elements of a 2D Beam Scanner for a High Speed and High Resolution LiDAR}
%\title{A template for the \emph{arxiv} style}
\author{
Sanam Moslemi Tabriz %\thanks{Use footnote for providing further
% information about author (webpage, alternative
% address)---\emph{not} for acknowledging funding agencies.}
\\
Department of Electronics\\
Carleton University\\
% Pittsburgh, PA 15213 \\
% \texttt{[email protected]} \\
% %% examples of more authors
% \And
% Elias D.~Striatum \\
% Department of Electrical Engineering\\
% Mount-Sheikh University\\
% Santa Narimana, Levand \\
% \texttt{[email protected]} \\
% %% \AND
% %% Coauthor \\
% %% Affiliation \\
% %% Address \\
% %% \texttt{email} \\
% %% \And
% %% Coauthor \\
% %% Affiliation \\
% %% Address \\
% %% \texttt{email} \\
% %% \And
% %% Coauthor \\
% %% Affiliation \\
% %% Address \\
% %% \texttt{email} \\
}
%\author{
% David S.~Hippocampus\thanks{Use footnote for providing further
% information about author (webpage, alternative
% address)---\emph{not} for acknowledging funding agencies.} \\
% Department of Computer Science\\
% Cranberry-Lemon University\\
% Pittsburgh, PA 15213 \\
% \texttt{[email protected]} \\
% %% examples of more authors
% \And
% Elias D.~Striatum \\
% Department of Electrical Engineering\\
% Mount-Sheikh University\\
% Santa Narimana, Levand \\
% \texttt{[email protected]} \\
% %% \AND
% %% Coauthor \\
% %% Affiliation \\
% %% Address \\
% %% \texttt{email} \\
% %% \And
% %% Coauthor \\
% %% Affiliation \\
% %% Address \\
% %% \texttt{email} \\
% %% \And
% %% Coauthor \\
% %% Affiliation \\
% %% Address \\
% %% \texttt{email} \\
%}
\begin{document}
%\maketitle
%
%\begin{abstract}
%%\lipsum[1]
%Light Detection and Ranging (LIDAR) is a very accurate mapping technology in which light is shined toward an object and the reflected beam is observed to map the object precisely. A key part of LiDAR is beam steering. Conventional LiDAR systems use mechanical and/or thermal effect to do the beam steering which are costly and not efficient. In this research in order to reduce the cost and size and also increase the resolution, speed and field of view (FOV) we focus on a solid state silicon photonic method. The novelty of this research will be in solid state depletion based optical phased array (OPA) phase shifting and sophisticated nonuniform surface grating in order to achieve a hight speed, high resolution and wide 2D beam steering.
%\end{abstract}
%
%
%% keywords can be removed
%\keywords{LiDAR \and 2D Scanning \and Subwavelength grating (SWG)}
%%\keywords{First keyword \and Second keyword \and More}
%
%
%\section{Introduction}
%
%LiDAR market varies from autonomous vehicles to agriculture, archaeology, modelling of pollution and many more. With growing application specially in car and aviation industries the development of high speed and high resolution LiDAR device is essential. In general a LiDAR system consists of the following block as shown in figure \ref{lidarsys}
%%\begin{figure}[H]
%\begin{figure}[htb]
%\begin{center}
%\includegraphics[width=12cm, height=8cm]{Figures/lidar_block_diagram}
%\caption{A Typical LiDAR System}
%\label{lidarsys}
%\end{center}
%\end{figure}
%
%
%\begin{description}
%\item[Light source:] A tunable laser which can be either be off chip or integrated into the PIC. The source may be modulated and/or pulsed to allow for noise reduction and the detection of object distances and velocities.
%\item[Splitter:] Various elements can be used to split the source into physically separated optical channels. For a moderate number of channels, a series of splitters can be used, however, if a larger number of channels is desired a device such as star coupler may be used.
%\item[Amplification:] Functional requirements and optical loss may require the amplification of the optical signals. Integration of semi-conductor optical amplifiers (SOA) have been proposed for this purpose.
%\item[Phase Shifters:] Previous work has relied on thermo-optic phase shifters. These devices use local heaters to shift the index of refraction of a portion of the waveguide to introduce a phase shift in the optical signal.
%\item[Grating:] Beam emission is done through the surface gratings, with emission occurring at an angle determined by the grating pitch and the optical wavelength.
%\item[Sensors:] Sensors are responsible for beam collection. The reflected signal can be captured by a grating-based lens which couples the incident light into waveguides.
%\item[Electonic control unit:] Electrical signal processing is needed to determine the pulse timing, the amplitude of the signal and the relative wavelength of the reflected light (in order to use Doppler shifts to determine object velocities). Local oscillators and mixing can be used to generate beat frequencies to obtain such information. A key advantage of PIC technologies is the ability to integrate electronics and optical components on the same chip.
%\item[Target mapping:] The resultant electrical signal would then be routed off chip to a processing unit to create a rich 3D image of the environment.
%\end{description}
%
%%\lipsum[2]
%%\lipsum[3]
%
%\subsection{State of Art}
%\label{statofart}
%Photonics integrated circuits (PIC) is in centre of attention by optical researchers and optical device vendors because of its many advantages mainly the increased bandwidth, low cost and power consumption. Among the PIC techologys, Silicon photonics has gained an interested in current optical devices, because of its well established fabrication process. In this thesis we will focus on Silicon photonics as our fabrication platform.
%
%
%\subsection{Research Objective}
%\label{researchobjective}
%Optical phased arrays (OPA) and surface grating couplers have been commonly used in current beam steering methods. Basically the light is injected to an array of waveguides and gets couple off the surface at the end of waveguide. A tunable laser, the phase shift between the waveguide arrays and the surface grating will do a 2D scanning for the environment. In current literature the phase shifting is based on thermal effect using heaters, which is slow. In this research we will focus on solid state beam steering. We will use the carrier density changes in depletion region of tilted PN junctions on waveguides to create the phase shift. The big advantage of this method would be it's speed. The scanning rate will be much higher than heat based phase shifting. We are target high speed, sensetive applications.
%
%To increase the resolution of the scanning and to increase the field of view (FOV) we will design a sophisticated nonuniform grating coupler. In current litrature the tuning on the grating coupler is done by grating pitch. However the etch depth of the grooved has a huge effect on the beam couple off the chip. We are designing a non uniform etch depth grating to better control the angle and resolution of the outgoing beam.
%
%\section{Method of Design}
%\label{methodofdesign}
%Here is a block digram our circuit \ref{blockGEN}.
%\begin{figure}[H]
%\begin{center}
%\includegraphics[width=12cm, height=8cm]{Figures/pic3}
%\caption{Circuit block digram }
%\label{blockGEN}
%\end{center}
%\end{figure}
%
%%
%\subsection{Phase Shifter}
%\label{phase shifters}
%The phase difference between the arrays will cause the beam scanning in direction of $\phi$
%Each waveguide well be doped with titled rectangles of p and n to create a PIN junction. The width of the depletion region and the carrier density in each waveguide is controlled by the applied reverse bias voltage to each waveguide. The tuning voltages are controlling the phase difference between the phased arrays which in return will control the scanning range in $\phi$ direction.
%
%\begin{equation}
%\beta_i \, = \, k_0 N_{eff}(n_i,p_i) \qquad i \; = \; 1 \, \dots \, N
%\end{equation}
%where $N $ is the number of phased arrays.
%\begin{equation}
%\Delta \phi \, = \, \beta L
%\end{equation}
%
%where $L$ is the length of the doped waveguide.
%
%\begin{center}
%\textbf{\LARGE To be Completed}
%\end{center}
%%
\subsection{Grating Couplers}
\label{gratingCouples}
Grating couplers (GCs) are increasingly used to couple beams out of waveguides. In the GC structure, the wave propagates through the input waveguide, and the periodic structure of the grating area converts it into a leaky wave that couples off the surface of the waveguide into the air. In the telecom industry, optical fibers are placed on top of the GC to couple light into them; in this research we instead use free-space propagation, as required for the LiDAR application.
The basic grating coupler is a periodic structure of teeth and grooves with a grating period $d$ and a coupling angle $\theta$ \cite{pub2000}.
\subsubsection{Choice of core material}
\label{core}
To analyze the physics of the GC, we start with the wave propagating in the waveguide with a propagation constant $\beta_{s}$. Because of the periodic structure there is a leakage process from the waveguide, which results in light being coupled out into free space. This leakage process depends strongly on the physical structure of the waveguide and grating, such as the refractive indices of the materials, the groove depth ($t_g$), the pitch ($d$), the length of the grating ($L$) and the wavelength of the input light ($\lambda$). Because of the periodic structure of the GC, the leaky wave consists of an infinite set of space-harmonic waves with a complex propagation constant $k_n= \beta_n + i\alpha$, where $\alpha$ is the leakage factor and $\beta_n$ is the space-harmonic propagation constant defined as \cite{oe241821027beamwidth}, \cite{tamirPeng1977}:
\begin{equation}
\beta_n \, = \, \beta_0 \, + \, \frac{2n\pi}{d} \quad n=0 \, , \, \pm1 \, , \, \pm2 \, , \, \dots
\end{equation}
The propagation is slower in the waveguide than in free space, so $\beta_s \, > \, k_0 $, where $k_0 \, = \, \frac{2\pi}{\lambda}$. Note that the leakage factor $\alpha$ is usually very small, so $\beta_0 \, \simeq \, \beta_s$ and therefore:
\begin{equation}
\beta_0 > k_0 (=2\pi / \lambda)
\end{equation}
On the other hand, according to the Bragg (phase-matching) condition, the radiation angle from the GC into the air is given by
\begin{eqnarray}
\nonumber \sin(\theta_n)=\frac{\beta_n}{k_0} \quad n=0 \, , \, \pm1 \, , \, \pm2 \, , \, \dots \\
\sin(\theta_n)=\frac{\beta_0 +\frac{2n\pi}{d}}{k_0}\quad n=0 \, , \, \pm1 \, , \, \pm2 \, , \, \dots
\end{eqnarray}
Therefore, in order to have a valid $\theta_n$ we must have $|\frac{\beta_0 +\frac{2n\pi}{d}}{k_0}| <1$. We also know that $\beta_0 > k_0$. This implies that $n$ must be negative.
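As a numerical illustration of the grating equation above, the following Python sketch evaluates the radiation angle of a chosen space harmonic; the values of $N_{eff}$, $\lambda$ and $d$ are placeholders, not final design parameters.
\begin{verbatim}
import math

def radiation_angle_deg(n_eff, wavelength_um, pitch_um, order=-1):
    """Grating equation: sin(theta_n) = N_eff + n*lambda/d.
    Returns the angle from the surface normal in degrees,
    or None if the chosen order does not radiate (|sin| >= 1)."""
    s = n_eff + order * wavelength_um / pitch_um
    if abs(s) >= 1.0:
        return None
    return math.degrees(math.asin(s))

# Placeholder values: N_eff ~ 2.8, lambda = 1.55 um, a few pitch values d
for pitch in (0.55, 0.60, 0.65):
    print(pitch, "um ->", radiation_angle_deg(2.8, 1.55, pitch), "deg")
\end{verbatim}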
Depending on the physical structure there can be multiple outgoing beams; in our application, however, we want a single strong outgoing beam, so we design our GC for $n \, = \, -1$. For this purpose we must satisfy the following conditions:
\begin{eqnarray}
\nonumber |\beta_{-1}| \, < \, k_0 \\
|\beta_{-2}| \, > \, k_0
\label{betaTop}
\end{eqnarray}
Similarly, for the lower region (the substrate) we must satisfy
\begin{equation}
|\beta_{-2}| \, > \, k_0\sqrt{\epsilon_s}
\label{betaLow}
\end{equation}
Note that since $\epsilon_s > 1$, the condition $|\beta_{-1}| \, < \, k_0\sqrt{\epsilon_s}$ is automatically fulfilled.
The effective index of refraction (of the thin-film waveguide) is defined as
\begin{equation}
N_{eff} \, = \, \beta_s/k_0 \, = \, \beta_0/k_0
\end{equation}
Rewriting Equations~(\ref{betaTop})--(\ref{betaLow}) we obtain
\begin{eqnarray*}
|\beta_0 -\frac{2\pi}{d}| <k_0 & \Rightarrow & |N_{eff}-\frac{\lambda}{d}| < 1\\
|\beta_0 -2\frac{2\pi}{d}| >k_0 & \Rightarrow & |N_{eff}-2\frac{\lambda}{d}| > 1\\ \\
|\beta_0 -2\frac{2\pi}{d}| >k_0 \sqrt{\epsilon_s} & \Rightarrow & |N_{eff}-2\frac{\lambda}{d}| > \sqrt{\epsilon_s} \\
\end{eqnarray*}
or
\begin{eqnarray}
N_{eff}-\frac{\lambda}{d} < 1 \\
\frac{\lambda}{d} -N_{eff} < 1 \\
2\frac{\lambda}{d} - N_{eff} >1 \\
2\frac{\lambda}{d} - N_{eff} > \sqrt{\epsilon_s}
\end{eqnarray}
Also, to avoid operating at the Bragg condition, we must have
\begin{equation}
N_{eff} \, \neq \, \frac{\lambda}{d}
\end{equation}
Figure~\ref{neff1} shows the regions of valid parameters for forward or backward propagation.
\begin{figure}[H]
\begin{center}
\includegraphics[width=12cm, height=8cm]{Figures/neffConstraint}
\caption{Valid parameter regions for forward or backward propagation}
\label{neff1}
\end{center}
\end{figure}
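A minimal Python sketch of these design constraints is given below; it simply checks, for a given $N_{eff}$, $\lambda/d$ and substrate permittivity, whether only the $n=-1$ harmonic radiates. The numerical inputs are placeholders.
\begin{verbatim}
import math

def single_minus1_beam(n_eff, wavelength_um, pitch_um, eps_substrate):
    """Check the n = -1 single-beam conditions derived above."""
    r = wavelength_um / pitch_um                     # lambda / d
    radiates_m1     = abs(n_eff - r) < 1.0           # |beta_-1| < k0 (cover)
    no_m2_cover     = abs(n_eff - 2.0 * r) > 1.0     # |beta_-2| > k0
    no_m2_substrate = abs(n_eff - 2.0 * r) > math.sqrt(eps_substrate)
    off_bragg       = not math.isclose(n_eff, r)     # avoid N_eff = lambda/d
    return radiates_m1 and no_m2_cover and no_m2_substrate and off_bragg

# Placeholder values: SiO2-like substrate (eps ~ 2.1), lambda = 1.55 um
print(single_minus1_beam(n_eff=2.8, wavelength_um=1.55, pitch_um=0.60,
                         eps_substrate=2.1))   # True for this parameter set
\end{verbatim}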
\subsubsection{Generating a narrow beam (small FWHM)}
\label{smallFWHM}
To achieve high-resolution beam scanning, the outgoing beam must be very narrow. The FWHM of the beam depends strongly on the group index of the waveguide core ($N_{g_c}$) and on the length of the grating $L$, as described in Equation~(\ref{lambdaFWHM}) \cite{hongChoSungFWHMsize}:
\begin{equation}
\delta \lambda_{FWHM} \, = \, \frac{\lambda^2}{2 \pi L N_{g_c}}
\label{lambdaFWHM}
\end{equation}
To obtain a narrow beam we need to increase the grating length and/or the group index. Increasing the length, however, has limitations: it increases the transmission loss as well as the size of the device.
The other parameter that leads to a narrow beam is the group index, which depends strongly on the physical features of the grating such as the pitch, the duty cycle and the etch depth. Among those, the etch depth is a process-dependent variable with fabrication limits. A non-uniform shallow etch has a large impact on $N_{g_c}$ and $\delta \lambda_{FWHM}$; however, fabricating a non-uniform shallow etch is costly and difficult. We therefore replace the effect of the etch depth by using subwavelength gratings (SWG) in the materials to achieve the desired small FWHM.
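The scaling in Equation~(\ref{lambdaFWHM}) can be illustrated with the short Python sketch below; the wavelength, grating length and group index are placeholder values used only to show the order of magnitude.
\begin{verbatim}
import math

def fwhm_linewidth_nm(wavelength_nm, grating_length_um, group_index):
    """delta_lambda_FWHM = lambda^2 / (2*pi*L*N_g), evaluated in consistent units."""
    lam_um = wavelength_nm * 1e-3
    d_lam_um = lam_um ** 2 / (2.0 * math.pi * grating_length_um * group_index)
    return d_lam_um * 1e3   # back to nanometers

# Placeholder values: lambda = 1550 nm, L = 500 um, N_g = 4
print(round(fwhm_linewidth_nm(1550.0, 500.0, 4.0), 3), "nm")
# Doubling L or N_g halves the FWHM linewidth.
\end{verbatim}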
\subsubsection{Effect of duty cycle}
\label{effectdutycycle}
The index of the grating area can be approximated by a duty-cycle-weighted average of the groove and tooth contributions:
\begin{equation}
N_{g} = f n_g \, + \, (1-f)n_t
\end{equation}
where $n_g$ and $n_t$ are the index values of the groove and tooth regions of the GC, $N_{g}$ is the resulting index of the grating area, and $f$ is the duty cycle.
%\begin{center}
%\textbf{\LARGE To be Completed}
%\end{center}
\subsubsection{Design for minimum leakage}
\label{minleakage}
The duty cycle of the grating and the difference in permittivity between the grating and the top cladding affect the leakage factor of the grating. For a rectangular grating this dependency is described as follows:
\begin{equation}
\alpha \simeq (\epsilon_r \,- \, \epsilon_{cl})^2 \, \sin^2(\pi f)
\end{equation}
where $\epsilon_r$ and $\epsilon_{cl}$ are the permittivities of the teeth and the top cladding.
To keep the leakage at a minimum, we therefore need to choose the teeth and top cladding from materials whose permittivities are not too far apart. Here again we can take advantage of our SWG design of the materials: we are using a silicon photonics platform and do not have many options for material selection, but using SWG we can engineer materials with the desired properties.
Regarding the duty cycle, we see that the leakage is at its maximum value at $f \, = \,0.5$, so we need to design the duty cycle to be away from 0.5.
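The duty-cycle dependence is illustrated by the Python sketch below, which evaluates the proportionality above up to an unspecified structure-dependent prefactor; the permittivities are typical silicon/silica values and are used only as placeholders.
\begin{verbatim}
import math

def leakage_factor(eps_tooth, eps_clad, duty_cycle, prefactor=1.0):
    """alpha ~ prefactor * (eps_r - eps_cl)^2 * sin^2(pi*f);
    the structure-dependent prefactor is not modelled here."""
    return prefactor * (eps_tooth - eps_clad) ** 2 * math.sin(math.pi * duty_cycle) ** 2

# Placeholder permittivities: silicon teeth (~12.1) with silica cladding (~2.1)
for f in (0.1, 0.3, 0.5, 0.7, 0.9):
    print(f, round(leakage_factor(12.1, 2.1, f), 1))
# The leakage peaks at f = 0.5 and is symmetric around it.
\end{verbatim}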
\subsubsection{Design for thickness of layers}
\label{thicknessLayers}
The propagation constant of the $-1$ space harmonic in the different regions of the structure is given by
\begin{equation}
\gamma_{q_{-1}} \, = \, k_0\sqrt{\epsilon_q \, - \, (N \, - \frac{\lambda}{d})^2 }
\end{equation}
where $q$ identifies the layer (substrate, thin film, grating, cladding). Following the analysis in \cite{00248941}, for small values of the etch depth $t_g$, where $|\gamma_{q_{-1}} t_g| \ll 1$, we have $\alpha d \, \propto \, (t_g/d)^2$, meaning the decay is small but highly dependent on the etch depth.\\
As $t_g$ increases, beyond some point the decay only shows small oscillations as $t_g$ changes, with a period $\Lambda_{g_{-1}}$ given by
\begin{equation}
\Lambda_{g_{-1}} = \frac{2\pi}{\gamma_{g_{-1}} } = \frac{\lambda}{\sqrt{\epsilon_g \, - \, (N \, - \frac{\lambda}{d})^2} }
\end{equation}
This crossover point is at $t_{g_{cross}} \, = \, \frac{\Lambda_{g_{-1}}}{4} $. So for $t_g \, < t_{g_{cross}} $ the decay is small but highly dependent on the etch depth, whereas for $t_g \, > t_{g_{cross}} $ the decay shows only small variations with changing etch depth.
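A small Python sketch of this crossover estimate is given below; the wavelength, pitch, effective index and grating-layer permittivity are placeholder values.
\begin{verbatim}
import math

def crossover_etch_depth_um(wavelength_um, pitch_um, n_eff, eps_grating):
    """t_cross = Lambda_{g,-1} / 4 with
    Lambda_{g,-1} = lambda / sqrt(eps_g - (N - lambda/d)^2)."""
    arg = eps_grating - (n_eff - wavelength_um / pitch_um) ** 2
    if arg <= 0.0:
        return None   # the -1 harmonic is evanescent in the grating layer
    return wavelength_um / math.sqrt(arg) / 4.0

# Placeholder values: lambda = 1.55 um, d = 0.60 um, N_eff = 2.8, eps_g = 7.0
print(round(crossover_etch_depth_um(1.55, 0.60, 2.8, 7.0), 3), "um")
\end{verbatim}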
\subsubsection{Steering resolution}
\label{pic2}
For an outgoing beam with FWHM divergence $\delta \psi$ and a steering range $\psi_s$, the steering resolution is $\psi_s / \delta \psi$. As reported in \cite{pic2}: "To date, OPA steering resolution has been limited. To our knowledge, the widest demonstrated steering range of any OPA
was $51^{\circ}$; however, the beam divergence was relatively large $(3.3^{\circ})[9]$. The narrowest beam divergence was $0.3^{\circ}$; however, the steerable range was relatively small $(0.9^{\circ}) [12]$. The highest-resolution device had a resolution of 23, achieving the best ratio of steering range to beam divergence $(23^{\circ}$ and $1^{\circ}$, respectively) [10]."
The highest resolution reported in 2D, using nonuniform etching and 32 emitters, is about 23.
Using a simple and fabrication-friendly method, we achieved a resolution of 34 by sr
A MATLAB code was written in which the constraints and parameters are defined based on the above analysis; the code then determines the physical features of the structure.
\bibliographystyle{unsrt}
\bibliography{references}
\end{document}
| {
"alphanum_fraction": 0.7279173515,
"avg_line_length": 49.8711943794,
"ext": "tex",
"hexsha": "7d21a0c464fd0d686ccb7dc9cfb537cec43f2931",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "157e8c67b0921afc2e0e5586282fca24ee1fc9a9",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "sanammt/lidar",
"max_forks_repo_path": "docs/papers/template.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "157e8c67b0921afc2e0e5586282fca24ee1fc9a9",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "sanammt/lidar",
"max_issues_repo_path": "docs/papers/template.tex",
"max_line_length": 847,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "157e8c67b0921afc2e0e5586282fca24ee1fc9a9",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "sanammt/lidar",
"max_stars_repo_path": "docs/papers/template.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-07T13:07:03.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-05-08T02:23:57.000Z",
"num_tokens": 6039,
"size": 21295
} |
%!TEX root = ../template.tex
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% chapter4.tex
%% NOVA thesis document file
%%
%% Chapter on data preprocessing
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\typeout{NT FILE chapter4.tex}
\chapter{Data Preprocessing}
\label{cha:data_preprocessing}
\hspace{10px}By definition, data preprocessing is the set of all procedures that transform, manipulate or drop data before it is used in supervised machine learning algorithms. In a real-world data science project, data preprocessing is one of the most important steps for ensuring the success of a model: given two models trained on the same data set, the one whose data has undergone proper preprocessing and feature engineering will achieve noticeably better results. The figure below shows the steps involved in building a viable machine learning predictive algorithm, and it makes clear that data preprocessing is an indispensable step before any machine learning algorithm is applied.
\begin{figure}[h]
\centering
\includegraphics[width=0.9\textwidth,height=0.2\textheight]{Chapters/Figures/data preprocessing.png}
\caption{The process of machine learning}
\label{fig:preprocessing}
\end{figure}
The inability of machine learning and data mining algorithms to work with raw data further reinforces the need to transform the data into a more understandable format. Moreover, real-world data representations are often feature intensive, and irrelevant, redundant, noisy or unreliable data makes knowledge discovery during the training phase much more difficult. Data preprocessing can also affect how the results of the final data processing are interpreted, which can have devastating consequences for the interpretation of the results and for future diagnoses \cite{Paolo}. The main function of data preprocessing is to check the quality of the data before any analysis \cite{Pyle}, especially in computational biology. This is performed through three tasks: data cleaning, data editing and data reduction. Each task involves several steps that will be covered later in this chapter and that are important for handling and cleaning the data set before applying feature selection and extraction algorithms, such as splitting the data into training and test sets, handling missing values and correlated features, and taking care of categorical features.
\section{Missing data} % (fold)
\label{sec:missing_data}
\hspace{10px}Missing data is an everyday problem that affects any data-related work and can show up in any type of data. By definition, missing data (or missing values) are values that are not stored for a variable in the observation of interest; they are common in almost all surveys and can have a significant effect on the conclusions that can be drawn from the data \cite{Graham}. These unavailable values can be of any kind, from missing strings, incomplete resources and missing files to incomplete information and data entry errors. They can also have different representations, such as "?", -999 and, often, "n/a"\ or "null"; in Figure \ref{fig:dataset} it is possible to verify the existence of such values, represented as NaN, in the features "GDC\_FILTER"\ and "COSMIC".
The absence of these values can cause several problems during and after the application of the algorithms. Missing data reduces statistical power, that is, the probability that a test rejects the null hypothesis when it is false. It can bias the estimation of parameters and reduce the representativeness of the samples, i.e.\ the sample loses relevance. All of this complicates the analysis of the study and can impair its validity, ultimately leading to invalid conclusions \cite{Hang}. With the development of new algorithms and machine learning packages, some are already capable of detecting and automatically dealing with missing data; this, however, does not remove the need to transform and analyse the missing data manually. Among the numerous strategies for dealing with missing data, the two most common are to replace all missing values by a fixed value, for example zero, or by the average of all available values in the column. These approaches are not always the most appropriate; the right strategy for our data set depends on the domain and on the type of missing data.
\subsection{Detecting Missing data and their type} % (fold)
\label{sec:document_structure}
\hspace{10px}According to Donald B. Rubin \cite{Rubin} and the book \cite{Berthold}, missing data can be divided into three types. This division takes into account the mechanisms of missingness; the different types are described below, from the simplest to the most general.
\begin{itemize}
\item \textbf{Missing completely at random (MCAR):} The probability of a value being absent depends neither on known values nor on the missing value itself, but on some other reason. Missing data caused by equipment failure or by samples being lost or unsatisfactory are examples of MCAR. The statistical advantage of this type of missing data is that the analysis remains unbiased, that is, the estimated parameters are not influenced by the absence of data.
\item \textbf{Missing at random (MAR):} Data are considered MAR when the probability of an instance being missing may depend on other known values but not on the missing value itself. For example, whether or not data for a feature are present does not depend on the feature itself, but may depend on the values of another feature. Although this randomness does not produce bias, MAR data cannot simply be ignored. If a missing variable is MAR, the probability of a dropout for that variable in each case is conditionally independent of the variable itself, making it possible to predict the value from the other observed variables.
\item \textbf{Missing not at random (MNAR):} Data whose characteristics correspond neither to MCAR nor to MAR fall into the category of missing not at random (MNAR). The probability that an instance is missing depends on the value of the variable itself. This type of data is very problematic: since the missingness is directly related to what is missing, the only way to obtain an unbiased estimate of the parameters is to model the missing data, which requires a deeper understanding and domain knowledge of the lost variables.
\end{itemize}
After understanding the differences between MCAR, MAR and MNAR, we are able to classify the type of missing data in our data set. The data describe specific characteristics of each mutation through a set of features, and each variable within a feature is independent of the others, i.e.\ there is no relationship between variables of the same feature. The type of information contained in the features varies between float, int64, bool and object. Thanks to this variety of data types and to a huge discrepancy in the number of NaNs per feature (some have only 1 or 6 missing values, while others are 80\% or even 100\% occupied by NaN), the probability of a variable depending on a variable from a different feature is low. Nevertheless, since we are dealing with mutations from cancer cells, the absence of information in a given feature may well be a consequence of the value (or of what it represents) of another feature within the same mutation; for example, depending on the gene where the mutation was discovered, its representation in the data table may have compromised or missing values. Thus, the NaNs are of the MAR type, which means that some values can be predicted from other observed variables.
Overall, however, there is still a large percentage of missing data: in a data set with 1,253,880 cells of information, 40.86\% of the cells are occupied by NaN values, which bring no additional information to our problem. With \textit{X.info(verbose = True, show\_counts = True)} it is possible to analyse in detail the data type of each feature, the number of non-null cells within each feature, and other information about the data set. Figure \ref{fig:missing_values} shows, in descending order, the number of null cells in each feature after applying the following lines of code: \textit{m = X.isna().sum().tolist()} and \textit{m.sort(reverse =True)}.
\begin{figure}[h]
\centering
\includegraphics[width=0.9\textwidth,height=0.1\textheight]{Chapters/Figures/missing_values.png}
\caption{List of missing data from a given data set}
\label{fig:missing_values}
\end{figure}
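For reference, the listing below reproduces these two lines together with an optional labelled variant that keeps the column names attached to the counts; it is merely a convenience sketch and not an additional preprocessing step.
\begin{lstlisting}[language=Python]
#Missing-value counts per feature, sorted in descending order
m = X.isna().sum().tolist()
m.sort(reverse=True)
#Optional labelled variant that keeps the column names
missing_per_column = X.isna().sum().sort_values(ascending=False)
\end{lstlisting}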
\subsection{Handling Missing Values} % (fold)
\label{sec:handling_missing values}
\hspace{10px}With the list of missing data for the data set determined, it becomes easier to see the percentage of missing data corresponding to each feature. The code below computes that percentage; each value is stored in the list \textit{missing\_ratio} at the same position it occupies in the original list. When analysing the percentages in \textit{missing\_ratio}, we see a huge discrepancy in the missing values: there are features with 100\%, 90\%, 86\%, 37\%, 10\% and even 0.8\% and 0.6\% of missing values. Naturally, some of these features, namely those with 100\% and 90\% of missing values, will contribute little or nothing to the analysis, and it is difficult to estimate the remaining values without additional information, hence the need to deal with these features.
\begin{lstlisting}[language=Python]
#percentage of missing data
missing_ratio = []
for i in m:
value = (i/len(X))*100
missing_ratio.append(value)
missing_ratio
\end{lstlisting}
Over the years, several methods have been proposed to deal with missing values; they fall into two main groups: deletion and imputation. Within the deletion group, the three most common methods are list-wise deletion, pairwise deletion and dropping features. List-wise deletion is the most widely used approach: it omits the cases with missing data and analyses the remaining data. This approach, also known as complete-case analysis (CCA), only works if the sample is large enough and the missing data are of the MCAR type. Since our data are of the MAR type, this strategy is not recommended; a simple way to confirm this is that, by deleting every row with one or more missing values, we quickly end up with an empty data set, because every row contains at least one missing value. Like list-wise deletion, pairwise deletion, or available-case analysis (ACA), is only recommended for MCAR data. In this case, only the missing observations are ignored and the analysis is carried out on the remaining variables. Since pairwise deletion uses all the observed information, it preserves more information than list-wise deletion. However, the analysis becomes deficient in the presence of too many missing observations, and it becomes impossible to compare different analyses (data sets), since only the available cases are considered. In the presence of too much missing data for a variable, a viable option is to exclude the variable or column from the data set given a certain threshold, e.g.\ 60\% or 90\%. This method should only be used after a more careful analysis of the data and if excluding the variable actually improves the model's performance. For the problem at hand, however, this is the most appropriate strategy: the presence of variables with 0\% information (100\% missing) can cause problems in future analyses, poor precision or false conclusions. The best way to deal with these variables is to exclude all those whose values are all NaN; \textit{pandas.DataFrame.dropna} is the method used to remove them, and by passing the parameters \textit{axis = 1}, \textit{how = all} and \textit{inplace=True} it is possible to drop the columns whose values are NaN in every row.
Unlike deletion approaches, imputation methods aim to fill the missing values with more reasonable values. Using deletion to discard samples (rows) or entire features (columns) causes loss of information, which is not always the intended outcome, and this makes imputation the most explored approach. Following the same structure, imputation techniques are divided into two subgroups: single imputation and multiple imputation. In single imputation, only one imputed value is generated for each missing variable. A disadvantage of this strategy is that the generated value is treated as the true value, ignoring the fact that the imputation method cannot provide the exact value and therefore ignoring the uncertainty of the missing values. Most single imputation methods follow three main procedures: replacement using existing values, replacement using statistical values and replacement using represented values. Which of these procedures to use depends on the type of values to which they will be applied: some only work on numerical values, while others work with both numerical and nominal columns. Table \ref{table:1} below succinctly summarises these strategies.
\begin{table}[h!]
\centering
\begin{center}
\begin{tabular}{ | m{5.5em} | m{5cm}| m{5cm} | }
\hline
\textbf{Replacement using:} & \textbf{Only for numerical features} & \textbf{For both numerical and categorical features} \\
\hline
\textbf{Existing values} & maximum or minimum & previous, next or fixed value\\
\hline
\textbf{Statistical values} & mean, mode, median and average or linear interpolation & most frequent value\\
\hline
\textbf{Represented values} & regression algorithms & k-Nearest, regression and classification algorithms \\
\hline
\end{tabular}
\caption{Characterization of Single imputation methods}
\label{table:1}
\end{center}
\end{table}
In multiple imputation, as the name implies, several imputed values are generated for each missing observation, meaning that several data sets with different imputed values are created. This is a statistically grounded imputation approach and, contrary to single imputation methods, it does take the uncertainty of the imputed values into account; single imputation treats imputed values as real values and ignores the standard error, causing bias in the results \cite{Azur}. Multiple imputation creates multiple "complete"\ data sets by filling in the missing values multiple times. The best known algorithm is Multiple Imputation by Chained Equations (MICE). However, this strategy is not always the best one: the fact that multiple data sets are created can increase the complexity of the algorithm and lead to memory problems.
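For illustration only, the listing below sketches a chained-equation style imputation with scikit-learn's \textit{IterativeImputer}, which is modelled after MICE; the variable \textit{X\_num}, standing for the numeric part of the data set, is an assumption of this example and not part of the pipeline adopted in this work.
\begin{lstlisting}[language=Python]
#Chained-equation (MICE-style) imputation with scikit-learn
#This import is required to enable IterativeImputer
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer

imputer = IterativeImputer(max_iter=10, random_state=0)
#X_num is assumed to hold the numeric columns of the data set
X_num_imputed = imputer.fit_transform(X_num)
\end{lstlisting}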
To make the data analysis and processing easier, the chosen approach was to create functions that analyse sections of the data set. In this case, only the columns with float values were chosen, since they contain the largest number of NaN values and therefore require greater care and special attention. The \textit{handle\_nan\_values} function, shown below, eliminates the columns that fall below a given threshold and applies an imputation technique to fill the remaining NaN values in the columns, using a \textit{mean} or \textit{most\_frequent} strategy depending on the type of data.
\begin{lstlisting}[language=Python]
import numpy as np
from sklearn.impute import SimpleImputer

def handle_nan_values(X):
    #Remove columns with all row values NaN
    X.dropna(axis=1, how="all", inplace = True)
    #Remove columns that are below the threshold of 0.70 (70%)
    length = len(X)
    thresh = length*0.70
    X.dropna(axis=1, thresh=int(thresh), inplace = True)
    #Apply an imputation algorithm to fill the remaining NaN values
    nan_columns = X.loc[:, X.isna().any()].columns
    for n in nan_columns:
        if X[n].dtypes == object:
            #Categorical columns: impute with the most frequent value
            imputer = SimpleImputer(missing_values=np.NaN,
                strategy='most_frequent')
            X[n] = imputer.fit_transform(X[n].values.reshape(-1,1))
        else:
            #Numerical columns: impute with the column mean
            imputer = SimpleImputer(missing_values=np.NaN,
                strategy="mean")
            X[n] = imputer.fit_transform(X[n].values.reshape(-1,1))
    return X
\end{lstlisting}
\hspace{10px}As a first step, the columns whose values are all NaN are removed, as they contribute no additional information and are considered noisy or meaningless data that cannot be read or used by the programs and algorithms. We then select a threshold for the minimum amount of useful information contained in each column. The threshold chosen was 70\%, that is, only columns that contain at least 70\% of information are kept (this is computed from the number of rows of each data set; for example, with 10449 rows the threshold would be 10449*0.7). This value was chosen because most of these columns contain little or no information: of the 21 columns with float values, only one contains 90\% of information and only two are above 70\%, while the remainder fall in the range of 10\% or less, making it almost impossible to accurately predict the missing values.
The NaN values in the columns whose type is not object were filled using an imputation technique. From \textit{sklearn.impute} we use the SimpleImputer with the \textit{mean} strategy, which replaces the missing values with the mean along each column. This has the benefit of not changing the sample mean of each column. After that, all that remains is to deal with the NaN values in the object type columns.
Initially, the replacement was done using existing values via the \textit{pad} method, also known as forward filling, in which the last valid observation is used as the replacement value until the next valid value in the column is reached. However, this method, like the ffill method, only replaces values forwards starting at the first valid (non-NaN) value; any NaN values at the beginning of the column are ignored and remain untreated. The same applies to bfill or backfill, which propagate valid values backwards. Another problem with these methods is that, in the presence of a feature with only one valid value, all other values would be replaced by it, biasing the feature and manipulating the result in its favour. Moreover, depending on the position of the first valid value, these methods may or may not work. Therefore, the best solution was to apply the same imputation technique used for the other value types, this time with the \textit{most\_frequent} strategy, since the \textit{mean} and \textit{median} strategies do not work with object type values.
\section{Correlated Features}
\label{sec:correlated_features}
\hspace{10px}Since the number of NaN values is, at this point, zero, the next step is to check for correlated features and, if they exist, drop them. By definition, correlation means a mutual relationship between two or more things; it is a statistical expression of how close two variables are to having a linear relationship with each other \cite{Vishal}. The purpose of measuring correlation is to see whether the values of two features, whether small or large, are paired with each other. In statistics, this is measured by the correlation coefficient \cite{corrwebsite}. This coefficient varies between -1 and 1: values above zero indicate a positive correlation, values below zero a negative correlation, and a value of zero indicates no linear correlation.
In regression, the main focus is to predict a dependent variable from one or more independent variables. To know how well a regression model fits the data, examining the variance of its estimated weights is a natural choice: under the assumption that the dependent variable is normally distributed with variance \(\sigma^2\), the covariance of the estimated weight vector is proportional to \((X^TX)^{-1}\), i.e.\ \(\mathrm{Var}(\hat{\beta}) = \sigma^2 (X^TX)^{-1}\). For the model to be stable, this variance must be low; if it is high, the model is very sensitive to the data and might not perform well. Highly correlated features in our data set make \((X^TX)^{-1}\), and hence the variance of the weight vector of the regression model, large. So, when two features are highly correlated, the best solution is to drop one of them.
A piece of code adapted from \href{https://chrisalbon.com/code/machine_learning/feature_selection/drop_highly_correlated_features/}{Chris Albon's} work is provided below. It builds the correlation matrix and then finds the features whose correlation with another feature is greater than 0.95.
\begin{lstlisting}[language=Python]
import numpy as np

def get_correlated_cols(X):
    # Create correlation matrix
    corr_matrix = X.corr().abs()
    # Select upper triangle of correlation matrix
    upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape),
        k=1).astype(bool))
    # Collect feature columns with correlation greater than 0.95
    to_drop = []
    for column in upper.columns:
        if any(upper[column] > 0.95):
            to_drop.append(column)
    return to_drop
\end{lstlisting}
\section{Duplicated Columns and Similar Value Columns}
\label{sec:duplicated_similar_columns}
\hspace{10px}Once the highly correlated columns have been eliminated, we move on to the removal of duplicated columns and of columns with similar values. In the context of data quality, the removal of duplicates consists of identifying and then removing cases where there is more than one record of the same instance.
The data are collected from several clinical and genomic cancer studies. As researchers update studies or make new discoveries, they often enter their data more than once. This, in turn, leads to data sets having more than one identical feature, possibly with conflicting information.
Identifying and removing or merging these duplicated features creates a single consistent version of the data set, saves time and resources by not running identical data multiple times through the feature selection or extraction algorithms, and makes the data set more accurate, as no information is lost when duplicated features are removed.
\begin{lstlisting}[language=Python]
def get_duplicated_value_cols(X):
    #get the set of duplicated columns
    duplicated_columns = X.T[X.T.duplicated(keep=False)].T.columns
    #drop the duplicates that exist under a different name
    cols_to_drop = ("EXON", "Feature", "Reference_Allele")
    for col in duplicated_columns:
        if col in cols_to_drop:
            X.drop(col, axis=1, inplace = True)
    return X
\end{lstlisting}
With the help of the pandas framework it is possible to build a matrix that identifies the duplicated columns; this matrix is used in the \textit{get\_duplicated\_value\_cols} function shown above, followed by an iteration that selects the columns to be removed. According to the "GDC Data User's Guide"\ the EXON, Feature and Reference\_Allele columns are already represented in the data set under a different name; taking into account the characteristics of each column and what each one represents, these were the columns chosen to be removed.
Finally, it is necessary to find the features in which the majority of the values are alike. The principle is the same as for duplicated columns: such features will not help us to differentiate driver mutations, and therefore features in which more than 90\% of the values are similar have been removed from the data set.
\begin{lstlisting}[language=Python]
def get_similar_value_cols(X):
thresh = 90
similar_values = []
for c in X.columns:
#percentage of each value inside columns
percent_vals = (X[c].value_counts()/len(X)*100).values
#filter columns where more than 90% values are similar
if percent_vals[0] > thresh and len(percent_vals) > 2:
similar_values.append(c)
return similar_values
\end{lstlisting}
\section{Outliers} % (fold)
\label{sec:outliners}
\hspace{10px}As stated in \cite{Kuhn}, outliers are samples that are exceptionally far from the mainstream of the data. With this definition, an outlier can be described as an observation highly different from the rest. Such observations can distort the distribution of our data and statistics like means, standard deviations and correlations. Hence, it is important to deal with them while preparing the data for further analysis.
However, it is very difficult to define and identify outliers in general because of the specificities of each data set. It is mandatory to analyse the observations before deciding whether a given value is an outlier or not. Dropping outliers can be very dangerous and should only be done under specific conditions: when we know in advance that the outlier is completely wrong (e.g.\ we know the range our data fall in and remove the outliers outside that range), when the data set is large enough that the sample will not be hurt by dropping questionable outliers, or when the outlier does not change the results but does affect assumptions.
Since our goal, identifying which mutations are driver mutations, is critical, even the smallest change in the data set makes a difference. Sometimes these extreme values are legitimate observations and may be the very reason why a specific mutation was selected as a driver mutation. Another reason not to remove outliers is when there is a large number of them in the data set: outliers are supposed to be rare, so if, let us say, 20\% of a data set consists of outliers, there is something of interest within the data that requires further analysis. One way to check this is to perform the \textbf{Z-score} test, in which values whose z-score lies outside the range $[-3, 3]$ are considered outliers. After performing this test, 2046 values were flagged as outliers,
which corresponds to 20\% of our data set.
In this situation it is not recommended to simply discard the outliers, and the analysis should ideally be run both with and without them. Here, the analysis of the algorithms will be performed with the outliers kept in the data set.
\section{Categorical Features} % (fold)
\label{sec:categorical_values}
\hspace{10px}The next step is to deal with categorical features. Since machine learning models work with mathematical equations, categorical data are not accepted directly and must be converted to integers. Two common ways of doing this are the Label Encoding and One Hot Encoding methods. The big difference between the two is that One Hot Encoding creates a column for each categorical value, set to 1 if that value occurs in the original data and 0 otherwise, whereas Label Encoding converts the categorical values themselves into numeric labels.
The fact that it does not create additional columns makes Label Encoding the method of choice for our data set. Creating new columns leads to a drastic increase in the size of the data set, from 9.5MB to more than 2.5GB of information, which makes the application of feature extraction and selection algorithms costly in both time and space.
\begin{lstlisting}[language=Python]
from sklearn.preprocessing import LabelEncoder

#Handling categorical values for variable X
def handle_categ_values(X):
    labelencoder_x = LabelEncoder()
    #Execute LabelEncoder for each non-integer column
    for n in X.columns:
        if X[n].dtype != int:
            X[n] = labelencoder_x.fit_transform(X[n])
    return X

#Handling categorical values for variable Y
labelencoder_y = LabelEncoder()
Y = labelencoder_y.fit_transform(Y)
\end{lstlisting}
The code above shows the application of the Label Encoder method to the X and Y variables mentioned above. A \textit{LabelEncoder} object is instantiated, and the \textit{fit\_transform} method fits the encoder to the desired column and transforms it in a single step, being the shorthand for calling \textit{fit} and then \textit{transform}. Applying the \textit{info} method to both X and Y shows that the categorical values, in this case of type object and boolean, have all been converted to integer values and are ready for the next data preprocessing step. To ensure that all data in X are of the same type, \textit{fit\_transform} is applied to every column that is not already of integer type.
\section{Train-Test Split} % (fold)
\label{sec:test_train_split}
\hspace{10px}Although splitting the data into training and test sets is one of the most important steps in machine learning, for our purposes this step is only applied after the feature selection and extraction algorithms have been used. These algorithms are applied to the original data set, before any separation into training and test data; otherwise, null values would reappear and would be more difficult to treat in the middle of the algorithm implementation. The train-test split procedure is mainly used to estimate the performance of machine learning algorithms on data that was not used to train the model. It is a quick and easy-to-run procedure whose results allow the performance of machine learning algorithms to be compared on predictive modelling problems.
The train-test split, as the name implies, divides the original data set into two subsets, a training set and a test set, where the training set is used to fit the machine learning model and the test set is used to later evaluate that same model.
The scikit-learn library provides a built-in function named \textit{train\_test\_split}. X and Y are passed in as arguments, and the function splits them with, say, 30\% for the \textit{test\_size}, leaving 70\% for the \textit{train\_size}, producing x\_train, x\_test, y\_train and y\_test with a \textit{random\_state} of 42.
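In sketch form, the call described above looks as follows (the 70/30 split and the \textit{random\_state} of 42 are the values quoted in the text):
\begin{lstlisting}[language=Python]
from sklearn.model_selection import train_test_split

#30% of the samples go to the test set, 70% remain for training
x_train, x_test, y_train, y_test = train_test_split(
    X, Y, test_size=0.3, random_state=42)
\end{lstlisting}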
\section{Features Scaling} % (fold)
\label{sec:feature_scaling}
\hspace{10px}The last step of data preprocessing is the scaling and normalisation of the data set. The purpose of scaling is to transform the data so that it fits within a specific range, while normalisation changes the data so that it can be described by a normal distribution. Normalisation will only be used prior to machine learning techniques that assume normally distributed data, such as LDA, ANOVA and the t-test.
Empirically, machine learning and deep learning algorithms tend to perform better on a scaled and normalised data set than on one that is neither scaled nor normalised. StandardScaler and Normalization are two common ways to rescale the data; the two are similar, but StandardScaler is simpler to apply and is the one used here.
StandardScaler is imported from the sklearn.preprocessing library and, similarly to the LabelEncoder method, its \textit{fit\_transform} method is applied, this time to our X data set. After applying all the data preprocessing steps, the data set looks like Figure \ref{fig:xtrain_data} and is ready to be used in the algorithms described in the following chapters.
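A minimal sketch of this final step is shown below; fitting the scaler on the training split and reusing it on the test split is a common variant that avoids leaking test statistics into the scaling parameters.
\begin{lstlisting}[language=Python]
from sklearn.preprocessing import StandardScaler

sc_x = StandardScaler()
#Standardise every column to zero mean and unit variance
X = sc_x.fit_transform(X)
\end{lstlisting}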
\begin{figure}[h]
\centering
\includegraphics[width=0.7\textwidth,height=0.17\textheight]{Chapters/Figures/standardScaler.png}
\caption{Normalized X data}
\label{fig:xtrain_data}
\end{figure} | {
"alphanum_fraction": 0.7797952303,
"avg_line_length": 122.4583333333,
"ext": "tex",
"hexsha": "6851ab48fc42b23090ba640e1f89962285cb7e83",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "39bb6ed072bdf48fdc8798736ad329bf0cfff997",
"max_forks_repo_licenses": [
"LPPL-1.3c"
],
"max_forks_repo_name": "Tomates96/final-thesis",
"max_forks_repo_path": "Chapters/chapter3.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "39bb6ed072bdf48fdc8798736ad329bf0cfff997",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"LPPL-1.3c"
],
"max_issues_repo_name": "Tomates96/final-thesis",
"max_issues_repo_path": "Chapters/chapter3.tex",
"max_line_length": 2381,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "39bb6ed072bdf48fdc8798736ad329bf0cfff997",
"max_stars_repo_licenses": [
"LPPL-1.3c"
],
"max_stars_repo_name": "Tomates96/final-thesis",
"max_stars_repo_path": "Chapters/chapter3.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 6878,
"size": 32329
} |
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% UMB-CS110-2015S: Introduction to Computing
% Copyright 2015 Pejman Ghorbanzade <[email protected]>
% Creative Commons Attribution-ShareAlike 4.0 International License
% More info: https://github.com/ghorbanzade/UMB-CS110-2015S
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\def \topDirectory {../..}
\def \texDirectory {\topDirectory/src/main/tex}
\documentclass[10pt, compress]{beamer}
\usepackage{\texDirectory/template/style/directives}
\input{\texDirectory/template/config}
\usepackage{\texDirectory/template/style/beamerthemeUmassLecture}
\usepackage[school]{\texDirectory/template/pgf-umlcd/pgf-umlcd}
\doc{number}{12}
%\setbeamertemplate{footline}[text line]{}
\begin{document}
\prepareCover
\section{Course Administration}
\begin{frame}[fragile]
\frametitle{Course Administration}
Assignment 4 released. Due on April 16, 2015 at 17:30.
\end{frame}
\begin{frame}[fragile]
\frametitle{Overview}
\begin{itemize}
\item[] Abstraction
\begin{itemize}
\item[] Introduction
\item[] Abstract Classes and Methods
\end{itemize}
\end{itemize}
\end{frame}
\plain{}{Abstraction}
\section{Introduction}
\begin{frame}[fragile]
\frametitle{Abstraction}
\begin{block}{How Do We Think?}
\begin{quote}
- Hi Sara! What's up?\\
+ Hey Mike! What's up?
\end{quote}
\end{block}
\begin{block}{How Do We Really Think?}
\begin{quote}
What did you do today?
\end{quote}
\end{block}
\end{frame}
\begin{frame}[fragile]
\frametitle{Abstraction}
\begin{block}{Objective}
Write a program \texttt{Geometry.java} in which you can create circles and squares and get their area, perimeter, color and whether they are filled or not.
\end{block}
\end{frame}
\begin{frame}[fragile]
\frametitle{Abstraction}
\begin{block}{UML Diagram for Class \texttt{Shape}}
\begin{figure}
\centering
\begin{tikzpicture}
\begin{class}[]{Shape}{0, 0}
\attribute{- numShapes: int}
\attribute{- filled: boolean}
\attribute{- color: String}
\operation{+ Shape()}
\operation{+ getNumShape(): int}
\operation{+ isFilled(): boolean}
\operation{+ setFilled(boolean filled)}
\operation{+ getColor(): String}
\operation{+ setColor(String color)}
\end{class}
\end{tikzpicture}
\end{figure}
\end{block}
\end{frame}
\begin{frame}[fragile]
\frametitle{Abstraction}
\begin{block}{UML Diagram for Class \texttt{Circle}}
\begin{figure}
\centering
\begin{tikzpicture}
\begin{class}[]{Circle}{0, 0}
\attribute{- numCircles: int}
\attribute{- radius: double}
\operation{+ Circle()}
\operation{+ getPerimeter(): double}
\operation{+ getArea(): double}
\operation{+ getNumCircles(): int}
\operation{+ getRadius(): double}
\operation{+ setRadius(double radius)}
\end{class}
\end{tikzpicture}
\end{figure}
\end{block}
\end{frame}
\begin{frame}[fragile]
\frametitle{Abstraction}
\begin{block}{UML Diagram for Class \texttt{Square}}
\begin{figure}
\centering
\begin{tikzpicture}
\begin{class}[]{Square}{0, 0}
\attribute{- numSquares: int}
\attribute{- length: double}
\operation{+ Square()}
\operation{+ getPerimeter(): double}
\operation{+ getArea(): double}
\operation{+ getNumSquares(): int}
\operation{+ getLength(): double}
\operation{+ setLength(double radius)}
\end{class}
\end{tikzpicture}
\end{figure}
\end{block}
\end{frame}
\begin{frame}[fragile]
\frametitle{Abstraction}
\begin{block}{\texttt{Shape.java} (v2.0) (Part 1)}
\begin{minted}[fontsize=\small,tabsize=8, linenos, firstnumber=1]{java}
public class Shape {
// attributes and fields
private static int numShapes = 0;
private String color;
private boolean filled;
// constructors
public Shape() {
numShapes++;
}
// setter and getter for color
public void setColor(String someColor) {
this.color = someColor;
}
public String getColor() {
return color;
}
\end{minted}
\end{block}
\end{frame}
\begin{frame}[fragile]
\frametitle{Abstraction}
\begin{block}{\texttt{Shape.java} (v2.0) (Part 2)}
\begin{minted}[fontsize=\small,tabsize=8, linenos, firstnumber=17]{java}
// setter and getter for filled
public void setFilled(boolean state) {
this.filled = state;
}
public boolean isFilled() {
return filled;
}
// setter and getter for numShapes
public static int getNumShapes() {
return numShapes;
}
}
\end{minted}
\end{block}
\end{frame}
\begin{frame}[fragile]
\frametitle{Abstraction}
\begin{block}{\texttt{Circle.java} (v2.0) (Part 1)}
\begin{minted}[fontsize=\small,tabsize=8, linenos, firstnumber=1]{java}
public class Circle extends Shape {
// fields and attributes
private static int numCircles = 0;
private double radius;
// methods
public double getArea() {
return Math.PI*Math.pow(radius,2);
}
public double getPerimeter() {
return 2*Math.PI*radius;
}
// constructors
public Circle(double someRadius) {
radius = someRadius;
numCircles++;
}
\end{minted}
\end{block}
\end{frame}
\begin{frame}[fragile]
\frametitle{Abstraction}
\begin{block}{\texttt{Circle.java} (v2.0) (Part 2)}
\begin{minted}[fontsize=\small,tabsize=8, linenos, firstnumber=17]{java}
// setter and getter for radius
public void setRadius(double someRadius) {
this.radius = someRadius;
}
public double getRadius() {
return radius;
}
// setter and getter for numCircles
public static int getNumCircles() {
return numCircles;
}
}
\end{minted}
\end{block}
\end{frame}
\begin{frame}[fragile]
\frametitle{Abstraction}
\begin{block}{\texttt{Square.java} (v2.0) (Part 1)}
\begin{minted}[fontsize=\small,tabsize=8, linenos, firstnumber=1]{java}
public class Square extends Shape {
// fields and attributes
private static int numSquares = 0;
private double length;
// methods
public double getArea() {
return Math.pow(length,2);
}
public double getPerimeter() {
return length*4;
}
// constructors
public Square(double someLength) {
length = someLength;
numSquares++;
}
\end{minted}
\end{block}
\end{frame}
\begin{frame}[fragile]
\frametitle{Abstraction}
\begin{block}{\texttt{Square.java} (v2.0) (Part 2)}
\begin{minted}[fontsize=\small,tabsize=8, linenos, firstnumber=17]{java}
// setter and getter for length
public void setLength(double someLength) {
this.length = someLength;
}
public double getLength() {
return length;
}
// setter and getter for numSquares
public static int getNumSquares() {
return numSquares;
}
}
\end{minted}
\end{block}
\end{frame}
\begin{frame}[fragile]
\frametitle{Abstraction}
\begin{block}{\texttt{Geometry.java} (v2.0)}
\begin{minted}[fontsize=\small,tabsize=8, linenos, firstnumber=1]{java}
public class Geometry {
public static void main(String[] args) {
Circle myCircle = new Circle(5);
myCircle.setColor("Red");
myCircle.setFilled(true);
double result1 = myCircle.getPerimeter();
System.out.println("Perimeter of circle: "+result1);
Square mySquare = new Square(4);
mySquare.setColor("Blue");
mySquare.setFilled(false);
double result2 = mySquare.getArea();
System.out.println("Area of square: "+result2);
System.out.println(Shape.getNumShapes());
}
}
\end{minted}
\end{block}
\end{frame}
\begin{frame}[fragile]
\frametitle{Abstraction}
\begin{block}{\texttt{UML Diagram (v2.0)}}
\begin{figure}
\centering
\begin{tikzpicture}
\begin{class}[]{Shape}{0, 0}
\attribute{numShapes: int}
\attribute{filled: boolean}
\attribute{color: String}
\operation{Shape()}
\end{class}
\begin{class}[]{Circle}{-3, -3.25}
\inherit{Shape}
\attribute{numCircles: int}
\attribute{radius: double}
\operation{Circle()}
\operation{getPerimeter(): double}
\operation{getArea(): double}
\end{class}
\begin{class}[]{Square}{3, -3.25}
\inherit{Shape}
\attribute{numSquares: int}
\attribute{length: double}
\operation{Square()}
\operation{getPerimeter(): double}
\operation{getArea(): double}
\end{class}
\end{tikzpicture}
\end{figure}
\end{block}
\end{frame}
\section{Abstract Classes and Methods}
\begin{frame}[fragile]
\frametitle{Abstraction}
\begin{block}{Problem Statement}
\begin{itemize}
\item[] Superclass can be instantiated.
\item[] Subclasses might fail to implement the \texttt{getPerimeter()} and \texttt{getArea()} methods.
\end{itemize}
\begin{minted}[fontsize=\small,tabsize=8, linenos, firstnumber=1]{java}
public class Evil {
public static void main(String[] args) {
Shape myShape = new Shape();
myShape.setColor("Red");
myShape.setFilled(true);
}
}
\end{minted}
\end{block}
\end{frame}
\begin{frame}[fragile]
\frametitle{Abstraction}
\begin{block}{Proposed Solution}
Disallow instantiation of the \texttt{Shape} class.
Force all subclasses of \texttt{Shape} class to implement \texttt{getArea()} and \texttt{getPerimeter()} methods.
\end{block}
\end{frame}
\begin{frame}[fragile]
\frametitle{Abstraction}
\begin{block}{Definition}
Abstraction is the act of hiding the internal implementation details of an object's behavior and exposing only its functionality.
\end{block}
\end{frame}
\begin{frame}[fragile]
\frametitle{Abstraction}
\begin{block}{Abstract Methods}
Abstract methods define signature of methods that must be implemented in subclasses.
\begin{minted}[fontsize=\small,tabsize=8, linenos, firstnumber=1]{java}
public abstract double getArea();
public abstract double getPerimeter();
\end{minted}
\end{block}
\end{frame}
\begin{frame}[fragile]
\frametitle{Abstraction}
\begin{block}{Abstract Classes}
Classes declared as abstract may not be instantiated.
Abstract classes may or may not have abstract methods.
Classes with abstract methods must be declared as abstract.
\end{block}
\begin{minted}[fontsize=\small,tabsize=8, linenos, firstnumber=1]{java}
public abstract class Shape {
public abstract double getArea();
public abstract double getPerimeter();
// other class members
}
\end{minted}
\end{frame}
\begin{frame}[fragile]
\frametitle{Abstraction}
\begin{block}{Remember}
Subclasses extending an abstract class must implement its abstract methods or be declared as abstract.
\end{block}
\begin{minted}[fontsize=\small,tabsize=8, linenos, firstnumber=1]{java}
public class Circle extends Shape {
private double radius;
@Override
public double getArea() {
return Math.PI*Math.pow(radius,2);
}
@Override
public double getPerimeter() {
return this.radius*Math.PI*2;
}
// other class members
}
\end{minted}
\end{frame}
\begin{frame}[fragile]
\frametitle{Abstraction}
\begin{block}{\texttt{Shape.java} (v3.0) (Part 1)}
\begin{minted}[fontsize=\small,tabsize=8, linenos, firstnumber=1]{java}
public abstract class Shape {
// fields
private static int numShapes = 0;
// attributes
private String color;
private boolean filled;
// constructors
public Shape() {
numShapes++;
}
// abstract methods
public abstract double getPerimeter();
public abstract double getArea();
\end{minted}
\end{block}
\end{frame}
\begin{frame}[fragile]
\frametitle{Abstraction}
\begin{block}{\texttt{Shape.java} (v3.0) (Part 2)}
\begin{minted}[fontsize=\small,tabsize=8, linenos, firstnumber=14]{java}
// setters and getters
public static int getNumShapes() {
return numShapes;
}
public void setColor(String someColor) {
this.color = someColor;
}
public String getColor() {
return color;
}
public void setFilled(boolean state) {
this.filled = state;
}
public boolean isFilled() {
return filled;
}
}
\end{minted}
\end{block}
\end{frame}
\begin{frame}[fragile]
\frametitle{Abstraction}
\begin{block}{\texttt{Circle.java} (v3.0) (Part 1)}
\begin{minted}[fontsize=\small,tabsize=8, linenos, firstnumber=1]{java}
public class Circle extends Shape {
// fields
private static int numCircles = 0;
// attributes
private double radius;
// methods
@Override
public double getArea() {
return Math.PI*Math.pow(radius,2);
}
@Override
public double getPerimeter() {
return 2*Math.PI*radius;
}
\end{minted}
\end{block}
\end{frame}
\begin{frame}[fragile]
\frametitle{Abstraction}
\begin{block}{\texttt{Circle.java} (v3.0) (Part 2)}
\begin{minted}[fontsize=\small,tabsize=8, linenos, firstnumber=15]{java}
// constructors
public Circle(double someRadius) {
radius = someRadius;
numCircles++;
}
// setter and getter for radius
public void setRadius(double someRadius) {
this.radius = someRadius;
}
public double getRadius() {
return radius;
}
// setter and getter for numCircles
public static int getNumCircles() {
return numCircles;
}
}
\end{minted}
\end{block}
\end{frame}
\begin{frame}[fragile]
\frametitle{Abstraction}
\begin{block}{\texttt{Square.java} (v3.0) (Part 1)}
\begin{minted}[fontsize=\small,tabsize=8, linenos, firstnumber=1]{java}
public class Square extends Shape {
// fields and attributes
private static int numSquares = 0;
private double length;
// methods
@Override
public double getArea() {
return Math.pow(length,2);
}
@Override
public double getPerimeter() {
return length*4;
}
\end{minted}
\end{block}
\end{frame}
\begin{frame}[fragile]
\frametitle{Abstraction}
\begin{block}{\texttt{Square.java} (v3.0) (Part 2)}
\begin{minted}[fontsize=\small,tabsize=8, linenos, firstnumber=15]{java}
// constructors
public Square(double someLength) {
length = someLength;
numSquares++;
}
// setters and getters
public void setLength(double someLength) {
this.length = someLength;
}
public double getLength() {
return length;
}
public static int getNumSquares() {
return numSquares;
}
}
\end{minted}
\end{block}
\end{frame}
\begin{frame}[fragile]
\frametitle{Abstraction}
\begin{block}{\texttt{Geometry.java} (v3.0)}
\begin{minted}[fontsize=\small,tabsize=8, linenos, firstnumber=1]{java}
public class Geometry {
public static void main(String[] args) {
Circle myCircle = new Circle(5);
myCircle.setColor("Red");
myCircle.setFilled(true);
double result1 = myCircle.getPerimeter();
System.out.println("Perimeter of circle: "+result1);
Square mySquare = new Square(4);
mySquare.setColor("Blue");
mySquare.setFilled(false);
double result2 = mySquare.getArea();
System.out.println("Area of square: "+result2);
System.out.println(Shape.getNumShapes());
}
}
\end{minted}
\end{block}
\end{frame}
\plain{}{Keep Calm\\and\\Think Object-Oriented}
\end{document}
| {
"alphanum_fraction": 0.6693926004,
"avg_line_length": 26.601754386,
"ext": "tex",
"hexsha": "69b38449b207d7d0b562f25950154d6622a00ed8",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "b12ded95ddec71cd45dd05dff773018f6879d37f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "UMB-CS110-2015S/Assignments",
"max_forks_repo_path": "src/main/tex/slides/ls12.tex",
"max_issues_count": 7,
"max_issues_repo_head_hexsha": "b12ded95ddec71cd45dd05dff773018f6879d37f",
"max_issues_repo_issues_event_max_datetime": "2019-03-17T16:39:11.000Z",
"max_issues_repo_issues_event_min_datetime": "2015-08-22T15:44:45.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "UMB-CS110-2015S/Assignments",
"max_issues_repo_path": "src/main/tex/slides/ls12.tex",
"max_line_length": 159,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "b12ded95ddec71cd45dd05dff773018f6879d37f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "UMB-CS110-2015S/Assignments",
"max_stars_repo_path": "src/main/tex/slides/ls12.tex",
"max_stars_repo_stars_event_max_datetime": "2020-05-03T18:41:40.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-05-03T18:41:40.000Z",
"num_tokens": 4415,
"size": 15163
} |
\section{Feature Extraction and Feature Fusion}
\label{sec:FeatureExtractionFusion}
As we have observed in our survey of Siamese trackers~\cite{ondrasovic2021siamese}, incremental improvements in feature extraction were often the major contribution of numerous works. With this in mind, we consider feature extraction a necessary part of any deep learning model design.
% ##############################################################################
\subsection{Residual Neural Networks}
\label{ssec:ResidualNeuralNetworks}
He~\etal{}~\cite{he2015resnet} remarked that deeper neural networks are more difficult to train. In that work, a residual learning framework was proposed to facilitate the training of neural networks significantly deeper than their previously used counterparts. The explicit reformulation of the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions, led to a breakthrough in the utilization of deep neural networks.
The foundation of \glspl{resnet} is the adoption of skip connections that represent shortcuts to jump over certain layers. Typically, such models are implemented using double or even triple layer skips containing nonlinearities (\egtext{}, \gls{relu}) and batch normalization~\cite{ioffe2015batchnorm} in between. The primary reason for adding skip connections was to avoid vanishing gradient problems. As demonstrated in \figtext{}~\ref{fig:ResnetMotivation}, the degradation problem manifests itself in deeper networks when their accuracy shows signs of saturation followed by a rapid decline, but not as a result of overfitting.
Let $\func{H}{\vect{x}}$ denote the desired underlying mapping. The stacked nonlinear layers are then expected to fit a mapping $\func{F}{\vect{x}} = \func{H}{\vect{x}} - \vect{x}$. The original mapping is reformulated as $\func{H}{\vect{x}} = \func{F}{\vect{x}} + \vect{x}$. The initial hypothesis, which turned out to be correct, was that it is easier to optimize the residual mapping instead of the original, unreferenced mapping.
% ------------------------------------------------------------------------------
\begin{figure}[t]
\centerline{\includegraphics[width=0.8\linewidth]{figures/theoretical_foundations/resnet_motivation.pdf}}
\caption[\gls{resnet} motivation]{A motivation behind the \glspl{resnet}. The training error and the test error are greater for the deeper model than for the shallower model. Therefore, the inevitable conclusion is that in order to learn better networks, it takes more than just stacking more layers. \externalsrc{\cite{he2015resnet}}}
\label{fig:ResnetMotivation}
\end{figure}
% ------------------------------------------------------------------------------
% ##############################################################################
\subsection{Feature Pyramid Networks}
\label{ssec:FeaturePyramidNetworks}
\glsreset{fpn}
\gls{fpn}~\cite{lin2017fpn} is an extension to existing backbones used for feature extraction serving various tasks ranging from image classification, object detection, object tracking or even image segmentation. Its greatest strength is the combination of low-resolution, semantically strong features with high-resolution, semantically weak but discriminative features via a top-down pathway and lateral connections.
\figtext{}~\ref{fig:FPNVariousApproaches} compares competing methods of feature aggregation by their core principles. Regarding the \gls{fpn} itself, observe the two pathways in \figtext{}~\ref{fig:FPNVariousApproaches} \imgpartdesc{d}. The bottom-up pathway represents the feed-forward computation of the backbone, where one pyramid level corresponds to one stage. The output of the last layer of each stage enriches the feature maps of the top-down pathway through lateral connections. The top-down pathway consists of upsampling operations followed by $1 \times 1$ convolutions to align the channel dimensions of the tensors, and then element-wise addition of the features.
% ------------------------------------------------------------------------------
\begin{figure}
\centering
\begin{subfigure}[t]{0.3\textwidth}
\centering
\includegraphics[width=\textwidth]{figures/theoretical_foundations/fpn_featurized_image_pyramid.pdf}
\caption[]{}
\end{subfigure}
% \hfill
\begin{subfigure}[t]{0.3\textwidth}
\centering
\includegraphics[width=\textwidth]{figures/theoretical_foundations/fpn_single_feature_map.pdf}
\caption[]{}
\end{subfigure}
\begin{subfigure}[t]{0.3\textwidth}
\centering
\includegraphics[width=\textwidth]{figures/theoretical_foundations/fpn_pyramidal_feature_hierarchy.pdf}
\caption[]{}
\end{subfigure}
% \hfill
\begin{subfigure}[t]{0.3\textwidth}
\centering
\includegraphics[width=\textwidth]{figures/theoretical_foundations/fpn_feature_pyramid_network.pdf}
\caption[]{}
\end{subfigure}
\caption[\gls{fpn}]{Four traditional approaches to feature aggregation. \imgpartdesc{a} Computing features on distinct image scales (computationally expensive); \imgpartdesc{b} the use of single scale features only (fast, but not robust); \imgpartdesc{c} Reusing pyramidal feature hierarchy (fast and robust); \imgpartdesc{d} the proposed \gls{fpn} - pyramidal feature aggregation in both directions. \externalsrc{\cite{lin2017fpn}}}
\label{fig:FPNVariousApproaches}
\end{figure}
% ------------------------------------------------------------------------------
% ##############################################################################
\subsection{Deep Layer Aggregation}
\label{ssec:DeepLayerAggregation}
\glsreset{dla}
A successor of the \gls{fpn} is the \gls{dla}~\cite{yu2019dla}, which emphasizes the importance of feature aggregation across multiple levels to merge information from different stages of input processing (\figtext{}~\ref{fig:DLAMotivation}). This technique shows significant improvements in both memory usage and performance. Unlike plain skip connections, the \gls{dla} introduces more depth and sharing. There are two main approaches to \gls{dla}, namely \gls{ida} and \gls{hda}. The two are independent and compatible, so they can be combined for even richer feature aggregation. A great advantage is that these structures are independent of the choice of backbone, thus preserving compatibility with current and future networks.
% ------------------------------------------------------------------------------
\begin{figure}[t]
\centerline{\includegraphics[width=0.6\linewidth]{figures/theoretical_foundations/dla_comparison.pdf}}
\caption[\gls{dla} comparison]{A demonstration of unification of semantic and spatial information. The \gls{dla} architecture extends densely connected networks, \ietext{}, \glspl{densenet}, and \glspl{fpn}. This extension builds on the idea of skip connections for enhanced feature fusion. \externalsrc{\cite{yu2019dla}}}
\label{fig:DLAMotivation}
\end{figure}
% ------------------------------------------------------------------------------
\subsubsection{Iterative Deep Aggregation}
\gls{ida} aims at resolution and scale fusion. The process starts at the smallest scale and then iteratively merges larger (deeper) scales, which can be described as
\begin{equation}
\label{eq:IterativeDeepAggregation}
\func{I}{\vect{x}_1, \vect{x}_2, \vect{x}_3, \dots, \vect{x}_n} =
\begin{cases}
\vect{x}_1 & \text{if } n = 1 \\
\func{I}{\func{A}{\vect{x}_1, \vect{x}_2}, \vect{x}_3, \dots, \vect{x}_n} & \text{otherwise}
\end{cases},
\end{equation}
where $A$ is the aggregation node.
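
As an illustration, the recursion above amounts to a simple fold of the aggregation node over the scales. In the Python sketch below, \texttt{aggregate} stands for the aggregation node $A$ and \texttt{features} lists the feature maps from the shallowest to the deepest scale; this is an illustrative transcription, not code from the cited work.
\begin{verbatim}
def ida(aggregate, features):
    # Iterative Deep Aggregation: fold the aggregation node A over the
    # feature maps, starting from the shallowest scale (base case n = 1).
    x = features[0]
    for deeper in features[1:]:
        x = aggregate(x, deeper)
    return x
\end{verbatim}
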
\subsubsection{Hierarchical Deep Aggregation}
This process of aggregation exploits a tree-like structure that combines layers spanning multiple levels of a feature hierarchy. The \gls{hda} with aggregation function $T_n$, where $n$ represents the depth, can be formulated as
\begin{equation}
\label{eq:HierarchicalDeepAggregation}
\func{T_n}{\vect{x}} =
\func{A}{
\func{\subsup{R}{n - 1}{n}}{\vect{x}},
\func{\subsup{R}{n - 2}{n}}{\vect{x}},
\dots,
\func{\subsup{R}{1}{n}}{\vect{x}},
\func{\subsup{L}{1}{n}}{\vect{x}},
\func{\subsup{L}{2}{n}}{\vect{x}}
},
\end{equation}
where $A$ is the aggregation node. The functions $R$ and $L$ are defined as
\begin{equation}
\label{eq:HDAConvBlocksL}
\func{\subsup{L}{1}{n}}{\vect{x}} = \func{B}{\func{\subsup{R}{1}{n}}{\vect{x}}},
\quad
\func{\subsup{L}{2}{n}}{\vect{x}} = \func{B}{\func{\subsup{L}{1}{n}}{\vect{x}}}
\end{equation}
and
\begin{equation}
\label{eq:DLAConvBlocksR}
\func{\subsup{R}{m}{n}}{\vect{x}} =
\begin{cases}
\func{T_m}{\vect{x}} & \text{if } m = n - 1 \\
\func{T_m}{\func{\subsup{R}{m + 1}{n}}{\vect{x}}} & \text{otherwise}
\end{cases},
\end{equation}
where $B$ represents some convolutional block.
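
For illustration, the two recursions above can be transcribed directly into Python. In the sketch below, \texttt{aggregate} plays the role of the aggregation node $A$ and \texttt{block} the role of the convolutional block $B$; the base case $T_1$ is not specified by the formulas above, so treating it as a single convolutional block is an assumption of the sketch.
\begin{verbatim}
def hda(n, x, aggregate, block):
    # Hierarchical Deep Aggregation T_n.
    if n == 1:
        return block(x)          # assumed base case (T_1 left implicit)
    r = {}                       # R^n_m(x) for m = n - 1, ..., 1
    r[n - 1] = hda(n - 1, x, aggregate, block)
    for m in range(n - 2, 0, -1):
        r[m] = hda(m, r[m + 1], aggregate, block)
    l1 = block(r[1])             # L^n_1(x)
    l2 = block(l1)               # L^n_2(x)
    return aggregate(*[r[m] for m in range(n - 1, 0, -1)], l1, l2)
\end{verbatim}
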
% !TeX root = thesis.tex
% !TeX spellcheck = en_GB
% !TeX encoding = UTF-8
\documentclass[12pt,a4paper,oneside]{amsbook}
\usepackage{thesis}
\begin{document}
\frontmatter
% Half-title
% Title page
\input{tex/title.tex}
% A nice quote
% Information (copyright notice, ISBN, etc.)
%\chapter*{Dedication}
%\label{cha:dedication}
%\input{tex/dedication.tex}
\tableofcontents
% Preface chapter
\chapter*{Preface}
\label{cha:preface}
\input{tex/preface.tex}
\chapter*{Notations}
\begin{description}
\item[$ {[n]} $] $ \{0, 1, 2, \dots, n\} $
\item[$ \mathbb{N} $] $ \{ 1, 2, 3, \dots \} $
\item[$ \mathbb{N}_0 $] $ \mathbb{N} \cup \{ 0 \} $
\item[$ \mu \mathcal{F} $] The class of all $ \mathcal{F} $-measurable functions.
\item[$ \floor{\cdot} $] The floor function. $ \floor{x} $ gives the largest integer smaller or equal to $ x $.
\item[$ \ceil{\cdot} $] The ceiling function. $ \ceil{x} $ gives the smallest integer larger or equal to $ x $.
\item[$ \sim $] Represents the discounted value when used on top of a quantity. For example, $ \tilde{S}_t = e^{-rt} S_t $
\end{description}
\subparagraph{Units of prices} Throughout the text, we have not specified any monetary unit for the prices of assets. This is because the theory and computations hold no matter what unit is chosen. The only care that needs to be taken is to have all quantities in the same unit; if they are not, they should be converted to a common unit.
\mainmatter
\chapter{Prologue}
\label{cha:prologue}
\input{tex/prologue.tex}
\chapter{Market models}
\label{cha:models}
\input{tex/models.tex}
\chapter{Asian options}
\label{cha:asian}
\input{tex/asian.tex}
\chapter{Cliquet options}
\label{cha:cliquet}
\input{tex/cliquet.tex}
\chapter{Epilogue}
\label{cha:epilogue}
\input{tex/epilogue.tex}
%\appendix
\backmatter
\listoffigures
\listoftables
%\listofalgorithms
\printbibliography
\end{document}
%%% Local Variables:
%%% mode: latex
%%% TeX-master: t
%%% End:
\subsection{Muon spectrometer}
The muon spectrometer~\cite{CERN-LHCC-97-022} is the outermost part of the ATLAS detector and features an extremely large tracking system.
It measures muon momenta over a wide range, with a relative accuracy of about 3\% at 100 GeV and 10\% at 1 TeV.
The muon spectrometer comprises three main parts: a magnetic field produced by three toroidal magnets;
a set of chambers measuring the tracks of muons with high spatial precision; and triggering chambers with accurate time-resolution.
Figure~\ref{fig:muon_dec} shows a schematic of the ATLAS muon spectrometer, which consists of four types of muon chambers
(\textit{MDT, CSC, RPC, TGC}) as well as the magnet systems (barrel and end-cap toroids).
\begin{figure}[!htb]
\centering
\includegraphics[width=0.8\textwidth]{figures/Detector/muon_all.png}
\caption{Cut-away view of the muon spectrometer in ATLAS~\cite{Sliwa:2013oua}.}
\label{fig:muon_dec}
\end{figure}
More details on the four chamber types are given below:
\begin{itemize}
\item \textbf{Monitored Drift Tubes (MDT)}. MDTs offer a precise momentum measurement over the range $|\eta| < 2.7$.
%except in the innermost end-cap layer where the coverage is limited to $|\eta| < 2.0$.
The chambers include three to eight layers of drift tubes, each with a diameter of 29.970 mm, operated with Ar/CO$_2$ gas (93/7) at 3 bar. The average resolution reaches 80 $\mu$m per tube and 30 $\mu$m per chamber.
\item \textbf{Cathode strip chambers (CSC)}. CSCs are used in the forward region of $2 < |\eta| < 2.7$ in the innermost tracking layers, because of their good time resolution and high rate capability.
They are multi-wire proportional chambers (MWPC), in which the cathode planes are segmented into strips in orthogonal directions, allowing both coordinates to be measured based on the induced-charge distribution.
The resolution is about 40 $\mu$m in the bending plane and 5 mm in the transverse plane.
\item \textbf{Resistive plate chambers (RPC)}. The RPCs serve as fast triggers in the barrel region of $|\eta| < 1.05$ due to their high rate capability as well as their good time and spatial resolution.
They are gaseous parallel electrode-plate detectors without any wires.
There are three concentric cylindrical layers around the beam axis working as three trigger stations, each of which is composed of two independent layers measuring the $\eta$ and $\phi$ coordinates.
\item \textbf{Thin gap chambers (TGC)}. TGCs are used as the trigger system for the end-cap region of $1.05 < |\eta| < 2.4$, and work on the same principle as multi-wire proportional chambers.
In addition to the MDT measurement in the bending direction, they also provide the second, azimuthal coordinate as a supplement.
\end{itemize}
\date{\today}
%\documentclass[journal=jacsat,manuscript=communication,layout=twocolumn]{achemso}
%\documentclass[jctcce,letterpaper,twocolumn,floatfix,preprintnumbers,superscriptaddress]{revtex4}
% \documentclass[12pt,preprint,aps,prb]{revtex4}
% \documentclass[aps,preprint,showpacs,superscriptaddress,groupedaddress]{revtex4} % for double-spaced preprint
\documentclass[aps,pra,twocolumn,superscriptaddress,groupedaddress]{revtex4} % for review and submission
\usepackage{dcolumn,graphicx,amsmath,amssymb,algorithm,algpseudocode}
\usepackage{mathtools}
\usepackage{xcolor}
\usepackage{todonotes}
\usepackage{qcircuit}
\usepackage{subcaption}
\newcommand{\total}{\mathrm{d}}
\newcommand{\ud}{\mathrm{d}}
\newcommand{\erf}{\mathrm{erf}}
\newcommand{\erfc}{\mathrm{erfc}}
\newcommand{\diff}[2]{\frac{\ud {#1}}{\ud {#2}}}
\newcommand{\pdiff}[2]{\frac{\partial #1}{\partial #2}}
\begin{document}
\definecolor{brickred}{rgb}{.72,0,0}
\definecolor{darkblue}{rgb}{0,0,0.5}
\definecolor{darkgreen}{rgb}{0,0.5,0}
\title{
Optimizing the Production of Test Vehicles: Classical Solutions Today and Hybrid Quantum/Classical Solutions Tomorrow
}
\author{Robert M. Parrish}
\email{[email protected]}
\author{Rachael Al-Saadon}
\affiliation{
QC Ware Corporation, Palo Alto, CA 94301, USA
}
\begin{abstract}
A complete and completely classical solution of the industrial challenge problem
is presented. Additional quantum/classical algorithmic gains are potentially
possible for harder future versions of this problem that exhibit geometric
frustration; we propose a specific research direction along these lines, using a
QC Ware specialty, quantum-number-preserving gate fabric circuits, to solve a key
part of such a hybrid solution.
\end{abstract}
\maketitle
\section{Phase 1}
The problem statements variously ask for optimization of the constituents of a
set or ``constellation'' of $n_{\mathrm{C}}$ test vehicles, with each test
vehicle taken from a state space of $\sim 469$ binary dimensions called
``features'' (this and other dimensions quoted below may vary in future problem
sizes), and with each test vehicle satisfying hard ``feature-group'' and
``type-build rule'' constraints corresponding to $\sim 25$ basic test vehicle
types. The problem statements, predicated by the hard constraints,
specifically ask for (1) \textbf{SAT:} For a given $n_{\mathrm{C}}$, does there
exist, for a given set of $n_{\mathrm{test}} \sim 644$ tests depending through
binary expressions on the state space of each test vehicle, a set of
$n_{\mathrm{C}}$ test cars for which the $n_{\mathrm{test}}$ tests can be
separately evaluated, with the caveat that there need be a ``multiplicity'' of $K_I \sim 1-5$ distinct
test vehicles required to satisfy test $I$ for $I \in [0, n_{\mathrm{test}})$?
(2) \textbf{Weighted MAX-SAT:} For a given $n_{\mathrm{C}}$, what is the optimal
constellation of test vehicles such that the weighted sum of satisfied
$n_{\mathrm{test}}$ tests, each requiring $K_I$ distinct test vehicles, is
maximized? and (3) \textbf{Scheduling (not precisely specified):} For a given
set of $n_{\mathrm{test}}$ tests and corresponding set of $n_{\mathrm{C}}$ test
vehicles satisfying said tests including $\{ K_I \}$ multiplicity constraints in
a MAX-SAT formalism of (2), what is the optimal scheduling of said vehicles into
a test sequence with at most $n_{\mathrm{slot}} \sim 10$ tests performed on
distinct cars in each timeslot and with tests assigned to integer test groups
with definite sorting of test groups within each car?
A specific instance of the problem class described above was provided by BMW.
Taken naively, this problem instance involves binary optimization over a state
space of $\sim 469\times60 = 28140$ binary variables (plus additional state
space variables for scheduling), i.e., a state space of $2^{28140} \sim
10^{6574}$ dimensions, with hard constraints and fairly generic logical
expressions needed to specify constraints and objective function values. As
stated by BMW (quotes from the problem statement in italics): ``\textit{The
provided description is based on the actual numbers and constraints formulated
for this model. It, thus, represents the real complexity arising in a productive
setting.}''
Within the problem statement document, solutions to the above problems were
attempted using existing industry-standard SAT solvers and constraint
satisfaction solvers. The SAT problem of (1) was easily solved:
``\textit{For 100 cars, the problem can be solved in a few seconds. A linear search counting down
from 100 revealed the solution that at least 60 cars are needed to perform all the specified
750 tests.}''
However the weighted MAX-SAT problem of (2) was not solvable:
``\textit{On the other hand, the MAX-SAT problem was not solvable in a reasonable time with the
chosen approach.}''
Additionally, the scheduling problem of (3) was not solvable:
``\textit{[O]n the test laptop, the full problem with 700 tests wasn't solvable in less than 24
hours.}''
\begin{figure*}[ht]
\begin{center}
\includegraphics[width=4in]{figures/phase1/solution.pdf}
\caption{Characteristics of QC Ware solutions to the ``optimizing the production
of test vehicles'' BMW quantum computing challenge problem. (A) Solutions
to the MAX-SAT and, by corollary, SAT problem variants (2) and (1),
respectively. (B) Solution to the scheduling problem variant (3).}
\label{fig:solution}
\end{center}
\end{figure*}
\textbf{We provide what we believe under the rules of the problem statement
represents a complete and tangible classical solution to all three specified
problem variants.}
The characteristics of our solution are presented in Figure \ref{fig:solution}
and the specific solution data and corresponding code are present in our
publicly-available repository at
\href{https://github.com/qcware/bmw}{https://github.com/qcware/bmw}.
Specifically, we developed a custom C++/Python code library to represent the
details of the problem in a natural format.
The combination of customized classical solution environment and high
performance implementation allows for very rapid exploration of the
hard-constraint-satisfying parameter space unique to this problem class. Within
this environment, we developed a powerful and simple set of heuristics to
approximately solve the MAX-SAT variant of the problem. This heuristic MAX-SAT
solver produces nested constellations of test cars with increasing
$n_{\mathrm{C}}$ and concomitant increasing MAX-SAT scores. The MAX-SAT
solutions coming from this heuristic achieve saturation of all specified $644$
tests (including multiplicity considerations) at the same $n_{\mathrm{C}} = 60$
bound determined by standard SAT solvers for problem (1) in the problem
statement document. Thus our MAX-SAT solution provides a tight bound solution
for problem (1) in the process of providing approximate solutions for (2). For
values of $n_{\mathrm{C}} \ll 60$, we believe our heuristic MAX-SAT solutions
are within a few percent of the global optimum. For the scheduling problem of
(3) we develop additional heuristics to schedule the test sequence from the
MAX-SAT optimized constellation of $n_{\mathrm{C}} = 60$ cars while respecting
the hard constraints of distinct cars within each time slot, strict ordering of
randomly-specified test groups within cars, and separate cars used within the
multiplicity considerations of each test. With the multiplicity considerations
included, there are 766 separate test-car pairs required, mandating a
theoretical floor of 77 test slots. Our heuristic solution provides a nearly
dense scheduling with 78 test slots required, i.e., within 1.3$\%$ of dense
scheduling. In aggregate, the classical steps required to reach the
MAX-SAT/SAT/Scheduling solutions sum to roughly $5-10$ min total of wall time on
a 72-core AWS EC2 \texttt{c5n.metal} instance, representing $\sim \$0.30$ worth
of classical computing resources at present prices.
\section{Methods}
\subsection{C++/Python Environment}
To facilitate rapid exploration of the state space for this problem class, we
developed a custom C++11/Python3 library API linked by PyBind11. No additional
dependencies beyond standard C++11, Python3, and the header-only PyBind11 linker
layer are needed; i.e., we do not rely on third-party SAT solvers. This library
contains simple classes enumerating the natural representation of the problem
contents. For instance, a \texttt{SimpleBinaryExpression} class is implemented
to represent the concept of simple all/any binary expressions containing
arbitrary not predicates as encountered throughout the type build rules and the
test rules. Instances of this class store the state of, e.g., a given build rule
predicate or implication expression, and can efficiently check whether this
expression is satisfied for a given proposed vehicle configuration. Two
\texttt{SimpleBinaryExpression} objects are further stacked in a
\texttt{SimpleBinaryImplication} object to represent the predicate and
implication of each type build rule. Multiple \texttt{SimpleBinaryExpression}
objects are chained together in a \texttt{FirstOrderAllBinaryExpression} object
to represent the parenthesized binary expressions present in each test rule.
Additional data structures are constructed to uniquely represent the type
feature groups, the type-specific build rules, the full set of test rules and
corresponding multiplicities, eventually yielding a complete C++ representation
of the full problem. The critical configuration state space of each test vehicle
is efficiently represented by the \texttt{std::vector<bool>} concept, i.e., each
proposed test vehicle is represented by a \texttt{std::vector<bool>} containing the
states of the $\sim 469$ features of each vehicle.
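
For illustration, the Python sketch below mirrors the described class hierarchy. The class names are taken from the text, but the fields and method signatures are our assumptions; the actual implementation is C++ exposed through PyBind11.
\begin{verbatim}
from dataclasses import dataclass
from typing import List, Tuple

@dataclass
class SimpleBinaryExpression:
    # (feature_index, expected_value) literals; mode is "all" or "any".
    literals: List[Tuple[int, bool]]
    mode: str = "all"

    def satisfied(self, car: List[bool]) -> bool:
        checks = (car[i] == v for i, v in self.literals)
        return all(checks) if self.mode == "all" else any(checks)

@dataclass
class SimpleBinaryImplication:
    # A type build rule: if the predicate holds, the implication must hold.
    predicate: SimpleBinaryExpression
    implication: SimpleBinaryExpression

    def satisfied(self, car: List[bool]) -> bool:
        return (not self.predicate.satisfied(car)
                or self.implication.satisfied(car))
\end{verbatim}
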
The entire library is reflexively exposed to Python3 through PyBind11 to merge
the effortless development of Python (i.e., regex for data parsing, short python
scripts to manage various experiments, compile-free debugging through python
printing) with the speed of compiled C++ for rate-limiting operations. The use
of C++ also facilitates the use of single-node parallelism through OpenMP
threading.
\subsection{Test Vehicle Seeds}
One might expect that the guess of $\vec 0$ (i.e., all features turned off)
would yield an acceptable starting guess for a test vehicle configuration.
However, already at $\vec 0$ some of the type build rules are violated, meaning
that $\vec 0$ is outside of the hard constraint space. Moreover, we have
empirically found that some of the $\sim 644$ test rules are rather hard to satisfy
without specific direction within the constraint space. Therefore, to seed a
starting pool of test vehicles, we adopt the following procedure:
\begin{enumerate}
\item For each test rule, we generate a seed test vehicle that satisfies this
test rule with a randomly selected type.
\item To generate this vehicle, we first flip the required features on to
satisfy the test rule.
\item The active test rule features are then ``masked'' meaning that they are
frozen in current values satisfying the test rule throughout all future steps.
\item In the non-masked features, we then chase constraints until we arrive at a
valid car satisfying the type build rules.
\item If this procedure fails for a given randomly selected type, we randomly
select another type and repeat ad infinitum.
\end{enumerate}
At the end of this procedure we have a pool of $\sim 644$ test vehicles which
are largely ``featureless'' meaning that only the minimal number of features
have been activated to satisfy the test and chase the constraints into the valid
type build rule space. All test rules are present in at least one test vehicle in this
starting pool.
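
A heavily simplified Python sketch of this seeding procedure is shown below. The helper \texttt{chase\_constraints} and the literal list \texttt{test\_rule\_literals} are hypothetical placeholders that stand in for the constraint-chasing routine and the test-rule representation described above.
\begin{verbatim}
import random

def seed_vehicle(test_rule_literals, types, num_features,
                 chase_constraints):
    # Illustrative sketch only: chase_constraints(car, mask, vtype) is a
    # placeholder that flips non-masked features until the type build
    # rules hold, returning None on failure.
    while True:
        vtype = random.choice(types)
        car = [False] * num_features
        mask = set()
        for feature, value in test_rule_literals:
            car[feature] = value      # satisfy the test rule ...
            mask.add(feature)         # ... and freeze ("mask") its features
        car = chase_constraints(car, mask, vtype)
        if car is not None:
            return car, mask, vtype   # a valid, minimally featured seed
\end{verbatim}
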
\subsection{MAX-SAT Optimization}
We start from the empty constellation $n_{\mathrm{C}} = 0$. To update this
constellation to $n_{\mathrm{C}} = 1$, we adopt the following procedure:
\begin{enumerate}
\item For each of the $\sim 644$ test vehicles in the candidate pool, we
perform several tens of thousands of directed Monte Carlo moves designed to
improve the number of rules simultaneously satisfied by the test vehicle, while
respecting the hard constraints. The Monte Carlo moves are described below.
\item We add to the constellation the single car from the updated candidate pool
that maximally increases the number of satisfied tests in the constellation.
\item We update the test set used to direct the Monte Carlo moves in Step 1 to
include only those rules which are unsatisfied by the current constellation.
\item We iterate this procedure until all test rules are satisfied, increasing
the constellation size $n_{\mathrm{C}}$ by one test vehicle per iteration.
\end{enumerate}
At the end of this procedure, we have a set of $n_{\mathrm{C}}$ nested
constellations each of which is a local approximant to the MAX-SAT [Problem (2)]
solution of corresponding constellation size. Once we obtain a constellation
that saturates all tests, we have an upper bound for the SAT solution [Problem
(1)] which turns out to be tight for the specifics of this problem instance.
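
In outline, the constellation growth reduces to the following greedy loop. The callables \texttt{refine} (the Monte Carlo refinement of a candidate against the active test set) and \texttt{satisfies} (a single test-rule check) are placeholders for the C++ routines, and the per-test multiplicities $K_I$ are omitted for brevity.
\begin{verbatim}
def build_constellation(pool, tests, refine, satisfies):
    # Greedy MAX-SAT heuristic: grow the constellation one car at a time.
    constellation, active = [], set(tests)
    while active:
        pool = [refine(car, active) for car in pool]
        best = max(pool,
                   key=lambda car: sum(satisfies(car, t) for t in active))
        gained = {t for t in active if satisfies(best, t)}
        if not gained:
            break                     # no further progress possible
        constellation.append(best)
        active -= gained
    return constellation
\end{verbatim}
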
\subsection{Masked Distance-2 Monte Carlo Moves}
One of the particular specialties of our approach lies in the strength of our
Monte Carlo moves. We adopt the following procedure:
\begin{enumerate}
\item For each test vehicle in the candidate pool, we randomly select two
feature groups to vary.
\item For each of these feature groups we move with equal probability to
deactivate the feature group or to activate a random feature index within the
group.
\item We check if the proposed move satisfies the type build rules and return to
1 if not.
\item We check if the proposed move would perturb the masked features discussed
in the previous section, and return to 1 if so.
\item At this point, we know that the proposed test vehicle is valid and has not
moved a masked feature. If this proposed test vehicle improves the number of
satisfied tests in the active test set, we accept the updated vehicle and return
to 1. Else we reject the proposed test vehicle and return to 1.
\item We loop some user-specified number of iterations, usually on the order of
tens of thousands.
\end{enumerate}
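
A minimal Python sketch of one such masked distance-2 move is given below; here \texttt{groups} maps a feature-group id to its feature indices, while \texttt{valid} (the type build rules) and \texttt{score} (satisfied tests in the active set) are placeholders for the corresponding C++ checks.
\begin{verbatim}
import random

def distance2_move(car, masked, groups, valid, score, active_tests):
    proposal = list(car)
    for gid in random.sample(list(groups), 2):   # pick two feature groups
        members = groups[gid]
        for f in members:                        # clear the group, then
            proposal[f] = False
        if random.random() < 0.5:                # with probability 1/2
            proposal[random.choice(members)] = True  # activate one feature
    if not valid(proposal):                      # off the constraint space
        return car
    if any(proposal[f] != car[f] for f in masked):  # masked feature moved
        return car
    if score(proposal, active_tests) > score(car, active_tests):
        return proposal                          # accept improving move
    return car
\end{verbatim}
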
There are several key observations that guided this heuristic choice of Monte
Carlo move scheme:
\begin{itemize}
\item These moves always remain on the constraint space.
\item These moves operate on feature groups rather than on individual binary
variables, and therefore automatically satisfy the feature group constraint.
Direct moves in binary variables would have a vanishing probability of
satisfying the feature group constraints.
\item Distance-2 moves are much more likely to be interesting and valid than
distance-1 moves. E.g., the activation of a single feature group often implies
the activation of another feature group through the type build rules. Such
implications can be satisfied with reasonable probability with distance-2 moves,
but are often unreachable with a sequence of distance-1 moves.
\item The acceptance of moves based on increased test set scores promotes a
compounding improvement of the test vehicle through the iterative procedure.
\end{itemize}
This procedure is implemented within C++, which treats the involved logic almost
natively. As such, we obtain orders of magnitude improvement over a
corresponding Python implementation of this portion of the approach.
Additionally, this stage of the procedure is embarrassingly parallel across the
$\sim 644$ test vehicles in the candidate pool. We parallelize this with OpenMP,
with dynamic scheduling invoked to attempt to load balance across the
anisotropic task sizes encountered.
Note that the efficiency of moves in this scheme relies on the concept that the
feature groups of the test vehicles are disjoint. This was not actually the case
in the original problem statement, due to a single collision between two feature
groups. We adjusted the problem statement to redefine two of the feature group
boundaries and to apply additional build constraint rules to yield an entirely
equivalent isomorphic representation of the problem. See the Appendix for
additional details.
\subsection{Scheduling}
For scheduling, we were initially considering doing some rather exotic work
involving global optimization, i.e., building a different constellation of test
vehicles that would be more optimized for the scheduling objective function than
for the MAX-SAT objective function. However, we started by exploring an extremely
simple greedy approach involving attempting to schedule our existing SAT/MAX-SAT
constellation of $n_{\mathrm{C}} = 60$ test vehicles, and found that it produced
almost dense packing. Therefore, we will only explain the latter approach here.
The scheduling heuristic approach works as follows:
\begin{enumerate}
\item We first sort the test rules by test group (first priority) and by
number of required cars for the test (second priority).
\item We traverse the current priority-sorted test set.
\item For each test, we identify and randomly sort the list of cars which
satisfy the test.
\item For each car in this list, we attempt to add the car to the current time
slot, continuing deeper into the car list if the car already exists in the
current time slot, if the car has already been used previously for this test
(for multi-car tests), or if the car has already been used for a lower-priority
test group. As soon as we find a valid car, we break out of the loop over the
car list.
\item If no test-car pair can be added to the current slot, we ``nuke'' the slot
and kick it onto the schedule with no-ops (i.e., empty time/engineer slots) inside.
\item If the addition of a car saturates the number of engineer slots, we kick
the slot onto the schedule.
\item We check if the addition of a car saturates a test rule, and update the
test rule set to remove this rule if so.
\item We iterate from 2 until all test rules are satisfied, as evidenced by the
active test set becoming empty.
\end{enumerate}
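
A stripped-down Python sketch of this greedy scheduler is shown below; the test-group ordering bookkeeping is omitted, and \texttt{cars\_for} together with the remaining-multiplicity map \texttt{required} are placeholders for the data structures described above. The early exit corresponds to the rare stuck case discussed next.
\begin{verbatim}
import random

def schedule(tests, cars_for, required, n_engineers=10):
    # Greedy scheduling sketch (test-group ordering omitted).
    slots, current = [], []
    used = {t: set() for t in tests}
    while any(required[t] > 0 for t in tests):
        placed = None
        for t in (t for t in tests if required[t] > 0):
            candidates = [c for c in cars_for(t)
                          if c not in used[t]
                          and c not in {car for _, car in current}]
            if candidates:
                placed = (t, random.choice(candidates))
                break
        if placed is None:
            if not current:
                break                   # stuck: no progress possible
            slots.append(current)       # "nuke" the slot
            current = []
            continue
        t, car = placed
        current.append((t, car))
        used[t].add(car)
        required[t] -= 1
        if len(current) == n_engineers: # slot saturated
            slots.append(current)
            current = []
    if current:
        slots.append(current)
    return slots
\end{verbatim}
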
There is a small chance that this algorithm will enter an infinite loop where a
critical car is greedily used for a lower-priority test, and therefore cannot be
used for a higher-priority test. We have encountered this failure case in only
about 15$\%$ of runs. The existence of even a single successful run producing a
dense schedule obviates this concern.
Note that we find the absence of specified test groups in the problem
specification to be a major weakness of this part of the challenge. We generated
test groups ranging from 1 to 5 from random integers as sketched in the problem
statement. We did this exactly once using \texttt{numpy.random.randint} and
stored the values in our github repository, i.e., we generated what we feel is
a fair test and then froze it. Note also that we elected to define the priority
order to be sorted from $1$ to $5$ rather than from $5$ to $1$ in the problem
statement for aesthetic reasons; as these values are generated uniformly at
random, this makes no difference to the problem structure.
\section{Toward Hybrid Quantum/Classical Approaches}
\subsection{Motivation}
We view the above full solutions as an unexpected
\emph{fait accompli} obtained during our formulation of a submission for this
challenge. Despite the formidable presentation of
this problem [as evidenced by the inability of the BMW working group to provide
a solution for problem variants (2) and (3) with conventional techniques], this
problem is not so hard as it looks. In particular, there seems to be only a
moderate amount of ``geometric frustration'' between test vehicles. This concept
of geometric frustration has many potential manifestations, all stemming from
the basic idea that local moves to optimize one subset of the problem could
easily have severely penalized the quality of the global solution. For instance,
focusing on MAX-SAT, it could well have been the case that feature choices on
candidate test vehicles were so tightly correlated through test case
satisfaction that attempts to locally maximize the solution quality for each
proposed single test vehicle addition to the constellation would severely negatively
impact the MAX-SAT score for larger values of constellation size
$n_{\mathrm{C}}$. This might manifest as a problem instance where no single test
vehicle in the ideal global solution constellation is a ``hero'' individually
satisfying a relatively large number of tests (note that all of our current
solution test vehicles are heroes!). Instead each test vehicle in the ideal
solution constellation might satisfy only something on the order of
$n_{\mathrm{test}} / n_{\mathrm{C}}$ tests, with the particular test cluster
satisfied by each test vehicle determined by a very brittle set of many-body
correlations with the active feature sets across all test vehicles in the
constellation. Such a case would stymie essentially all heuristic optimization
approaches that we can envision.
A geometrically frustrated problem instance of this type is not hard to imagine
occurring in real engineering practice. In fact, we would argue that such
geometric frustration is already present in the current problem instance, albeit
to a low enough degree that some halfway clever heuristics and the big hammer of a
\texttt{c5n.metal} node can defeat such geometric frustration. In particular,
as the feature groups, build rules, and test sets all grow in both size and
complexity in future practice at BMW, we will likely see cases that are highly
resilient to direct solution by local heuristics.
Below, we propose a specific directed research project to develop novel hybrid
quantum/classical algorithms to target such frustrated MAX-SAT problems. Key to
our approach is the idea that one should use the CPU (or other high-performance
classical computing resources) and the QPU separately for what each is best at. As
such, we propose a two-stage approach where one first uses the CPU to generate a
large, diverse, and structured candidate pool of cars (dealing with the hard
build constraints on the cars on the CPU) and then selects the best
constellation of $n_{C}$ test cars from this pool by using the QPU to solve a
variant of MAX-COVER. This variant of MAX-COVER is an interesting and
non-trivial extension of standard MAX-COVER that represents a total
Hamming-weight-constrained polynomial binary optimization problem (a specific
PCBO problem).
Before discussing the proposed approach, we will briefly discuss a red herring
approach that will serve as a contrast with our selected approach.
\subsection{A Path to Avoid}
It is highly tempting to map the binary state space of the MAX-SAT variant of
the problem to the qubits or qudits of a quantum device. This mapping would
provide the most direct encapsulation of the problem on the quantum hardware
and would retain the conceptually sacred possibility of an exact global optimum.
Conceptually, one would create a quantum superposition over all possible binary
strings in the state space, and then start applying MAX-SAT-Hamiltonian-aware
methods like quantum amplitude amplification (using either traditional deep
quantum phase estimation circuits or a variant of the new short-depth maximum
likelihood estimation, Grover zeroing, or Chinese remainder theorem circuit
schedules), QAOA, or VQE to boost the observation probability of the global
optimum or other local optima. One could immediately improve this idea by either
conceptually or physically using qudits to represent each feature group - this
would drastically reduce the search space size and simultaneously mitigate
issues with the feature group constraints.
We do not recommend pursuing any approach along these lines for exactly one reason:
test vehicle build constraints. These constraints severely limit the valid state
space of the test vehicles and are posed as generic predicate/implication binary
logic statements involving up to dozens of simultaneous variables per expression. Such
constraints are completely alien in a quantum computing context, in the same way
that mapping a deeply serial code to a GPU is a technical non-starter.
Therefore, we see no viable path (even for error-corrected quantum approaches in
the $\geq$decade timeframe) to develop general purpose quantum circuits that can
provide powerful moves within the Hilbert space while simultaneously respecting
these generic build rules.
\subsection{A Path to Take}
We instead propose a hybrid quantum/classical approach to the geometrically frustrated MAX-SAT
problem. The approach works as follows:
\begin{enumerate}
\item Use classical high performance computing resources such as extensions of
our existing methodology to build a large, diverse, and highly structured pool
of $\sim 10^{2}$ to $\sim 10^{6}$ candidate test vehicles. This pool generation approach
can be tuned for various frustration cases, e.g., providing penalties for
``hero'' test vehicles, providing repulsive terms between test vehicles with overlapping
feature sets, or encouraging small groups of test vehicles to work together to
realize tests with high multiplicities.
\item Use novel symmetry-preserving constrained binary optimization
approaches on quantum hardware to solve the modified MAX-COVER problem of
selecting the optimal constellation of $n_{C}$ test vehicles from the candidate
pool.
\item Perform additional heuristic refinement on classical resources to further
increase the MAX-SAT score.
\end{enumerate}
\subsection{The Quantum Problem}
The modified MAX-COVER problem that represents the basic task of the quantum
hardware in our hybrid method has the following two compelling features: (1) the
problem maps much more naturally to a qubit device than the approach discarded
in the previous section and (2) the problem has an extremely interesting global
Hamming weight constraint structure that merits additional quantum algorithm
development, i.e., there is something new, tangible, and valuable to be done
here on the quantum algorithms research side.
Specifically, we have a binary state space of $n_{P}$ binary variables, where
$n_{P} \sim 10^{2} - 10^{6}$ is the number of test vehicles in the candidate
pool. We are asked to produce a $n_{P}$-dimensional binary string with Hamming
weight (total number of 1-state bits) $n_{C}$, where the $n_{C}$ 1-state bits
identify the pivots of the test pool to place into the test constellation. This
binary string is to be optimized to maximize the number of satisfied test cases
in the constellation, including multiplicity considerations. This last
consideration generalizes the problem beyond standard MAX-COVER somewhat,
implying some extensions to the Hamiltonian considerations that we will consider
in future work.
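
As a concrete (purely classical) reference point, the selection task can be written as a brute-force search over all Hamming-weight-$n_{C}$ bitstrings, which is feasible only for tiny pools and is exactly the step the quantum routine is meant to replace. In the sketch below, \texttt{pool\_covers} and \texttt{weights} are assumed inputs, and the multiplicity generalization is ignored.
\begin{verbatim}
from itertools import combinations

def max_cover_brute_force(pool_covers, weights, n_c):
    # pool_covers[p]: set of tests covered by candidate car p
    # weights[t]: weight of test t
    best_score, best_subset = -1.0, None
    for subset in combinations(range(len(pool_covers)), n_c):
        covered = set().union(*(pool_covers[p] for p in subset))
        score = sum(weights[t] for t in covered)
        if score > best_score:
            best_score, best_subset = score, subset
    return best_subset, best_score
\end{verbatim}
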
The most interesting feature of the problem is the total Hamming weight
constraint. This formally reduces the dimension of the search space from
$2^{n_{P}}$ to ${n_{P} \choose n_{C} }$, which is still a roughly exponentially
large search space. Standard heuristics for MAX-COVER will likely fail in this
environment due to the same geometric frustration between choices that motivated
our basic consideration of a hybrid quantum/classical algorithm. The challenge
at this point is to craft a quantum optimization algorithm that respects the
hard Hamming weight constraint while also providing strong optimization power.
Some existing approaches have discussed this case, such as a variant of the
quantum alternating operator ansatz (QAOA) with ring or complete graph mixers
\cite{cook2020quantum},
or the mixer-phaser ansatz for QAOA with hard Hamming constraints \cite{larose2021mixer}. Both of these
methods share the common traits that they start from Dicke states (essentially
the usual $|+\rangle$ state of equal amplitudes on all state space
configurations, but restricted to preserve target Hamming weight), and then use
arrays of 2-qubit XY mixer gates (similar to the ``reversible beamsplitter'' or
``RBS'' gates of quantum optics or the ``Givens gates'' of quantum chemistry) to
provide Hamming-weight-preserving exploration through the Hilbert space during
the QAOA optimization process. It might be possible to directly implement one of
these existing methods to solve the MAX-COVER problem for our problem instance.
However, we note that there exist several potential problems with these
proposed ansatze. Most tangibly (1) it is well known that networks of Givens or
similar gates do not provide full entangling power across the
Hamming-weight-preserving Hilbert space, and therefore may not be able to
provide quantum advantage when used as QAOA elements and (2) some of the most
promising variants of the existing approaches, such as the complete graph XY
mixers use highly nonlocal pairs of non-adjacent qubits that may be
prohibitively difficult to implement on near-term quantum devices with limited
qubit connectivity. We propose an avenue of research that goes somewhat beyond
these existing methods, and that works to directly confront these two major
issues: the adoption of universal Hamming-weight-preserving gate fabrics into an
extended version of QAOA or VQE that can provide full universality within the
Hamming-weight-preserving subspace while simultaneously being amenable to
implementation on NISQ-era quantum devices as a simple 3-local nearest-neighbor
gate fabric.
These Hamming-weight-preserving quantum number gate fabrics were noticed as an
aside during a collaborative effort between QC Ware Corp. and Covestro
Deutschland AG to develop universal but simple gate fabric circuits for the
simulation of fermions in the context of quantum chemistry \cite{anselmetti2021local}. Fermions exhibit
numerous symmetry constraints which must be respected during quantum algorithms simulating
these fermions, and one of these symmetries is Hamming weight. Therefore, as an
intermediate to the full fermionic gates that were the major finding of
Ref. \citenum{anselmetti2021local}, we considered the prerequisite question of the minimal circuit that is
universal for total Hamming weight while also preserving a semblance of
simplicity and linear locality. As is well-known in the literature, we noted
that the two-qubit fabric of Givens gates as in Figure \ref{fig:H1} (similar to
the XY mixers discussed above) is not universal, but an extension of this
concept to a local fabric of three-qubit Hamming-weight-preserving gates as
depicted in Figure \ref{fig:H2} achieves universality while preserving the
global Hamming weight constraint.
\begin{figure}[b]
\centering
\begin{equation*}
\begin{array}{l}
\Qcircuit @R=0.3em @C=0.3em @!R {
% \lstick{|0\rangle}
& \multigate{1}{\mathit{H}(4)}
& \qw
& \multigate{1}{\mathit{H}(4)}
& \qw
& \qw \\
% \lstick{|1\rangle}
& \ghost{\mathit{H}(4)}
& \multigate{1}{\mathit{H}(4)}
& \ghost{\mathit{H}(4)}
& \multigate{1}{\mathit{H}(4)}
& \qw \\
% \lstick{|2\rangle}
& \multigate{1}{\mathit{H}(4)}
& \ghost{\mathit{H}(4)}
& \multigate{1}{\mathit{H}(4)}
& \ghost{\mathit{H}(4)}
& \qw \\
% \lstick{|3\rangle}
& \ghost{\mathit{H}(4)}
& \multigate{1}{\mathit{H}(4)}
& \ghost{\mathit{H}(4)}
& \multigate{1}{\mathit{H}(4)}
& \qw \\
% \lstick{|4\rangle}
& \multigate{1}{\mathit{H}(4)}
& \ghost{\mathit{H}(4)}
& \multigate{1}{\mathit{H}(4)}
& \ghost{\mathit{H}(4)}
& \qw \\
% \lstick{|5\rangle}
& \ghost{\mathit{H}(4)}
& \qw
& \ghost{\mathit{H}(4)}
& \qw
& \qw \\
}
\end{array}
\ldots
\phantom{}
\not\cong
\phantom{}
\begin{array}{l}
\Qcircuit @R=0.3em @C=0.3em @!R {
% \lstick{|0\rangle}
& \multigate{5}{\mathit{H}(2^6)}
& \qw \\
% \lstick{|0\rangle}
& \ghost{\mathit{H}(2^6)}
& \qw \\
% \lstick{|0\rangle}
& \ghost{\mathit{H}(2^6)}
& \qw \\
% \lstick{|0\rangle}
& \ghost{\mathit{H}(2^6)}
& \qw \\
% \lstick{|0\rangle}
& \ghost{\mathit{H}(2^6)}
& \qw \\
% \lstick{|0\rangle}
& \ghost{\mathit{H}(2^6)}
& \qw \\
}
\end{array}
\end{equation*}
\begin{equation*}
\begin{array}{l}
\Qcircuit @R=0.3em @C=0.3em @!R {
& \multigate{1}{H(4)}
& \qw \\
& \ghost{H(4)}
& \qw \\
}
\end{array}
\coloneqq
\begin{array}{l}
\Qcircuit @R=0.3em @C=0.3em @!R {
& \gate{R_{y} (+\pi / 4)}
& \ctrl{1}
& \gate{R_{y} (+\lambda / 2)}
& \ctrl{1}
& \gate{R_{y} (-\pi / 4)}
& \qw \\
& \gate{R_{y} (+\pi / 4)}
& \ctrl{-1}
& \gate{R_{y} (-\lambda/2)}
& \ctrl{-1}
& \gate{R_{y} (-\pi / 4)}
& \qw \\
}
\end{array}
\end{equation*}
\begin{equation*}
=
\left [
\begin{array}{rrrr}
1 & & & \\
& c & +s & \\
& -s & c & \\
& & & 1 \\
\end{array}
\right ]
:
\
\begin{array}{l}
c \coloneqq \cos(\lambda/2)
\\
s \coloneqq \sin(\lambda/2)
\\
\end{array}
\end{equation*}
\caption{From Ref. \citenum{anselmetti2021local}: Gate fabric attempt \emph{not}
universal for the Hamming-weight-preserving subgroup $\mathcal{H}(2^N)$
(sketched for $N=6)$. The gate fabric is a 2-local-nearest-neighbor
tessellation of alternating even and odd qubit-pair 1-parameter, 2-qubit
Hamming-weight-preserving $\hat H(4)$ gates. The gate fabric exactly commutes
with the Hamming weight operator $\hat P \equiv \sum_{p} (\hat I - \hat Z_p) /
2$, but the gate fabric does not span $\mathcal{H}(2^N)$ for any depth.
}
\label{fig:H1}
\end{figure}
\begin{figure}
\centering
\begin{equation*}
\label{eq:SO2N-Hamming}
\begin{array}{l}
\Qcircuit @R=0.3em @C=0.3em @!R {
% \lstick{|0\rangle}
& \multigate{2}{\mathit{H}(8)}
& \qw
& \qw
& \qw
& \qw \\
% \lstick{|1\rangle}
& \ghost{\mathit{H}(8)}
& \multigate{2}{\mathit{H}(8)}
& \qw
& \qw \\
% \lstick{|2\rangle}
& \ghost{\mathit{H}(8)}
& \ghost{\mathit{H}(8)}
& \multigate{2}{\mathit{H}(8)}
& \qw \\
% \lstick{|0\rangle}
& \multigate{2}{\mathit{H}(8)}
& \ghost{\mathit{H}(8)}
& \ghost{\mathit{H}(8)}
& \qw \\
% \lstick{|1\rangle}
& \ghost{\mathit{H}(8)}
& \multigate{2}{\mathit{H}(8)}
& \ghost{\mathit{H}(8)}
& \qw \\
% \lstick{|2\rangle}
& \ghost{\mathit{H}(8)}
& \ghost{\mathit{H}(8)}
& \multigate{2}{\mathit{H}(8)}
& \qw \\
% \lstick{|0\rangle}
& \multigate{2}{\mathit{H}(8)}
& \ghost{\mathit{H}(8)}
& \ghost{\mathit{H}(8)}
& \qw \\
% \lstick{|1\rangle}
& \ghost{\mathit{H}(8)}
& \qw
& \ghost{\mathit{H}(8)}
& \qw \\
% \lstick{|2\rangle}
& \ghost{\mathit{H}(8)}
& \qw
& \qw
& \qw \\
}
\end{array}
\phantom{}
\ldots
\cong
\phantom{}
\begin{array}{l}
\Qcircuit @R=0.3em @C=0.3em @!R {
% \lstick{|0\rangle}
& \multigate{8}{\mathit{H}(2^9)}
& \qw \\
& \ghost{\mathit{H}(2^9)}
& \qw \\
& \ghost{\mathit{H}(2^9)}
& \qw \\
& \ghost{\mathit{H}(2^9)}
& \qw \\
& \ghost{\mathit{H}(2^9)}
& \qw \\
& \ghost{\mathit{H}(2^9)}
& \qw \\
& \ghost{\mathit{H}(2^9)}
& \qw \\
& \ghost{\mathit{H}(2^9)}
& \qw \\
& \ghost{\mathit{H}(2^9)}
& \qw \\
}
\end{array}
\end{equation*}
\begin{equation*}
\mathit{H} (8)
\coloneqq
\left [
\begin{array}{rrrrrrrrr}
1 & & & & & & & & \\
& \color{brickred}{u_{00}^{1}} & \color{brickred}{u_{01}^{1}} & &
\color{brickred}{u_{02}^{1}} & & & & \\
& \color{brickred}{u_{10}^{1}} & \color{brickred}{u_{11}^{1}} & &
\color{brickred}{u_{12}^{1}} & & & & \\
& & & \color{darkblue}{u_{00}^{2}} & & \color{darkblue}{u_{01}^{2}} &
\color{darkblue}{u_{02}^{2}} & \\
& \color{brickred}{u_{20}^{1}} & \color{brickred}{u_{21}^{1}} & &
\color{brickred}{u_{22}^{1}} & & & & \\
& & & \color{darkblue}{u_{10}^{2}} & & \color{darkblue}{u_{11}^{2}} &
\color{darkblue}{u_{12}^{2}} & \\
& & & \color{darkblue}{u_{20}^{2}} & & \color{darkblue}{u_{21}^{2}} &
\color{darkblue}{u_{22}^{2}} & \\
& & & & & & & & 1 \\
\end{array}
\right ]
\end{equation*}
\begin{equation*}
\hat u^{d}
\coloneqq
\exp(\hat x^{d})
\in
\mathcal{SO}(3)
:
\
\hat x^{d\dagger}
=
-
\hat x^{d}
:
\hat x^{d}
\in
\mathbb{R}^3
\times
\mathbb{R}^3
,
\
d \in [1, 2]
\end{equation*}
\caption{From Ref. \citenum{anselmetti2021local}: Gate fabric universal for the Hamming-weight-preserving subgroup
$\mathcal{H}(2^N)$ (sketched for $N=9)$. The gate fabric is a
3-local-nearest-neighbor tessellation of cascading qubit-triple 6-parameter,
3-qubit Hamming-weight-preserving $\hat H(8)$ gates. Each $\hat H(8)$ gate is
composed of a 3-parameter $\mathcal{SO}(3)$ rotation in the $d$-Hamming-weight
subspace, where $d \in [1, 2]$ for a total of 6 parameters. The gate fabric exactly
commutes with the Hamming weight operator $\hat P \equiv \sum_{p} (\hat I - \hat
Z_p) / 2$ and spans $\mathcal{H}(2^N)$ at sufficient depth.
}
\label{fig:H2}
\end{figure}
A tangible and potentially highly valuable research direction is an
extension of QAOA or VQE built around these three-qubit quantum number
preserving gates to address the MAX-COVER problem needed for our hybrid
quantum/classical solution to the geometrically frustrated test vehicle
production MAX-SAT problem. This is the basis of our research proposal to BMW.
\clearpage
\section{Phase 2}
Following Phase 1 of the BMW Quantum Challenge, we proposed and tested three extensions
of our proposal: 1) reduce the cost of test vehicles by two-step feature reduction,
2) build a test vehicle constellation with built-in Hamming-weight constraint, and
3) develop a quantum simulator for a Hamming-weight-preserving QNP fabric, the results
of which are described below.
\subsection{Post-facto Feature Reduction}
The test vehicles generated in Phase 1 had large Hamming weights and represented
complex vehicle builds. In order to reduce the test vehicle cost, the constellation
of test vehicles reported in Phase 1 was used as a starting point for feature reduction.
{\color{red} [target for Hamming weight.]}
A plot of the number of test rules that each vehicle complies with before and after feature
reduction is shown in Fig. \ref{fig:6-reduce}. While the test vehicle cost was reduced
by feature selection, all vehicles in the constellation still satisfy a high number of
test rule constraints.
\begin{figure*}
\begin{center}
\includegraphics[width=0.5\textwidth]{figures/phase2/6-reduce-rules.pdf}
\caption{Result of test vehicle constellation feature reduction.
Blue indicates the number of test rules satisfied with the initial constellation
generated in Phase 1 whereas red represents post-facto feature reduction in Phase 2
from the initial constellation from Phase 1.}
\label{fig:6-reduce}
\end{center}
\end{figure*}
\subsection{Hamming-Weight Constrained MAX-SAT}
The test vehicle cost or number of features can be constrained by imposing a restriction
on the number of allowed active features.
Test vehicles were restricted to have at most 30 active features during optimization
of the test vehicle constellation. MAX-SAT results for Hamming-weight constrained
optimization are qualitatively similar to those from Phase 1 (Fig. \ref{fig:solution2}).
The constellation saturates at 62 test vehicles (recall the SAT bound is at 60 vehicles).
\begin{figure*}
\begin{center}
\includegraphics[width=0.5\textwidth]{figures/phase2/2p-score.pdf}
\caption{Comparison of the results from Phase 1 and Phase 2 MAX-SAT. Phase 1 (blue) was
carried out without feature restriction and Phase 2 (red) was carried out with
restriction of the Hamming weight of each vehicle.}
\label{fig:solution2}
\end{center}
\end{figure*}
A comparison of the vehicle feature Hamming weight and number of test rules met for
Phase 1 and Phase 2 is depicted in Fig. \ref{fig:fig}. Remarkably, when the optimization
is performed with a constraint on the number of active features that reduces the
Hamming weight by nearly half, the number of test rules met by the constellation exhibits
only a minimal reduction (Figs. \ref{fig:sub-third} and \ref{fig:sub-fourth}).
\begin{figure*}
\centering
\begin{subfigure}[b]{.475\textwidth}
\centering
% include first image
\includegraphics[width=\textwidth]{figures/phase2/2-prod-hamming.pdf}
\caption{Phase 1 constellation Hamming weight.}
\label{fig:sub-first}
\end{subfigure}
\hfill
\begin{subfigure}[b]{.475\textwidth}
\centering
% include second image
\includegraphics[width=\textwidth]{figures/phase2/2p-prod-hamming.pdf}
\caption{Phase 2 constrained constellation Hamming weight.}
\label{fig:sub-second}
\end{subfigure}
\vskip\baselineskip
\begin{subfigure}[b]{.475\textwidth}
\centering
% include third image
\includegraphics[width=\textwidth]{figures/phase2/2-prod-rules.pdf}
\caption{Phase 1 constellation test rules satisfied.}
\label{fig:sub-third}
\end{subfigure}
\hfill
\begin{subfigure}[b]{.475\textwidth}
\centering
% include fourth image
\includegraphics[width=\textwidth]{figures/phase2/2p-prod-rules.pdf}
\caption{Phase 2 constrained constellation test rules satisfied.}
\label{fig:sub-fourth}
\end{subfigure}
\caption{Comparison of the results from unconstrained Phase 1 and Hamming-weight constrained Phase 2 test vehicle constellation optimization. Plots (a) and (b) show the
Hamming weight of each vehicle in the constellation. Plots (c) and (d) show the
number of test rules satisfied per vehicle in the constellation.}
\label{fig:fig}
\end{figure*}
\subsection{Hamming-Weight-Preserving QNP Fabric Simulator}
The Hamming-weight constrained constellation described in the previous section
was used to solve the MAX-COVER problem.
\subsection{Comment on Scheduling}
While scheduling was not the focus of our Phase 2 work, we note an observation from
scheduling of the Phase 1 constellation. As shown in Fig. \ref{fig:freq}, each test
vehicle is scheduled for multiple tests without restriction. Logistically, the
number of tests performed per vehicle is a parameter that should be accounted for.
A limit can be placed on the number of tests performed per vehicle to enforce
diversity in the scheduled vehicles, or alternatively, the parameter can be set
to ensure that a given vehicle meets a certain number of tests in order to be put on
the schedule.
\begin{figure*}
\begin{center}
\includegraphics[width=0.5\textwidth]{figures/phase2/3-sched-frequency.pdf}
\caption{Number of times each vehicle was scheduled for a test in the Phase 1 scheduling.}
\label{fig:freq}
\end{center}
\end{figure*}
\bibliography{refs}
\appendix
\newpage
\clearpage
\section{Phase 1: Details of Problem Refinement and Solution Validation}
\subsection{Feature Groups Collision Issue}
The efficient exploration of the state space in terms of moves in feature groups
requires that the feature groups be disjoint. We found that this was not the
case in the specified problem due to a collision between feature groups 40 and
41. To fix this issue, we modified these two groups and added additional type
build rules to produce an isomorphic variant of the problem with disjoint
feature groups. Details:
Group 40 (28 elements): \texttt{[245, 246, 247, 250, 251, 252, 253, 254, 255,
256, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280,
281, 282, 284]}
Group 41 (46 elements): \texttt{[245, 246, 247, 248, 249, 250, 251, 252, 253,
254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 267, 268, 269, 270, 271,
272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 285, 286, 287, 288,
289, 290, 291, 292, 293]}
Union (48 elements): \texttt{[245, 246, 247, 248, 249, 250, 251, 252, 253, 254,
255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 266, 267, 268, 269, 270, 271,
272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287,
288, 289, 290, 291, 292, 293]}
Intersection (26 elements): \texttt{[245, 246, 247, 250, 251, 252, 253, 254,
255, 256, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280,
281, 282]}
In Group 40 but not Group 41 (2 elements): \texttt{[266, 284]}
In Group 41 but not Group 40 (20 elements): \texttt{[248, 249, 257, 258, 259,
260, 261, 262, 263, 264, 283, 285, 286, 287, 288, 289, 290, 291, 292, 293]}
This is hugely vexing for efficient enumeration of group-feature-satisfying
vehicle candidates.
This can be overcome by (1) redefining Group 40 to be \texttt{[266, 284]} and
then (2) adding a new global (added for all types) rule to the type build rules:
\texttt{ F266 | F284 => !F245 \& !F246 \& !F247 \& !F250 \& !F251 \& !F252 \& !F253 \&
!F254 \& !F255 \& !F256 \& !F267 \& !F268 \& !F269 \& !F270 \& !F271 \& !F272 \& !F273 \&
!F274 \& !F275 \& !F276 \& !F277 \& !F278 \& !F279 \& !F280 \& !F281 \& !F282}
If the group features are chosen randomly, uniformly, and independently, this
rule is activated with probability $2/(1+2)$ (i.e., if exactly one of 266 or 284 is true).
Given that the rule is activated, the probability of it being violated is $\sim 26/(1+46) \sim 0.55$.
Therefore the joint probability of the rule being activated and violated is
$(2/3) \cdot (26/47) \sim 0.37$, i.e., a random draw passes this rule with
probability $\sim 0.63$. Note that this relatively high success probability is somewhat
accidental, and is only due to the fact that the in-40-but-not-in-41 subset is
small relative to the intersection \emph{and} the in-41-but-not-in-40 subset is
large relative to the intersection. In future, it is recommended that
collisions between feature groups be avoided, insofar as is possible, in the
formulation of this problem.
\subsection{Solution Verification}
A standalone Python code was used to verify that the solutions reported satisfy
both the buildability and test constraints.
The MAX-SAT heuristic produced a constellation of test vehicles with specified
active features. The test vehicles must comply with a set of buildability
constraints related to (1) vehicle type, (2) feature group exclusivity, and
(3) configuration rules.
Each vehicle can be one of 25 types, which dictates allowed features.
The type rules were stored in a (\textit{number of types} $\times$
\textit{number of available features}) boolean array, with allowed features set
to True. For each vehicle in the solution constellation, if a feature was True,
it was compared to the allowed features for that vehicle type to assert that the
vehicle met type rules.
The feature group constraints consist of 42 feature groups, each of which contains a set
of mutually exclusive features. For each test vehicle, at most one feature in a group can
be true. The feature groups were stored in a (\textit{number of groups} $\times$
\textit{number of available features}) boolean array with each feature in the group
set to True. For each test vehicle, if a feature was True, the feature group for
which that feature was present was found and the bitwise AND was taken between
these two arrays, returning one True value if satisfied.
The set of test vehicles must also satisfy the configuration constraints. For a
given test vehicle type, a set of features is either forced and/or forbidden
based on the presence or absence of specific features. There are 4032
configuration constraints in the problem. Each configuration rule was separated
into an initial condition and a forced implication. These were further reduced
to 'on' sets, 'off' sets, and sets where 'any' one feature can be true, all of
which were stored as True in boolean arrays. For each test vehicle, the
configuration rules associated with the vehicle type were evaluated. First the
initial condition and then the forced implication were checked, following the same
procedure. (1) If the rule had a set of 'on' features, the logical AND of
the test vehicle features and the 'on' features was computed. The number of
True values in the resultant array should equal those in the 'on' features array
if satisfied. (2) If the rule had a set of 'off' features, the array of features
for the test vehicle was inverted, so that 'off' features were set to True. Then
the logical AND was computed, which should have the same number of True elements
as the 'off' features array if satisfied. (3) If the rule had an 'any' set, where
one of the features must be true, the AND was taken between the 'any' features
array and the test vehicle features, which should return one True value.
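A minimal sketch of this evaluation, assuming each side of a rule has been reduced to optional boolean masks \texttt{on}, \texttt{off} and \texttt{any\_of} as described (illustrative names; the `any' test is written as ``at least one'' here):
\begin{verbatim}
import numpy as np

def clause_holds(features, on=None, off=None, any_of=None):
    """Evaluate one side of a configuration rule (condition or implication)."""
    ok = True
    if on is not None:       # all 'on' features must be active
        ok = ok and (features & on).sum() == on.sum()
    if off is not None:      # all 'off' features must be inactive
        ok = ok and (~features & off).sum() == off.sum()
    if any_of is not None:   # at least one 'any' feature must be active
        ok = ok and bool(np.any(features & any_of))
    return ok

def satisfies_config_rule(features, condition, implication):
    """A rule holds if its condition is not met or its implication holds."""
    return (not clause_holds(features, **condition)) or \
           clause_holds(features, **implication)
\end{verbatim}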
For the scheduling solution, we verified that the results obey the scheduling
rules. The scheduling rules stipulate that there are at most 10 tests per day and
that each vehicle can undergo at most one test per day; we therefore check that,
for each test day, each vehicle index appears at most once. Next, each test is
ranked in a group, and each test vehicle must undergo its tests in order according
to the test ranking. To test that this condition is satisfied, we step through the
group rank of each test vehicle's tests. If a group index is out of order, we find
which vehicle is in that slot and trace back all previous tests that the vehicle
underwent; if the tests for that specific vehicle are in order of ranking, the test
set satisfies the test group rank rules. Finally, each test requires a certain
number of test vehicles. For each test, we count all occurrences of that specific
test in the schedule and compare the count to the number required for that test.
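A minimal sketch of these scheduling checks, assuming the schedule is a list of (day, vehicle, test) assignments, \texttt{test\_rank} maps each test to its group rank, and \texttt{required} maps each test to the number of vehicles it needs (illustrative names; the 10-tests-per-day cap is interpreted as at most 10 assignments per day):
\begin{verbatim}
from collections import Counter, defaultdict

def check_schedule(schedule, test_rank, required, max_per_day=10):
    # At most 10 test assignments on any day.
    per_day = Counter(day for day, _, _ in schedule)
    assert all(n <= max_per_day for n in per_day.values())

    # Each vehicle appears at most once per day.
    per_day_vehicle = Counter((day, veh) for day, veh, _ in schedule)
    assert all(n <= 1 for n in per_day_vehicle.values())

    # Each vehicle's tests must run in non-decreasing group rank.
    by_vehicle = defaultdict(list)
    for day, veh, test in schedule:
        by_vehicle[veh].append((day, test_rank[test]))
    for veh, runs in by_vehicle.items():
        ranks = [rank for _, rank in sorted(runs)]
        assert ranks == sorted(ranks)

    # Each test is run on exactly the required number of vehicles.
    per_test = Counter(test for _, _, test in schedule)
    assert all(per_test[t] == n for t, n in required.items())
    return True
\end{verbatim}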
\section{Phase 2: . . .}
\end{document}
% LaTeX support: [email protected] and CWD
% In case you need support, please attach all files that are necessary for compiling as well as the log file, and specify the details of your LaTeX setup (which operating system and LaTeX version / tools you are using).
%=================================================================
\documentclass[diversity,article,submit,moreauthors,pdftex]{Definitions/mdpi}
% If you would like to post an early version of this manuscript as a preprint, you may use preprint as the journal and change 'submit' to 'accept'. The document class line would be, e.g., \documentclass[preprints,article,accept,moreauthors,pdftex]{mdpi}. This is especially recommended for submission to arXiv, where line numbers should be removed before posting. For preprints.org, the editorial staff will make this change immediately prior to posting.
\input{code_format.tex}
\usepackage{subfig} % Compound figures
%--------------------
% Class Options:
%--------------------
%----------
% journal
%----------
% Choose between the following MDPI journals:
% acoustics, actuators, addictions, admsci, aerospace, agriculture, agriengineering, agronomy, algorithms, animals, antibiotics, antibodies, antioxidants, applsci, arts, asc, asi, atmosphere, atoms, axioms, batteries, bdcc, behavsci , beverages, bioengineering, biology, biomedicines, biomimetics, biomolecules, biosensors, brainsci , buildings, cancers, carbon , catalysts, cells, ceramics, challenges, chemengineering, chemistry, chemosensors, children, cleantechnol, climate, clockssleep, cmd, coatings, colloids, computation, computers, condensedmatter, cosmetics, cryptography, crystals, dairy, data, dentistry, designs , diagnostics, diseases, diversity, drones, econometrics, economies, education, ejihpe, electrochem, electronics, energies, entropy, environments, epigenomes, est, fermentation, fibers, fire, fishes, fluids, foods, forecasting, forests, fractalfract, futureinternet, futurephys, galaxies, games, gastrointestdisord, gels, genealogy, genes, geohazards, geosciences, geriatrics, hazardousmatters, healthcare, heritage, highthroughput, horticulturae, humanities, hydrology, ijerph, ijfs, ijgi, ijms, ijns, ijtpp, informatics, information, infrastructures, inorganics, insects, instruments, inventions, iot, j, jcdd, jcm, jcp, jcs, jdb, jfb, jfmk, jimaging, jintelligence, jlpea, jmmp, jmse, jnt, jof, joitmc, jpm, jrfm, jsan, land, languages, laws, life, literature, logistics, lubricants, machines, magnetochemistry, make, marinedrugs, materials, mathematics, mca, medicina, medicines, medsci, membranes, metabolites, metals, microarrays, micromachines, microorganisms, minerals, modelling, molbank, molecules, mps, mti, nanomaterials, ncrna, neuroglia, nitrogen, notspecified, nutrients, ohbm, optics, particles, pathogens, pharmaceuticals, pharmaceutics, pharmacy, philosophies, photonics, physics, plants, plasma, polymers, polysaccharides, preprints , proceedings, processes, proteomes, psych, publications, quantumrep, quaternary, qubs, reactions, recycling, religions, remotesensing, reports, resources, risks, robotics, safety, sci, scipharm, sensors, separations, sexes, signals, sinusitis, smartcities, sna, societies, socsci, soilsystems, sports, standards, stats, surfaces, surgeries, sustainability, symmetry, systems, technologies, test, toxics, toxins, tropicalmed, universe, urbansci, vaccines, vehicles, vetsci, vibration, viruses, vision, water, wem, wevj
%---------
% article
%---------
% The default type of manuscript is "article", but can be replaced by:
% abstract, addendum, article, benchmark, book, bookreview, briefreport, casereport, changes, comment, commentary, communication, conceptpaper, conferenceproceedings, correction, conferencereport, expressionofconcern, extendedabstract, meetingreport, creative, datadescriptor, discussion, editorial, essay, erratum, hypothesis, interestingimages, letter, meetingreport, newbookreceived, obituary, opinion, projectreport, reply, retraction, review, perspective, protocol, shortnote, supfile, technicalnote, viewpoint
% supfile = supplementary materials
%----------
% submit
%----------
% The class option "submit" will be changed to "accept" by the Editorial Office when the paper is accepted. This will only make changes to the frontpage (e.g., the logo of the journal will get visible), the headings, and the copyright information. Also, line numbering will be removed. Journal info and pagination for accepted papers will also be assigned by the Editorial Office.
%------------------
% moreauthors
%------------------
% If there is only one author the class option oneauthor should be used. Otherwise use the class option moreauthors.
%---------
% pdftex
%---------
% The option pdftex is for use with pdfLaTeX. If eps figures are used, remove the option pdftex and use LaTeX and dvi2pdf.
%=================================================================
\firstpage{1}
\makeatletter
\setcounter{page}{\@firstpage}
\makeatother
\pubvolume{xx}
\issuenum{1}
\articlenumber{5}
\pubyear{2019}
\copyrightyear{2019}
%\externaleditor{Academic Editor: name}
\history{Received: date; Accepted: date; Published: date}
%\updates{yes} % If there is an update available, un-comment this line
%% MDPI internal command: uncomment if new journal that already uses continuous page numbers
%\continuouspages{yes}
%------------------------------------------------------------------
% The following line should be uncommented if the LaTeX file is uploaded to arXiv.org
%\pdfoutput=1
%=================================================================
% Add packages and commands here. The following packages are loaded in our class file: fontenc, calc, indentfirst, fancyhdr, graphicx, lastpage, ifthen, lineno, float, amsmath, setspace, enumitem, mathpazo, booktabs, titlesec, etoolbox, amsthm, hyphenat, natbib, hyperref, footmisc, geometry, caption, url, mdframed, tabto, soul, multirow, microtype, tikz
\usepackage{xcolor}
\newcommand{\TODO}[1]{\textcolor{red}{\textbf{#1}}} % \TODO{NOTE TO SELF WRITTEN IN RED}
%=================================================================
%% Please use the following mathematics environments: Theorem, Lemma, Corollary, Proposition, Characterization, Property, Problem, Example, ExamplesandDefinitions, Hypothesis, Remark, Definition, Notation, Assumption
%% For proofs, please use the proof environment (the amsthm package is loaded by the MDPI class).
%=================================================================
% Full title of the paper (Capitalized)
\Title{Diversity and Structure of an Arid Woodland at the Western Edge of the Miombo Ecoregion, Southwest Angola}
% Author Orchid ID: enter ID or remove command
\newcommand{\orcidauthorA}{0000-0001-5595-255X} % John Godlee
\newcommand{\orcidauthorB}{0000-0002-8859-7491} % Francisco Maiato
\newcommand{\orcidauthorC}{0000-0002-3770-2482} % Jose Tchamba
\newcommand{\orcidauthorD}{0000-0002-5137-1448} % Valter Chisingui
% \newcommand{\orcidauthorE}{} % Jonathan Muledi
\newcommand{\orcidauthorF}{0000-0003-3208-5443} % Mylor Shutcha
\newcommand{\orcidauthorH}{0000-0002-1802-0128} % Casey Ryan
\newcommand{\orcidauthorI}{0000-0001-9232-5221} % Kyle Dexter
\newcommand{\orcidauthorJ}{0000-0002-4852-7085} % Thom Brade
% Authors, for the paper (add full first names)
\Author{John L. Godlee $^{1}$\orcidA{}*, Francisco Maiato Gon\c{c}alves$^{2}\orcidB{}$, Jos\'{e} Jo\~{a}o Tchamba$^{2}\orcidC{}$, Antonio Valter Chisingui$^{2}\orcidD{}$, Jonathan Ilunga Muledi$^{3}$, Mylor Ngoy Shutcha$^{3}\orcidF{}$, Casey M. Ryan$^{1}\orcidH{}$, Thom K. Brade$^{1}\orcidH{}$ and Kyle G. Dexter$^{1,4}\orcidI{}$}
% Authors, for metadata in PDF
\AuthorNames{John L. Godlee, Francisco Maiato Goncalves and Kyle G. Dexter}
% Affiliations / Addresses (Add [1] after \address if there is only one affiliation.)
\address{%
$^{1}$ \quad School of GeoSciences, University of Edinburgh, Edinburgh, United Kingdom\\
$^{2}$ \quad Herbarium of Lubango, ISCED Hu\'{i}la, Sarmento Rodrigues Str. No. 2, CP. 230, Lubango, Angola\\
$^{3}$ \quad Ecologie, Restauration Ecologique et Paysage, Facult\'{e} des Sciences Agronomique, Universit\'{e} de Lubumbashi, Route Kasapa BP 1825, Democratic Republic of Congo\\
$^{4}$ \quad Royal Botanic Garden Edinburgh, Edinburgh EH3 5LR, United Kingdom}
% Contact information of the corresponding author
\corres{Correspondence: [email protected]}
% Current address and/or shared authorship
%\firstnote{Current address: Crew Building, The King's Buildings, Edinburgh, EH9 3FF, United Kingdom} % \dagger
% The commands \thirdnote{} till \eighthnote{} are available for further notes
%\simplesumm{} % Simple summary
%\conference{} % An extended version of a conference paper
% Abstract (Do not insert blank lines, i.e. \\)
% 200 words max
% (1) Background: Place the question addressed in a broad context and highlight the purpose of the study;
% (2) Methods: Describe briefly the main methods or treatments applied;
% (3) Results: Summarize the article's main findings; and
% (4) Conclusion: Indicate the main conclusions or interpretations.
% The abstract should be an objective representation of the article, it must not contain results which are not presented and substantiated in the main text and should not exaggerate the main conclusions.
\abstract{Seasonally dry woodlands are the dominant land cover across southern Africa. They are biodiverse, structurally complex and important for ecosystem service provision. Species composition and structure vary across the region producing a diverse array of woodland types. The woodlands of the Hu\'{i}la plateau in southwest Angola represent the extreme southwestern extent of the miombo ecoregion and are markedly drier than other woodlands within this ecoregion. They remain understudied however, compared to woodlands further east in the miombo ecoregion. We conducted a plot-based study in Bicuar National Park, southwest Angola, comparing tree species composition and woodland structure with similar plots in Tanzania, Mozambique, and the Democratic Republic of Congo. We found plots in Bicuar National Park had comparatively low tree species diversity, but contained 28 tree species not found in other plots. Plots in Bicuar had low basal area, excepting plots dominated by \textit{Baikiaea plurijuga}. In a comparison of plots in intact vegetation with plots in areas previously disturbed by shifting-cultivation agriculture, we found species diversity was marginally higher in disturbed plots. Our study supports the idea that Bicuar National Park and the woodlands of the Hu\'{i}la plateau should be a conservation priority, representing a unique woodland mosaic with an uncommon mixture of woodland types within a small area.}
% Keywords
% 3-10
\keyword{Woodland, Miombo, Savanna, Diversity, Disturbance, Baikiaea}
% The fields PACS, MSC, and JEL may be left empty or commented out if not applicable
%\PACS{J0101}
%\MSC{}
%\JEL{}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Only for the journal Diversity
\LSID{\url{http://}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Only for the journal Applied Sciences:
%\featuredapplication{Authors are encouraged to provide a concise description of the specific application or a potential application of the work. This section is not mandatory.}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Only for the journal Data:
%\dataset{DOI number or link to the deposited data set in cases where the data set is published or set to be published separately. If the data set is submitted and will be published as a supplement to this paper in the journal Data, this field will be filled by the editors of the journal. In this case, please make sure to submit the data set as a supplement when entering your manuscript into our manuscript editorial system.}
%\datasetlicense{license under which the data set is made available (CC0, CC-BY, CC-BY-SA, CC-BY-NC, etc.)}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Only for the journal Toxins
%\keycontribution{The breakthroughs or highlights of the manuscript. Authors can write one or two sentences to describe the most important part of the paper.}
%\setcounter{secnumdepth}{4}
\input{include/data_descrip_figures.tex}
\input{include/dbh_bin_figures.tex}
\input{include/beta_div_figures.tex}
\input{include/plot_div_figures.tex}
\input{include/degrad_figures.tex}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Introduction}
Tropical woodlands extend over 12 countries in central and southern Africa, with an estimated area of \textasciitilde{}3.7 million km\textsuperscript{2} \citep{White1983, Mayaux2004, Arino2010}. Within this, miombo woodlands are the dominant vegetation type, characterised by trees of the \textit{Brachystegia}, \textit{Julbernardia} and \textit{Isoberlinia} genera, all within the Fabaceae family, subfamily Detaroideae \citep{Chidumayo1997, Campbell2002, Azani2017}. These genera are seldom found as dominant species outside miombo woodlands, and while their contribution to the biomass of miombo woodlands is substantial, it varies throughout the region \citep{Campbell2002}. Across the range of southern African woodlands, variation in climate, edaphic factors, disturbance regimes and biogeography maintain a diverse array of woodland types in terms of both species composition and physiognomy \citep{Privette2004, Caylor2004, Chidumayo2002}.
The miombo ecoregion extends across the continent in a wide band that reaches north into Kenya and the Democratic Republic of Congo (DRC) and south into the northeast of South Africa (\hyperref[plot_map]{Figure 1a}). Miombo woodlands are defined both by their tree diversity and by their structure of a grassy herbaceous understorey with an often sparse tree canopy. In archetypical miombo woodlands, species of the genera \textit{Brachystegia}, \textit{Julbernardia} and \textit{Isoberlinia} generally hold the most biomass, forming a mostly open woodland canopy. Distinct from dry tropical forests, miombo woodlands generally maintain a grassy understorey dominated by C4 grasses \citep{Dexter2015}. Miombo woodlands are heavily structured by seasonal fire and herbivory, with fire in particular often preventing the formation of a closed tree canopy which would naturally occur in the absence of these disturbances \citep{Oliveras2016, Dantas2016}. Within the miombo ecoregion, other woodland types exist, notably woodlands dominated by \textit{Baikiaea} spp. or \textit{Colophospermum mopane} \citep{Campbell2002}.
Southern African woodlands are structurally complex but species-poor in the tree layer compared to dry tropical forests which exist at similar latitudes \citep{DRYFLOR2016, Torello-raventos2013}. However, these woodlands contain many endemic tree species and support a highly diverse woodland understorey, with an estimated 8500 species of vascular plants \citep{Frost1996}. Miombo woodlands provide ecosystem services for an estimated 150 million people \citep{Ryan2016}. Additionally, miombo woodlands hold \textasciitilde{}18-24 Pg C in woody biomass and soil organic carbon, which is comparable to that held in the rainforests of the Congo basin (\textasciitilde{}30 Pg C) \citep{Mayaux2008}. As woodland resource extraction and conversion to agricultural land accelerate due to growing human populations, the conservation of miombo woodlands as a biodiverse and unique ecosystem has become a growing concern. Despite their importance, however, dry tropical woodlands remain understudied compared to wet forests across the globe \citep{Clarke2017}.
Over the previous two decades, the limited ecological research in southern African woodlands has been concentrated in the central and eastern parts of the miombo region, notably in southern Tanzania, Mozambique, Malawi, Zimbabwe and Zambia. The south-western extent of miombo woodlands, which is found entirely within Angola has received considerably less attention \citep{Huntley2019}. Partly this is due to diminished research capacity during the Angolan civil war following the country's independence, which took place officially between 1975 and 2002, but with sporadic localised periods of civil unrest until around 2012 \citep{Oliveira2015}. While botanical surveys of woodlands in this region are more plentiful \citep{Huntley2019, Figueiredo2009}, joint studies of woodland species composition and physical structure remain scarce. This is despite the value of these studies in helping to estimate woodland net primary productivity, carbon sequestration potential, and studies of community assemblage. To properly understand spatial variation in woodland species composition and physical structure across the miombo ecoregion, it is necessary to fill understudied gaps such as those in southwest Angola.
The miombo woodlands of southwest Angola are found in their most intact form in Bicuar National Park and to a lesser extent in the adjacent Mupa National Park, on the Hu\'{i}la plateau \citep{Chisingui2018}. Both of these national parks have been protected to varying extents since 1938 \citep{Huntley2019}. These woodlands exist in much drier conditions than other miombo woodlands, as precipitation diminishes rapidly across the Hu\'{i}la plateau towards the Angolan coast and the Namib desert (\hyperref[plot_map]{Figure 1a}). The vegetation of the Hu\'{i}la plateau holds many endemic species, including around 83 endemic Fabaceae species \citep{Soares2007}, and has the most endemic plant species of any part of Angola \citep{Figueiredo2008}. \citet{Linder2001} and \citet{Droissart2018} both identify the western portion of the Hu\'{i}la plateau as a centre of tropical African endemism. \citet{Baptista2019} also identify Bicuar National Park as a centre for herpetofaunal diversity.
Much of the historic miombo woodland area in southwest Angola surrounding the Bicuar and Mupa National Parks has been deforested in recent years, with a clear increase in deforestation activity since the end of the civil war owing to an increase in rural population and agricultural activity \citep{Schneibel2013, Huntley2019}. The western extent of miombo woodlands found within Bicuar National Park is therefore of great importance for conservation as a refuge for wildlife and endemic plant species \citep{Huntley2019}.
It is important to focus not only on the biodiversity of undisturbed woodland areas but also on previously disturbed land in order to properly assess the biodiversity and woodland structure of the Park. Woodland disturbance through shifting cultivation practices produces novel habitats which are not necessarily of lower conservation value \citep{McNicol2015, Goncalves2017}. Since Bicuar National Park's rejuvenation following the reinforcement of park boundaries after the civil war, many areas of woodland that were previously heavily grazed, farmed via shifting cultivation techniques, and used for timber extraction have been allowed to re-establish and are now protected from further human resource extraction. This presents a unique opportunity to compare the species composition of these disturbed areas with areas of nearby woodland that have not been farmed in living memory.
In this study we present results of the tree diversity and woodland structure of miombo woodlands found at the far western extent of miombo woodlands in Bicuar National Park, Hu\'{i}la province, Angola. Our study utilised recently installed biodiversity monitoring plots set up within the park in 2018 and 2019. We compare the tree diversity and woodland structure of Bicuar National Park with biodiversity monitoring plots previously established in other areas of miombo woodland across the miombo ecoregion which use a common plot biodiversity census methodology. In addition, we take advantage of a unique opportunity to compare the tree species composition of areas of abandoned and now protected farmland that have begun to re-establish as woodland.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Materials and Methods}
\subsection{Study area}
We chose three areas of miombo woodland across the miombo ecoregion to compare with those in Bicuar National Park, Angola (S15.1$^\circ$, E14.8$^\circ$). The three sites were Gorongosa National Park in central Mozambique (S19.0$^\circ$, E34.2$^\circ$) \citep{Ryan2011}, Kilwa District in southern Tanzania (S9.0$^\circ$, E39.0$^\circ$) \citep{McNicol2018}, and the Mikembo Natural Reserve in Katanga, southern Democratic Republic of Congo (DRC) (S11.5$^\circ$, E27.7$^\circ$) \citep{Muledi2017}. Within each of these woodland sites, multiple one hectare square plots had been installed previously to monitor biodiversity and biomass dynamics. In Katanga, a larger 10 ha plot was subdivided into ten 1 ha plots for this study. We used these previous censuses, collected between 2010 and 2019, to estimate tree biodiversity and woodland structure. Sites range in Mean Annual Precipitation (MAP) from 864 mm y\textsuperscript{-1} in Bicuar to 1115 mm y\textsuperscript{-1} in Katanga. Mean Annual Temperature ranges from \textasciitilde{}20.5 $^\circ$C in Bicuar and Katanga to \textasciitilde{}25.8 $^\circ$C in Kilwa (\hyperref[temp_precip]{Figure 1b}).
Bicuar National Park covers an area of \textasciitilde{}7900 km\textsuperscript{2}; it was established as a hunting reserve in 1938 and later as a national park in 1964 (\autoref{bicuar_map}). While fauna populations in the Park were severely damaged by the Angolan civil war, the interior of the Park remains a largely intact mosaic of miombo woodland, Baikiaea-Burkea woodland, shrub/thicket vegetation and seasonally flooded grassland. Encroachment of agriculture and grazing, particularly along the northwest and western boundaries of the Park, has led to a fragmented park boundary with patches of diminished thicket and woodland in areas of previously farmed land that have been protected since park boundaries were re-established following the end of the civil war.
\subsection{Plot data collection}
We sampled \nplotsbicuar{} one hectare plots in Bicuar National Park and collated data from a total of \nplots{} one hectare plots across the miombo ecoregion within four sites. \hyperref[plot_map]{Figure 1a} and \autoref{group_descrip} show the locations and general description of each site, respectively. Plots in Bicuar were situated at least 500 m from the edge of a woodland patch to prevent edge effects which may have altered tree species composition.
Within each plot, every tree stem $\ge$5 cm stem diameter was recorded, except in the DRC plots, where only stems $\ge$10 cm stem diameter were recorded. For each tree stem the species and stem diameter were recorded. Tree species were identified by local botanists at each site, and taxonomy was later checked against the African Plant Database \citep{APD2020}. In all sites, \citet{Palgrave2003} and various other texts were used as a guide for species identification in the field. Specimens that could not be identified in the field, or subsequently at herbaria, were described as morphospecies. All tree species within the Bicuar National Park plots were identified. Tree coppicing due to fire, herbivory, and human actions is common in miombo woodlands; therefore, for trees with multiple stems, each stem $\ge$5 cm stem diameter was recorded, while the parent tree was also recorded for the diversity analyses described below.
Stem diameter was recorded at 1.3 m from the ground along the stem, as per convention (diameter at breast height, DBH), using a diameter tape measure \citep{Kershaw2017}. Where stem abnormalities were present at 1.3 m from the ground, which precluded the accurate estimation of stem diameter at 1.3 m, the stem diameter was recorded at the nearest 10 cm increment above 1.3 m without significant stem abnormalities \citep{Kershaw2017}. To ensure consistency among stem diameter values recorded at different heights, when the stem diameter was recorded at a height other than 1.3 m, the stem diameter at 1.3 m was estimated from the recorded stem diameter using a cubic polynomial equation which adjusts for tree stem taper. This equation was calibrated on 100 stems measured at multiple heights in Niassa Province, Mozambique (\hyperref[appendixa]{Appendix A}). Stems below 10 cm stem diameter were not measured in the DRC plots. We therefore estimated the number of 5-10 cm stems in each of these plots by extrapolating a linear regression of log stem abundance across the available stem diameter classes.
In addition to the one hectare plots across the miombo ecoregion, we compared the tree biodiversity of undisturbed areas of miombo woodland in Bicuar National Park with areas of disturbed woodland around the edge of the Park that had been previously farmed via shifting cultivation methods, and had since been abandoned and reclaimed within the Park boundaries \autoref{bicuar_map}. We identified areas previously farmed with the help of park rangers and local residents who identified these areas from memory. We conducted \ndegradplots{} plot surveys of woodland diversity and structure in these areas with 20$\times$50 m (0.1 ha) plots, and compared their diversity and structure with 20$\times$50 m subsamples of the \nplotsbicuar{} one hectare plots within the Park interior. Like the one hectare plots, within these smaller 20$\times$50 m plots we recorded the species and stem diameter of every tree stem $\ge$5 cm stem diameter.
\begin{figure}[H]
\centering
\includegraphics[width=\textwidth]{img/bicuar_map}
\caption{Location of plots in Bicuar National Park, southwest Angola. The Park boundary is shown as a pink outline, according to \citet{WDPA2019}. One hectare undisturbed plots are shown as red points, while disturbed 20$\times$50 m (0.1 hectare) plots are shown as blue points. The map background is a true colour composite satellite image generated using the Google Maps Static Maps API in the \texttt{ggmap} R package \citep{ggmap}.}
\label{bicuar_map}
\end{figure}
\subsection{Climatic data}
The WorldClim dataset \citep{Fick2017} was used to gather data on plot-level climatic conditions. We estimated Mean Annual Precipitation (MAP) as the mean of total annual precipitation values between 1970 and 2000, and Mean Annual Temperature (MAT) as the mean of mean annual temperatures between 1970 and 2000. The seasonality of temperature (MAT SD) was calculated as the standard deviation of monthly temperatures within each year. We estimated Climatic Water Deficit (CWD) for each plot according to \citet{Chave2014}, as the sum of the difference between monthly rainfall and monthly evapotranspiration when the difference is negative, using the dataset available at \url{http://ups-tlse.fr/pantropical_allometry.htm}, which uses data from the WorldClim dataset 1970-2000.
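Written explicitly, this definition of CWD is:
\begin{equation}
\mathrm{CWD} = \sum_{i=1}^{12} \min(0,\; P_{i} - \mathrm{ET}_{i})
\end{equation}
where $P_{i}$ and $\mathrm{ET}_{i}$ are the precipitation and evapotranspiration of month $i$, so that CWD is zero for plots with no water deficit and increasingly negative with greater water stress.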
\subsection{Data analysis}
We calculated the basal area of each stem ($g_{i}$) using:
\begin{equation}
g_{i} = \pi{} \times (d_{i} / 2)^{2}
\end{equation}
Where $d_{i}$ is the estimated stem diameter of stem $i$ at 1.3 m having accounted for tree taper. We then calculated the total basal area of each plot as the sum of each stem's basal area. For the DRC plots which lacked 5-10 cm stems, we estimated basal area in this stem diameter class from our extrapolation of stem abundance in the 5-10 cm diameter class, assuming a mean stem diameter of 7.5 cm.
All diversity measures were calculated on individual tree-level data, rather than stem-level data, to avoid artificial inflation of abundance for those species which readily coppice. We calculated the alpha diversity of each plot using the Shannon-Wiener index ($H'$) %(\autoref{shannon}),
using the \texttt{vegan} package in R \citep{vegan}.
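For reference, the Shannon-Wiener index takes its standard form:
\begin{equation}
H' = -\sum_{i=1}^{S} p_{i} \ln{p_{i}}
\end{equation}
where $p_{i}$ is the proportion of individuals belonging to species $i$ and $S$ is the number of species recorded in the plot.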
We calculated the pairwise beta diversity among sites using the S\o{}rensen coefficient ($S_{S}$) %(\autoref{sorensen})
\citep{Koleff2003}. We analysed the difference in alpha diversity measures and woodland structural variables among groups of plots using Analysis of Variance (ANOVA) statistical models, with a null hypothesis that there was no difference among groups of plots. Post-hoc Tukey's HSD tests were used to investigate the degree to which pairwise combinations of plot groups differed in each case.
We calculated $S_{S}$ for each pairwise combination of sites using aggregated species composition data from all plots in each site. The value of $S_{S}$, which ranges between zero and one, was multiplied by 100 to give a ``percentage similarity'' between communities in species composition. We estimated abundance evenness for each plot using the Shannon equitability index ($E_{H'}$) \citep{Smith1996}, which is the ratio of $H'$ to the log-transformed species richness.
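For reference, both indices take their standard forms:
\begin{equation}
S_{S} = \frac{2a}{2a + b + c}, \qquad E_{H'} = \frac{H'}{\ln{S}}
\end{equation}
where $a$ is the number of species shared by a pair of sites, $b$ and $c$ are the numbers of species unique to each site, and $S$ is the species richness of the plot.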
% , then calculated the mean of these values of $S_{S}$, producing a single value of $S_{S}$ for each pairwise combination of sites \citep{Baselga2012}
We used Non-metric Multidimensional Scaling (NMDS) to assess the variation in species composition among one hectare plots, and also between disturbed and undisturbed 20$\times$50 m plots within Bicuar National Park, using the \texttt{vegan} R package. The number of dimensions for NMDS was minimised while ensuring the stress value of the NMDS fit was $\le$0.1. NMDS analyses were run with 500 random restarts to ensure a global solution was reached. We used Bray-Curtis dissimilarity as the optimal measure of ecological distance \citep{Legendre2013}. We fit plot-level estimates of MAP, MAT, the seasonality of MAT and CWD to the first two axes of the resulting ordination using the \texttt{envfit()} function in the \texttt{vegan} R package to investigate how these environmental factors influenced the grouping of species composition among plots. All analyses were conducted in R version 3.6.1 \citep{RCoreTeam2019}.
% Materials and Methods should be described with sufficient details to allow others to replicate and build on published results. Please note that publication of your manuscript implicates that you must make all materials, data, computer code, and protocols associated with the publication available to readers. Please disclose at the submission stage any restrictions on the availability of materials or information. New methods and protocols should be described in detail while well-established methods can be briefly described and appropriately cited.
% Research manuscripts reporting large datasets that are deposited in a publicly available database should specify where the data have been deposited and provide the relevant accession numbers. If the accession numbers have not yet been obtained at the time of submission, please state that they will be provided during review. They must be provided prior to publication.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Results}
\subsection{Alpha diversity}
In Bicuar National Park we measured a total of \nbicuartrees{} trees within the one hectare plots, and across the four sites, a total of \ntrees{} trees were sampled. Trees in Bicuar National Park belonged to \nbicuarspecies{} species within \nbicuarfamilies{} families. Across all four sites we recorded \nspecies{} species from \nfamilies{} families. The most diverse family within each site and among all plots was Fabaceae with \nfabaceaespecies{} species. We encountered \nbicuaruniquespecies{} tree species in Bicuar National Park which were not found in the other miombo woodland plots (\autoref{bicuar_species}). The most common of these unique species were \textit{Brachystegia tamarindoides} (n = \nbg{}), \textit{Baikiaea plurijuga} (n = \nbp{}) and \textit{Baphia massaiensis} (n = \nbm{}). Four species unique to Bicuar National Park within this dataset only had one individual recorded: \textit{Elachyptera parvifolia}, \textit{Entandrophragma spicatum}, \textit{Oldfieldia dactylophylla}, \textit{Peltophorum africanum}.
\input{include/bicuar_species.tex}
Alpha diversity in Bicuar National Park was low compared to other sites (\autoref{div_box}). Mean $H'$ across plots in Bicuar National Park was \bicuarshannon{}. An ANOVA showed a significant difference in $H'$ among sites (\lmshannon{}), and a post-hoc Tukey's test showed that $H'$ in plots in Bicuar National Park was significantly different from those in DRC ($H'$ = \drcshannon{}, \tukeyshannonbicuardrc{}), Mozambique ($H'$ = \nhamshannon{}, \tukeyshannonbicuarnham{}) and Tanzania ($H'$ = \kilwashannon{}, \tukeyshannonbicuarkilwa{}). Variation in $H'$ is large within Bicuar National Park, with $H'$ ranging from \bicuarminshannon{} to \bicuarmaxshannon{}, but this was a similar range to other sites. In contrast, the range of species richness within Bicuar National Park was much lower than other sites, suggesting that the wide range in $H'$ was caused by variation in abundance evenness.
\begin{figure}[H]
\centering
\includegraphics[width=\textwidth]{img/div_box}
\caption{Variation of alpha diversity estimates and basal area among sites. Boxes bound the 1st and 3rd quartiles, with the median within the box. Whiskers represent 1.5 times the interquartile range plus or minus the 1st and 3rd quartiles, respectively. Values found beyond the whiskers are shown individually as points.}
\label{div_box}
\end{figure}
\subsection{Beta diversity}
The NMDS of plot species composition among one hectare plots was run with four dimensions. The stress value was \nmdsstress{}. Plot diversity in Bicuar National Park formed three distinct groups. Bicuar plots 9, 13, and 15 were characterised by high abundances of \textit{Baikiaea plurijuga}, \textit{Baphia massaiensis} and \textit{Croton gratissimus}, according to species scores from the NMDS. Bicuar plots 4, 11, and 12 were characterised by \textit{Brachystegia tamarindoides}, and \textit{Ochna pulchra}. The third group consisting of the remaining seven plots surprisingly had a species composition most similar to that of plots in the DRC group according to the NMDS, sharing the core miombo species of \textit{Julbernardia paniculata} and \textit{Pterocarpus angolensis}. This group of plots in Bicuar National Park was further characterised by the abundance of \textit{Pterocarpus lucens}, \textit{Strychnos pungens} and \textit{Bridelia mollis} however, which were not present in the DRC plots. All environmental factors fitted to the NMDS ordination significantly influenced the grouping of plots (\autoref{all_nmds_envfit}). MAT explained the most variation in plot position on the first two NMDS axes (\nmdsmat{}), followed by CWD (\nmdsmapsd{}), the seasonality of MAT (\nmdsmatsd{}) and MAP (\nmdsmap{}). Variation in MAP drove much of the difference between plots in Bicuar National Park versus those in Tanzania and Mozambique.
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth]{img/all_nmds_envfit}
\caption{Environmental factors fitted to the first two axes of the NMDS ordination of species composition of one hectare plots, showing the variation in plot species composition within and among sites. Diamonds are plot scores coloured by site. The lengths of arrows indicating environmental factor fits are scaled by R\textsuperscript{2}. Arrows point in the direction of increasing values of that environmental factor.}
\label{all_nmds_envfit}
\end{figure}
The pairwise S\o{}rensen coefficient of percentage similarity ($S_{S}$) showed that the species composition of plots in Bicuar National Park had low similarity with other sites in the study, sharing few species with other sites (\autoref{site_pairs_js}). Similar to the NMDS, these results show that plots in Bicuar National Park are most similar to those found in DRC.
\input{include/site_pairs_js.tex}
\subsection{Woodland structure}
Mean basal area of plots in Bicuar National Park was \babicuar{} m\textsuperscript{2} ha\textsuperscript{-1}, ranging from \bicuarbamin{} to \bicuarbamax{} m\textsuperscript{2} ha\textsuperscript{-1} (\autoref{div_box}). An ANOVA showed a significant difference in basal area among sites (\lmba{}), and a post-hoc Tukey's test showed that basal area in Bicuar National Park was significantly lower than plots in DRC (BA = \badrc{} m\textsuperscript{2} ha\textsuperscript{-1}, \tukeybabicuardrc{}), but there were no significant differences between Bicuar and Mozambique (BA = \banham{} m\textsuperscript{2} ha\textsuperscript{-1}, \tukeybabicuarnham{}) or Tanzania (BA = \bakilwa{} m\textsuperscript{2} ha\textsuperscript{-1}, \tukeybabicuarkilwa{}) (\autoref{div_box}). Additionally, Bicuar plots had less variation in basal area among plots than other sites. Plots in Bicuar with the highest basal area were dominated by \textit{Baikiaea plurijuga} and \textit{Baphia massaiensis} (Plots 9, 13, and 15).
The stem diameter abundance distribution in Bicuar National Park was comparable with other sites (\autoref{stem_ab_dbh_bin}), albeit with fewer stems in each class. The slope of log mean stem size distribution among diameter bins was \dbhslopebicuar{} in Bicuar National Park, \dbhslopedrc{} in DRC, \dbhslopekilwa{} in Tanzania, and \dbhslopenham{} in Mozambique.
\begin{figure}[H]
\centering
\includegraphics[width=\textwidth]{img/stem_ab_dbh_bin_group}
\caption{Ranked variation between plots in stem number within each site, with bars stacked according to stem diameter class. Error bars are the mean $\pm$ 1 standard error. The dashed bar for the DRC 5-10 cm stem diameter class indicates that these measurements were estimated from the average proportion of 5-10 cm stems in all other plots.}
\label{stem_ab_dbh_bin}
\end{figure}
\subsection{Effect of disturbance via shifting cultivation on diversity within Bicuar National Park}
There was a clear difference in the species composition of previously farmed disturbed woodland plots and undisturbed woodland plots, but with some overlap (\autoref{bicuar_degrad_nmds}). Notably, Plots 4 and 7 in putatively undisturbed woodland have a species composition more resembling the disturbed plots. These two plots were dominated by \textit{Brachystegia tamarindoides} and \textit{Burkea africana}, with \textit{B. africana} being a species which occurred frequently as a pioneer in the disturbed plots. The undisturbed plots 15, 13, and 9 represent distinct outliers in the NMDS. These three plots were dominated by \textit{Baikiaea plurijuga} which was not encountered in the disturbed plots. The most common species in the disturbed plots was \textit{Baphia massaiensis} (n = \nbmdegrad{}), with a mean stem diameter of \bmdbhdegrad{} cm, while in the undisturbed plots the most common species was \textit{Julbernardia paniculata} (n = \njpdegrad{}), with a mean stem diameter of \jpdbhbicuar{} cm. Mean alpha diversity was marginally higher in disturbed plots ($H'$ = \degradshannon{}) than in undisturbed plots ($H'$ = \bicuarsubshannon{}) and an ANOVA showed that there was a significant difference in $H'$ between the two plot types (\lmshannondegrad{}) (\autoref{degrad_box}). Mean plot species richness was also lower in undisturbed plots (\bicuarsubrich{}) than disturbed plots (\degradrich{}). Mean $E_{H'}$ was \degradequit{} in disturbed plots and \bicuarsubequit{} in undisturbed plots but there was no significant difference between disturbed and undisturbed plots according to an ANOVA (\lmequitdegrad{}). \ndegradonlyspecies{} species were found only in the disturbed plots and not in the undisturbed plots. The most common of these were \textit{Combretum celastroides} (n = \nccdegrad{}), \textit{Acacia reficiens} (n = \nvrdegrad{}), and \textit{Gardenia ternifolia} (n = \ngtdegrad{}). \nbigonlyspecies{} species were found only in undisturbed plots, the most common being \textit{Brachystegia spiciformis} (n = \nbsbig{}), \textit{Baikiaea plurijuga} (n = \nbpbig{}) and \textit{Combretum apiculatum} (n = \ncabig{}). Mean basal area was higher in undisturbed plots (\bicuarsubba{} m\textsuperscript{2} ha\textsuperscript{-1}) than disturbed plots (\degradba{} m\textsuperscript{2} ha\textsuperscript{-1}).
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth]{img/bicuar_degrad_nmds}
\caption{NMDS ordination of species composition of 20$\times$50 m (0.1 ha) plots showing plot scores as coloured diamonds located in disturbed (blue) and undisturbed (red) areas of woodland in Bicuar National Park.}
\label{bicuar_degrad_nmds}
\end{figure}
\begin{figure}[H]
\centering
\includegraphics[width=0.8\textwidth]{img/degrad_box}
\caption{The variation in diversity and woodland structure between disturbed and undisturbed 20$\times$50 m (0.1 ha) plots in Bicuar National Park. Boxes bound the 1st and 3rd quartiles, with the median within the box. Whiskers represent 1.5 times the interquartile range plus or minus the 1st and 3rd quartiles, respectively. Values found beyond the whiskers are shown individually as points.}
\label{degrad_box}
\end{figure}
%% If the documentclass option "submit" is chosen, please insert a blank line before and after any math environment (equation and eqnarray environments). This ensures correct linenumbering. The blank line should be removed when the documentclass option is changed to "accept" because the text following an equation should not be a new paragraph.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Discussion}
\subsection{Comparison of Bicuar National Park with other woodlands within the miombo ecoregion}
We compared the tree species diversity and woodland structure of arid woodlands in Bicuar National Park in southwest Angola with three other woodland sites across the miombo ecoregion. Our results show that Bicuar National Park is distinct in both woodland structure and species composition from these other woodlands. Notably, plots in Bicuar National Park contained 27 tree species which did not occur at other sites. This lends support for the Hu\'{i}la Plateau as an important area for conservation of southern African woodland landscapes. The woodlands in Bicuar National Park were of low tree basal area, with few large trees except in plots dominated by \textit{Baikiaea plurijuga}. Many other studies have drawn a relationship between water availability and basal area \citep{Terra2018, Strickland2016}, and our study supports this, with Bicuar National Park being the most arid of the four sites considered in our study. The NMDS of species composition also suggests that plots in Bicuar National Park are influenced by aridity. While there are more arid woodlands within southern Africa, with Mopane woodlands for example often being particularly dry, these plots in Bicuar National park represent particularly dry miombo woodlands.
\subsection{Delineation of woodland types within Bicuar National Park}
Within Bicuar National Park, three distinct woodland types were identified. The first, dominated by \textit{Baikiaea plurijuga} and \textit{Baphia massaiensis} represents the Baikiaea woodland type commonly found to the south of the miombo ecoregion \citep{Timberlake2010}. This is supported by \citet{Chisingui2018} who also found Baikiaea woodlands as a distinct woodland type in the Park. \textit{B. plurijuga} has been identified as an important species for conservation, being attractive for selective logging due to its large stature \citep{Ngandwe2017, Wallenfang2015}. The woodlands created by \textit{B. plurijuga} are also an important habitat for elephants (\textit{Loxodonta africana}) \citep{Sianga2017, Mukwashi2012}, with Bicuar National Park and Mupa National Park being key refugia for this animal in the Hu\'{i}la plateau region. The second woodland type, dominated by \textit{Brachystegia tamarindoides} and \textit{Ochna pulchra} represents a form of small stature woodland with a shrubby understorey and sparse canopy trees, which commonly occurs as a result of repeated disturbance by fire, or poor soil structure \citep{Smith2004}. The remaining plots resemble the more archetypical miombo woodland with \textit{Julbernardia paniculata}, though with a number of species not seen in plots further to the east of the miombo ecoregion such as \textit{Strychnos pungens}. This mosaic of woodland types makes Bicuar National Park a valuable reservoir of diversity and strengthens the case for the Park being a key conservation asset within the Hu\'{i}la plateau and the larger southern African region. While there are regional boundaries between Baikiaea and miombo woodlands \citep{White1983}, within Bicuar National Park it is likely that mosaic of woodland types has been created by a combination of soil water capacity and disturbance history. Bicuar has a distinct landscape of wide shallow grassy valleys surrounded by woodland on higher ground (\autoref{bicuar_map}). On some of these high points the soil is particularly sandy, resembling the Kalahari sand soils found further east and south \citep{Huntley2019}, and these areas coincide with the presence of Baikiaea woodlands \citep{Campbell2002}. High levels of disturbance by fire in these Baikiaea patches may additionally prevent a transition to an alternative woodland type via the control of sapling growth.
\subsection{Comparison of disturbed and undisturbed woodland plots}
Previously disturbed woodlands around the edge of Bicuar National Park were found to share many species with undisturbed plots in the Park, but with some additional species which did not occur in the undisturbed plots. They also lacked notable archetypical miombo species which tend to form larger canopy trees such as \textit{Brachystegia spiciformis} and contained very few \textit{Julbernardia paniculata}, leading to a distinct woodland composition. The species diversity of these disturbed patches was higher than was found in the undisturbed plots, a result which has been corroborated by other studies in miombo woodlands \citep{Caro2001, McNicol2018b, Shackleton2000}. Other studies have shown a peak in species richness during woodland regrowth as pioneer species take advantage of a low competition environment, while some later stage woodland species remain from before the original disturbance \citep{Goncalves2017, Kalaba2013}. This suggests that reclamation of previously farmed and abandoned land for landscape conservation in this ecological context is a valuable management strategy.
In disturbed plots near the edge of the Park, there was a lack of species which tend to grow to large canopy trees, possibly due to them being repeatedly felled for timber prior to reclamation by the Park, or due to them being unable to recruit into a more open, shrubby woodland. Despite this lack of canopy forming tree species, some disturbed plots had a greater basal area than undisturbed plots, possibly due to high levels of coppicing in these plots. Indeed, stem density was higher in undisturbed plots. This can lead to species that would otherwise remain small producing a much larger basal area as they grow multiple stems under high disturbance conditions \citep{Luoga2004}. The most common species in the disturbed plots were \textit{Combretum psidioides}, \textit{Combretum collinum} and \textit{Terminalia sericea}, members of the Combretaceae family all of which more commonly remain as smaller multi-stemmed trees in disturbed woodlands, rather than growing to larger canopy trees \citep{Wyk2014}. This result could be considered at odds with other studies which report lower woody biomass in plots that have experienced harvesting (e.g. \citealt{Muvengwi2020}). It is important to consider however that our study took place in plots that were measured after farming had been abandoned for at least 7 years, with time for regeneration to occur. It is possible that over time tree basal area will decrease as coppiced shrubby trees are replaced by core miombo species in the transition back to miombo woodland \citep{Goncalves2017}. Bicuar National Park offers a valuable case study to track woodland regeneration in real-time over the next decade in these previously farmed and now protected woodland plots, which could improve our understanding of this potential post-disturbance peak in basal area.
In conclusion, the woodlands of Bicuar National Park represent an important woodland refugium at the far western extent of the miombo ecoregion. These woodlands, both those disturbed by previous farming activity and those which remain undisturbed, possess a number of species not found commonly in other miombo woodland plots around the region. They may also house important genetic variation for widespread species, representing populations adapted to more arid conditions. Our study highlights the variation in species composition across the miombo ecoregion and underlines the need for studies which incorporate plot data from multiple locations to reach generalisable conclusions about the region as a whole. Additionally, the installation of 15 one hectare woodland monitoring plots and a further twenty 20$\times$50 m plots in previously farmed and now protected land offers a valuable natural laboratory to further explore the dynamics of the dry miombo woodlands of the Hu\'{i}la plateau. Bicuar National Park should be considered a key conservation asset within the Hu\'{i}la plateau and within the miombo ecoregion as a whole, as a successfully protected example of an arid woodland mosaic.
%\section{Conclusions}
%This section is not mandatory, but can be added to the manuscript if the discussion is unusually long or complex.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%\section{Patents}
%This section is not mandatory, but may be added if there are patents resulting from the work reported in this manuscript.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\vspace{6pt}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% optional
%\supplementary{The following are available online at \linksupplementary{s1}, Figure S1: title, Table S1: title, Video S1: title.}
% Only for the journal Methods and Protocols:
% If you wish to submit a video article, please do so with any other supplementary material.
% \supplementary{The following are available at \linksupplementary{s1}, Figure S1: title, Table S1: title, Video S1: title. A supporting video article is available at doi: link.}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\authorcontributions{Investigation and project administration was conducted by J.L.G., F.M.G., J.J.T. and A.V.T. (Bicuar National Park), C.M.R. (Tanzania, Mozambique), J.I.M. and M.N.S. (DRC). The study was conceived by J.L.G. and K.G.D.. Data curation, methodology, formal analysis and writing--original draft preparation was conducted by J.L.G.. All authors contributed to writing--review and editing.}
%For research articles with several authors, a short paragraph specifying their individual contributions must be provided. The following statements should be used ``conceptualization, X.X. and Y.Y.; methodology, X.X.; software, X.X.; validation, X.X., Y.Y. and Z.Z.; formal analysis, X.X.; investigation, X.X.; resources, X.X.; data curation, X.X.; writing--original draft preparation, X.X.; writing--review and editing, X.X.; visualization, X.X.; supervision, X.X.; project administration, X.X.; funding acquisition, Y.Y.'', please turn to the \href{http://img.mdpi.org/data/contributor-role-instruction.pdf}{CRediT taxonomy} for the term explanation. Authorship must be limited to those who have contributed substantially to the work reported.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\funding{Final data preparation across all sites was funded by SEOSAW (a Socio-Ecological Observatory for the Southern African Woodlands), a NERC-funded project (Grant No. NE/P008755/1). The installation of woodland plots in Bicuar National Park and their data collection was funded by the National Geographic Society (Grant No. EC-51464R-18) to FMG, AVC, KGD and JLG. JLG was supported by a NERC E3 Doctoral Training Programme PhD studentship (Grant No. NE/L002558/1). The APC was funded by the University of Edinburgh.}
% APC = Article processing charge
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\acknowledgments{The rangers at Bicuar National Park are gratefully acknowledged for their help in installing the woodland survey plots and for their help with numerous other incidental challenges during fieldwork. Domingos Fortunato P. F\'{e}lix da Silva, Abel C. E. Cahali, Felisberto Gomes Armando, Jos\'{e} Cam\^{o}ngua Lu\'{i}s, Manuel Jundo Cachissapa and Henrique Jacinto are acknowledged for their help in conducting plot measurements in Bicuar National Park.}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\conflictsofinterest{The authors declare no conflict of interest. The funders had no role in the design of the study; in the collection, analyses, or interpretation of data; in the writing of the manuscript, or in the decision to publish the results.}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% optional
\abbreviations{The following abbreviations are used in this manuscript:\\
\noindent
\begin{tabular}{@{}ll}
MAP & Mean Annual Precipitation\\
MAT & Mean Annual Temperature\\
NMDS & Non-metric Multidimensional Scaling\\
DD & Decimal Degrees\\
ANOVA & Analysis of Variance\\
\end{tabular}}
\newpage{}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% optional
\appendixtitles{yes} %Leave argument "no" if all appendix headings stay EMPTY (then no dot is printed after "Appendix A"). If the appendix sections contain a heading then change the argument to "yes".
\appendix
\section{Estimation of stem diameter at 1.3 m via tree taper} \label{appendixa}
\begin{lstlisting}[language=R]
##' @author Casey M. Ryan
##' @return d130, the estimated diameter at a POM of 1.3 m (in cm).
##' @param d_in the diameter measured at the POM (in cm)
##' @param POM the height of the POM (in m)
##' @details The adjustment is based on a tree taper model developed as part of
##'   the ACES project (Abrupt Changes in Ecosystem Services,
##'   https://miomboaces.wordpress.com/), using data from the miombo of Niassa.
##'   The model is a cubic polynomial, with three equations for different sized stems.
##' @section Warning: POMs >1.7 m are not adjusted.
POMadj <- function(d_in, POM) {
  stopifnot(is.numeric(d_in),
            is.numeric(POM),
            POM >= 0,
            sum(is.na(POM)) == 0,
            length(POM) == length(d_in))
  if (any(POM > 1.7))
    warning("POMs >1.7 m are outside the calibration data, no correction applied")
  NAS <- is.na(d_in)
  d_in_clean <- d_in[!NAS]
  POM_clean <- POM[!NAS]
  # define the size class edges:
  edges <- c(5.0, 15.8, 26.6, 37.4)
  sm  <- d_in_clean < edges[2]
  med <- d_in_clean >= edges[2] & d_in_clean < edges[3]
  lg  <- d_in_clean >= edges[3]
  # compute predictions for delta_d, for all size classes
  delta_d <- data.frame(
    # if small:
    small = 3.4678 - 5.2428 * POM_clean + 2.9401 * POM_clean^2 -
      0.7141 * POM_clean^3,
    # if med:
    med = 4.918 - 8.819 * POM_clean + 6.367 * POM_clean^2 -
      1.871 * POM_clean^3,
    # if large:
    large = 9.474 - 18.257 * POM_clean + 12.873 * POM_clean^2 -
      3.325 * POM_clean^3
  )
  # index into the right size class
  dd <- NA_real_
  dd[sm]  <- delta_d$small[sm]
  dd[med] <- delta_d$med[med]
  dd[lg]  <- delta_d$large[lg]
  dd[POM_clean > 1.7] <- 0  # to avoid extrapolation mess
  # add NAs back in
  d130 <- NA
  d130[NAS]  <- NA
  d130[!NAS] <- d_in_clean - dd
  if (any(d130[!NAS] < 0))
    warning("Negative d130 estimated, replaced with NA")
  d130[d130 <= 0 & !is.na(d130)] <- NA
  return(d130)
}
\end{lstlisting}
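A minimal usage sketch of \texttt{POMadj()} follows; the diameters and POM heights are invented for illustration only and are not data from this study.
\begin{lstlisting}[language=R]
# Illustration only: invented diameters (cm) and POM heights (m)
d_pom <- c(12.4, 28.0, 40.1, NA)
pom   <- c(0.5, 1.3, 1.6, 1.3)
POMadj(d_in = d_pom, POM = pom)  # estimated diameters at 1.3 m; NA stays NA
\end{lstlisting}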
% The appendix is an optional section that can contain details and data supplemental to the main text. For example, explanations of experimental details that would disrupt the flow of the main text, but nonetheless remain crucial to understanding and reproducing the research shown; figures of replicates for experiments of which representative data is shown in the main text can be added here if brief, or as Supplementary data. Mathematical proofs of results not central to the paper can be added as an appendix.
% All appendix sections must be cited in the main text. In the appendixes, Figures, Tables, etc. should be labeled starting with `A', e.g., Figure A1, Figure A2, etc.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\reftitle{References}
% Please provide either the correct journal abbreviation (e.g. according to the “List of Title Word Abbreviations” http://www.issn.org/services/online-services/access-to-the-ltwa/) or the full name of the journal.
% Citations and References in Supplementary files are permitted provided that they also appear in the reference list here.
%=====================================
% References, variant A: external bibliography
%=====================================
\externalbibliography{yes}
\bibliography{lib.bib}
% To cite two works by the same author: \citeauthor{ref-journal-1a} (\citeyear{ref-journal-1a}, \citeyear{ref-journal-1b}). This produces: Whittaker (1967, 1975)
% To cite two works by the same author with specific pages: \citeauthor{ref-journal-3a} (\citeyear{ref-journal-3a}, p. 328; \citeyear{ref-journal-3b}, p.475). This produces: Wong (1999, p. 328; 2000, p. 475)
\end{document}
\documentclass[10pt]{article}
\usepackage[english]{babel}
\usepackage{gb4e}
\begin{document}
\begin{center}
{\bf \huge
TOPOLOGY AND BEHAVIOUR OF AGENTS: CAPITAL MARKETS
}
\end{center}
\begin{center}
O. Hudak
\end{center}
\begin{center}
Faculty of Finance, Matej Bel University, \\ Cesta na amfiteater 1, SK-974 15 Banska Bystrica, Slovak Republic,\\
e-mail: [email protected]
\end{center}
\newpage
\section*{Abstract}
On a capital market the social group is formed by traders. The individual behaviour of agents is influenced by the need to associate with other agents and to obtain the approval of other agents in the group. Making decisions, an individual equates his or her own needs with those of the other agents in the group. Any two agents from the group may interact. The interaction consists of the exchange of information and it costs some money. We assume that agents interact in such a way that they give a reference to the origin of the information if asked by other agents. Thus an agent may verify obtained private information. Hudak recently used methods described by Rivier to study the social behaviour of such agents. He characterized the quantity which corresponds to verification of information. Here we study a capital market and the social behaviour of agents. The quantity which characterizes verification of information contributes to the aversion of an agent with respect to risk. The mix of investments of an agent in a given cell with an average measure A of risk aversion in the cell is found from the minimum of the average per-cell aim function $<FM>$. The absolute minimum corresponds to a state in which there is an optimal mix of the exchange of information for given expectations about the capital market. The crowd and personal /$\approx <f>$/ contributions to the risk aversion of an agent are present in the aversion constant A. We discuss stable and metastable states of the market for different values of E, the expected return for a given investment period, of EV, the expected risk for a given investment period, and of b, a constant which characterizes the contribution of the quantity $<f>$ to the risk aversion. Characteristics of the variance of n for the distribution of nonreducible subgroups are found. Our model describes intermediary process effects.
\newpage
\section{Introduction}
In a social group its members are called agents.
The social group is characterized in general in accordance with
\cite{1} - \cite{5}: we assume that individual behaviour
of agents is influenced to some degree by the need to associate
with other agents and to obtain the approval of other agents in
the group which is characterized by a very large nonrational
and emotional element to decisions of agents. Making
decisions an individual equates own needs with those
of the other agents from the group. We assume that any two agents from the group
may interact. The interaction consists of the exchange of information
and it costs some "energy", i.e. there are costs associated with the exchange of information.
We assume that the information is well defined, and we
assume that agents interact in such a way that they give reference
to the origin of the information if asked by other agents.
The information is private \cite{6}. Public information is not studied in this paper.
Thus there is present information asymmetry.
We assume that the agent may verify obtained information.
It means that the agent gives references to the two and only two agents (here we assume two and only two for simplicity) with which he/she exchanged information. These two agents may then be called by the law court to give evidence about the information which they exchanged with the agent in a case of a law trial. Unverifiability is itself a special kind of information problem \cite{6}. Then there exists a subgroup of interacting agents the interaction
of which has the following property: it is nonreducible \cite{5}.
A cell is such a configuration of a number of nonreducible
subgroups in which every two interacting agents belong to two
nonreducible subgroups /subgroups are connected in this sense/
and which is closed. Such a cell may disappear and may be created,
and may change the number of nonreducible subgroups in a reversible way.
Because the structure, i.e. the configuration of interactions between
agents in the group, forms a macroscopic structure, we say that any process
within a nonreducible subgroup and within a cell is a microreversible process.
Statistical equilibrium of the whole group
is characterized by a set of different subgroups of the type mentioned
above and by a probability that such a subgroup occurs. Thus we have
probability distribution which characterizes the group. Moreover
there exists an equation of state which enables to compare different
macroscopic states of the group. The statistical equilibrium due
to microreversibility is characterized by the maximum of entropy
and by the minimum of costs of information exchange minus a return.
We used \cite{5}
methods of statistical physics to study social behaviour of agents,
mainly the presence of topological structure of interactions between
agents and its changes, which is the most important property of
the group of agents. There are three empirically observed dependencies of personal radius which enabled us
to characterize the quantities of cells,
faces /nonreducible subgroups/, vertices /agents/ and bonds /interactions/ \cite{2}.
There exist constrains, such as a fixed number V of agents in
the group, a number E of interactions within the group, a number F
of subgroups which are nonreducible /faces/, and a number C of cells. Thus
we have a structure which is equivalent to random cellular networks.
Such networks and their evolution were described by Rivier,
\cite{3} and \cite{4}. Rivier applied methods
of statistical mechanics to study these networks.
Three empirically observed dependencies of personal radius dependence on some factors
enabled us \cite{5} to characterize the quantity F which characterizes verification of information.
In this paper we study the capital market and the social behaviour of agents. Quantity F, and in general $<f>$, the average number of nonreducible subgroups per cell, is the
quantity which characterizes verification of information and thus contributes to aversion of agent with respect to a risk.
The mix of investments of an agent in a given cell with an average measure A of risk aversion in the cell is found from
extremes of the function FM, the aim function. We find the minimum of the average /per cell/ function $<FM>$. An absolute minimum of the function $<FM>$ corresponds to such a state
in the group of agents in which there is an optimal mix of exchange of information for a given expectation of return and a given expectation of risk on the capital market.
Finding a minimum of the aim function $<FM>$ with respect to the parameter $ <n> $ of the group of agents we find the optimal mix of exchange of information. We assume that the number of nonreducible subgroups F per cell $<f>$ contributes linearly to aversion constant A, $ (A = A_{0} + (<f> - 4).b)$. The crowd /$A_{1} = A_{0} - 4.b $/ and personal /$\approx <f>$ / contributions to the risk aversion of an agent are thus present. We use $\frac{<f>.<n>}{3}$ as an average number of agents per cell.
In this paper we studied the capital market and the social behaviour of agents. We have formulated a model describing the behaviour of agents on the capital market.
We have found that a pyramidal structure appears in the group, $<f> = 4$, then $A = A_{0} $.
This type of structure corresponds to hierarchical economy systems.
We have found that when the structure
contains topologically only one cell then $<n> = 6$ and A tends to infinity. Better verification of information leads to expectation of higher returns and thus the acceptable risk is larger.
This type of structure corresponds to market-based economy systems.
In practice both market- and hierarchy- based systems co-exist in modern economy. There exist open markets and there exist large organizations.
We have found that this co-existence corresponds to a state for which the minimum of the aim function is in the range $ 3 < <n> < 6$. In our model the market-based system corresponds to a state with $<n> = 6$ and the hierarchy-based system to a state with $<n> = 3$.
The mix of investments of an agent in a given cell with an average measure of risk aversion in the cell is found from
extremes of the function FM, the aim function. In our model we included the intermediary property of firms - cells. Thus we have formulated a general model for the topology of exchange of information and the behaviour of agents on capital markets which may take the intermediary property into account. We find the minimum of the average /per cell/ function $<FM>$ for the simpler case in which there is zero intermediation. Thus we assume in this paper that, in our model, individual investors and agents from financial intermediary firms do not differ. The more general case will be studied elsewhere.
The extreme of the aim function $<FM>$ is minimum for some conditions on the market and in the group of agents. This minimum corresponds to $<n>$ between 3 to 6 as we mentioned above.
This is a stable state. The extreme may also be a maximum. Then there may be two minima of the function FM or one minimum. One of them is the one which corresponds to $<n> = 3$ /the pyramidal hierarchical structure/, which is either a metastable state or a stable state,
depending on conditions on the market and in the group of agents.
The other one corresponds to $<n> = 6$, which is a stable state. We call markets of the second type efficient capital markets. We have characterized the variance of n for the distribution of nonreducible subgroups. This variance diverges to infinity as $<n>$ tends to 6. Thus the market-based economy system with $ <n> $ near
or equal to 6 has a very large variance of the number of agents in nonreducible subgroups.
\section{Capital Market and Social Behaviour of Agents}
We know \cite{5} that a personal diameter r increases when F decreases. We generalize this relation to every cell: we assume that the personal diameter r increases when $<f>$, the average number of nonreducible subgroups per cell, decreases; $<f>$ is the average number of faces per cell in the language of topology. Quantity F, and the generalizing quantity $<f>$, is the quantity which characterizes verification of information. The larger F, and generalizing $ <f> $, the more verified the information in general, i.e. not only a specific piece of information exchanged by members of a subgroup. Aversion of an agent to risk on capital
markets also contains characterization of verification of information. We assume that the number of nonreducible subgroups F per cell, $<f>$, contributes linearly to this aversion constant A:
\begin{equation}
\label{1}
A = A_{0} - 4.b + <f>.b .
\end{equation}
Here $ A_{0}$ is a risk aversion constant for $<f> = 4$ when $A = A_{0} $, b is a constant which characterizes contribution
of the quantity $<f>$ to the risk aversion. The larger quantity $<f>$ the better verification of information, the larger expected return contribution of the investment and thus the larger acceptable risk.
Note that when the pyramidal structure appears in the group, $<f> = 4$, then $A = A_{0} $.
This type of structure corresponds to hierarchical economy systems; for these systems see \cite{6}. The aversion constant A has its minimum value, as concerns the dependence on the quantity $ <f> $, in this case. Then a lower risk is acceptable and correspondingly a lower profit is also acceptable, especially for a low $ A_{0} $ constant. The constant A does not contain a contribution from the verification of information. Due to the very low risk in this case, price signals do not work in these systems. Information on resources, aims and objectives is flowing /being exchanged/ through the hierarchy to the decision makers.
Note that when the structure
contains topologically only one cell then $<n> = 6$ and A tends to infinity. Better verification of information leads to expectation of higher returns and thus the acceptable risk is larger.
This type of structure corresponds to market-based economy systems, for these systems see in \cite{6}. They work via price signals.
We interpret $(<f>.b)$ as an average measure of risk aversion of a personal /individual/ contribution to risk aversion different from the crowd contribution of this person $ A_{0} - 4.b $ in the cell, because F and $<f>$ are characteristics of verification of information. Trader - agent supplies or demands goods and services /on the capital market shares or bonds/ if the market price exceeds or undervalues his/her own valuation of shares or bonds. To make own valuation of shares or bonds the agent individually verifies information on it.
In practice, as noted in \cite{6}, both types of system involve interaction of people: in the market system they interact as traders, in the hierarchy as agents within an organization. Both market- and hierarchy- based systems co-exist in modern economy \cite{6}: we have open markets and we also have large organizations such as the joint-stock company.
\section{A financial intermediary}
In market economies, banks and other financial firms play the key role of providing financial intermediation and transaction services. The intermediation process is well understood by considering how a hypothetical market economy might function without financial intermediaries \cite{7}. For this economy it is assumed for simplicity that government receipts equal outlays and that exports of goods and services equal imports. It is also assumed that all saving is by households and all investment is by businesses. Household claims on business may be in the form of debt or equity capital. This is a minor additional difference from reality \cite{7}. The level of interest rates and the associated valuation of equity in this economy is determined by the supply of and demand for savings; the influence of any monetary authority is not taken into account \cite{7}. In the absence of intermediaries households would have to hold their savings in the form of equity or debt claims on specific firms.
They are limited in their ability to diversify their holdings by the high costs of obtaining information about many different companies and of dealing with a large volume of small-denomination securities \cite{7}. Thus each household would be exposed to the large risk of the particular firms in which it invested, which could result in a loss of most or all of its savings. Such a large risk would lead to a high rate of interest and cost of capital. A reduction of their risk would make households willing to accept a lower rate of interest. The financial intermediaries play a key role \cite{7} in reducing the risks faced by households /individual savers/, by pooling their savings and using these to assemble diversified portfolios of assets. Diversification is such that the risks of bankruptcy of different firms do not depend on the same economic conditions specific to a firm, industry, geographic region or the entire economy. This diversification requires specialized knowledge and expertise, and a large portfolio. Low, or even negative, correlation among the returns on different assets in the portfolio, and a larger number of these assets, reduce risk. Thus intermediaries can pay a lower rate of interest to households. Competition among financial intermediaries, together with the lower rate of interest paid to households, leads to lower costs of financing firm enterprises \cite{7}.
We will use this hypothetical market economy in this paper to describe the intermediation process quantitatively.
In our model of a capital market every point /vertex/ corresponds to an agent acting on the market. In the simplified model economy described above these agents would be households. Financial intermediaries, which lead to lower costs, will be described in our model as cells with a given risk aversion constant of the firm, from which the risk aversion constants of agents in this firm - cell should not deviate too far. Some other cells are cells with a zero risk tolerance cell constant; agents in these cells correspond to households /individual savers - agents/. Thus our model describes not only households but also financial intermediaries.
\section{The mix of investment and interaction of agents}
We will assume that every agent has its objective to maximize its expected
utility of wealth \cite{8}. If returns are normally distributed and the investor has constant absolute risk aversion, then expected utility can be written as \cite{9}:
\begin{equation}
\label{2'}
EU = - \exp{\left(-c\left(E-\frac{c}{2}.EV\right)\right)}
\end{equation}
where the risk tolerance constant is $c = \frac{1}{A}$, A is the risk aversion constant, E is the expected value of end-of-period wealth, EV is the expected variance of end-of-period wealth. To make this utility as large as possible, one maximizes \cite{10}:
\begin{equation}
\label{2''}
E - \frac{c}{2} . EV.
\end{equation}
This leads to maximum expected utility of an agent, assuming that the risk aversion is constant.
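For concreteness, the criterion (\ref{2''}) can be illustrated by a minimal numerical sketch (the returns, variances and risk tolerance below are invented values, not part of the model): an agent with risk tolerance c simply ranks candidate investment mixes by $E - \frac{c}{2}.EV$ and picks the highest-ranked one.
\begin{verbatim}
# Illustration only: ranking candidate mixes by E - (c/2)*EV,
# with invented numbers.
c_tol <- 0.5                   # risk tolerance constant c = 1/A
E     <- c(0.04, 0.07, 0.10)   # expected end-of-period returns of three mixes
EV    <- c(0.01, 0.04, 0.12)   # expected variances of the three mixes
score <- E - (c_tol / 2) * EV  # the criterion maximized above
which.max(score)               # index of the mix the agent picks
\end{verbatim}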
Agents are interacting; some of them correspond to households /individual investors/, some of them are from the financial intermediary firms. In our model we will assume that every agent has the same E /the expected value of end-of-period wealth, in percent/ and the same EV /the expected variance of end-of-period wealth, in percent/. Thus c is a number, and A is also a number. In reality this assumption is not true; there exists dispersion of the quantities E and EV. By assuming the same E and EV we are assuming that they take the mean values of their dispersions.
We would like to consider not only maximization of expected utility of an agent, but also optimization of interactions of agents in such a way in which interacting agents corresponding to households and to intermediary financial firms maximize their expected utility.
While the first case corresponds to maximization of (\ref{2''}), the second case is more general.
To maximize the expected utility of an agent in fact it is necessary to maximize the function:
\begin{equation}
\label{2'1}
fM = c(E-\frac{c}{2}.EV)
\end{equation}
In this function there is a constant c. This constant is dependent on the constant A which depends on the quantity $<f>$. Optimization of interactions of agents in such a way in which interacting agents corresponding to households and to intermediary financial firms maximize their expected utility leads to optimization of (\ref{2'1}) for every agent taking into account the structure of a cell. The cell may represent agents - households, but also a financial intermediary.
The first cell will be described in our model as a cell with different aversion constants for the agents in this cell; the second one will be described in our model as a cell with almost the same aversion constant for all agents in this cell.
To model this fact we will optimize the following function:
\begin{equation}
\label{2'11}
fM = \sum_{i=1}^{i=V_{C}} c_{i}(E-\frac{c_{i}}{2}.EV) - \sum_{i=1}^{i=V_{C}} \gamma (c_{i}-c_{B})^{2}
\end{equation}
Here $V_{C}$ is the number of agents in the cell C.
The first sum is a sum of terms of the type (\ref{2''}); the second term is a sum which describes how far the constants $c_{i}$ of agents are from a constant $ c_{B} $ which is characteristic for a given cell. The constant $\gamma$ is a positive or zero constant. If it is zero, then the function fM is a sum of functions fM for single agents. This corresponds to individual investors in the sense that their risk aversion constants are different, i.e. individual. If it is nonzero, then the function fM is a sum of functions fM for agents which are not individual investors, in the sense that their risk aversion constants do not differ too much. These agents are agents from an intermediary financial firm characterized by a constant $ c_{B} $. If the constant $\gamma$ is very large, then all the constants $c_{i}$ in this cell tend to the same value $ c_{B} $ characteristic for the firm.
To interpret the second sum in terms of expected returns and expected variances let us rewrite the function fM (\ref{2'11}) in the form:
\begin{equation}
\label{2'111}
fM = \sum_{i=1}^{i=V_{C}} (c_{i}((E + 2.c_{B}.\gamma)-\frac{c_{i}}{2}.(EV + 2.\gamma)) - c_{B}^{2}. \gamma )
\end{equation}
We see that the second term in (\ref{2'11}) corresponds to higher total expected value $E + 2.c_{B}.\gamma$ in which besides an expected value E of the individual investor a new contribution $2.c_{B}.\gamma$ appears. This contribution is due to the firm financial intermediary, due to diversification which is such that risks of bankruptcy of different businesses do not depend on the same economic conditions specific to the firm, industry, geographic region or the entire economy.
We see that the expected return of an individual investor should be higher to have the same expected value as total expected return /we are taking always a return for a given period/ for the financial intermediary firm. This diversification requires, as written above, specialized knowledge and expertise, and a large portfolio. The higher risk tolerance constant $ c_{B} $ of the firm leads to the higher total expected return.
We see further that the second term in (\ref{2'11}) corresponds to higher total expected risk $EV + 2.\gamma$, in which besides an expected variance EV of the individual investor a new contribution $2.\gamma$ appears. This contribution is again due to firm financial intermediary, due to
diversification which is such that risks of bankruptcy of different businesses do not depend on the same economic conditions specific to the firm, industry, geographic region or the entire economy. This diversification requires, as written above, specialized knowledge and expertise, and a large portfolio thus larger expected risk may be accepted.
We see that the expected risk of an individual investor should be higher to have the same expected risk as total expected risk for the financial intermediary firm.
The stronger the tendency /the higher the value of the constant $\gamma$/ in the firm to have the same risk tolerance constant $ c_{B} $ for all agents in the firm, the higher the total expected variance - risk. The constant contribution $- c_{B}^{2}. \gamma$ gives a term in fM which is proportional to the number of agents in the cell - firm. This contribution is larger for a larger risk tolerance constant $ c_{B} $ /in absolute value/ and is negative.
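The algebraic equivalence of (\ref{2'11}) and (\ref{2'111}) can also be checked numerically; the following sketch uses arbitrary invented values and is only a consistency check, not part of the model.
\begin{verbatim}
# Numerical check that the two forms of fM above agree; all values invented.
set.seed(1)
E <- 0.08; EV <- 0.05; gamma <- 0.3; cB <- 0.6
ci <- runif(5, 0.2, 1.0)   # risk tolerance constants of 5 agents in a cell

fM1 <- sum(ci * (E - ci / 2 * EV)) - sum(gamma * (ci - cB)^2)
fM2 <- sum(ci * ((E + 2 * cB * gamma) - ci / 2 * (EV + 2 * gamma)) -
           cB^2 * gamma)
all.equal(fM1, fM2)        # TRUE: the two forms are identical
\end{verbatim}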
Optimization of (\ref{2'111}) for given E, EV and $ c_{B} $ leads to optimization of the number of agents in the cell and to optimization of verification of information which is characterized by f. Correspondingly optimization for the whole group /agents on capital market/ for given E, EV and $ c_{B} $ leads to optimization of the number of agents in the cells and to optimization of verification of information on the market, which is characterized by $<f>$.
Then structure of exchange of information in the group of agents will be such that we obtain the highest value of the function fM.
We see further that the second term in (\ref{2'11}) for $\gamma$ very large leads to all risk tolerance constants in a cell to be the same.
We see also that the second term in (\ref{2'11}) for $\gamma = 0$, i.e. for vanishing $\gamma$,
leads to all risk tolerance constants in a cell to be different.
To obtain an aim function FM for such a cell, we will consider function fM from (\ref{2'11}) with negative sign.
The mix of investment of a single cell in the mean field approximation is found from the function:
\begin{equation}
\label{2}
<FM> = (- c.E_{B} + \frac{c^{2}}{2}EV_{B}) \frac{<f>.<n>}{3} + \gamma . c_{B}^{2}. \frac{<f>.<n>}{3},
\end{equation}
where $E_{B} = E + 2 \gamma c_{B}$ is an expected return for a given investment period modified by a contribution of the return from the cell /firm/ financial intermediary, $EV_{B} = EV + 2 \gamma $ is an expected risk for a given investment period modified by contribution of the risk from the cell /firm/ financial intermediary, E and EV is an expected return and an expected risk respectively for a given investment period. Here $c_{B}$ is a risk tolerance constant given for the cell.
The mix of investment of a given cell /financial intermediary firm/ with an average measure c of risk tolerance of a person /which is inversely equal to a crowd contribution of a person plus a personal /individual/ contribution of this person in the cell/ is found from the average /per cell/ function $<FM>$ above (\ref{2}). Let us now study the case $\gamma = 0$, which does not distinguish between individual investors /for which $\gamma = 0$ / and a financial intermediary firm.
\section{The case in which individual investors and a financial intermediary firm are not distinguished}
We now study the case in which individual investors and a financial intermediary firm are not distinguished. Thus we study the case in which a financial intermediary in the capital market is not considered; all agents of such a financial intermediary firm behave as individual investors. The case $\gamma > 0$, which does distinguish between individual investors /for which $\gamma = 0$ / and a financial intermediary firm /for which $\gamma > 0$ /, will be studied elsewhere.
Thus the equation for the aim function FM is found from (\ref{2}):
\begin{equation}
\label{3}
<FM> = (- c.E + \frac{c^{2}}{2}EV)\frac{<f>.<n>}{3},
\end{equation}
where $\frac{<f>.<n>}{3}$ is an average number of agents per cell. From (\ref{1}) and (\ref{3}) it has the form, note that $c = \frac{1}{A} = \frac{1}{A_{0} - 4.b + <f>.b}$:
\begin{equation}
\label{4}
<FM> = (- \frac{E}{A_{0} - 4.b + <f>.b} + \frac{EV}{2.(A_{0} - 4.b + <f>.b)^{2}})\frac{<f>.<n>}{3}
\end{equation}
and where for an equilibrium structure with a given number of cells C, of faces F, of interactions E, and of agents V, see \cite{3} and \cite{4}:
\begin{equation}
\label{5}
<f>= \frac{12}{(6-<n>)},
\end{equation}
and where $A_{1} = A_{0} - 4.b$. From the equation for the aim function FM (\ref{4}) we will now find which numbers of cells C, of faces F and of interactions E minimize it; here the number of agents V is given. It means that FM is minimized with respect to $<n>$, the number of interactions in a nonreducible subgroup /face/. Thus we are looking for such a structure of agents on the capital market which gives the lowest value of the aim function FM, taking into account their interaction by exchanging information. The case which does distinguish between individual investors and a financial intermediary firm is more complicated and will be studied elsewhere.
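As a purely illustrative numerical sketch (all parameter values invented), $<FM>$ from (\ref{4}) with (\ref{5}) can be evaluated on a grid of $<n>$ in $[3,6)$ and the minimizing $<n>$ located directly:
\begin{verbatim}
# Illustration only: evaluate <FM>(<n>) using <f> = 12/(6 - <n>) and
# A = A0 - 4b + <f> b, then locate the numerical minimizer on [3, 6).
FMbar <- function(n, E, EV, A0, b) {
  f <- 12 / (6 - n)
  A <- A0 - 4 * b + f * b
  (-E / A + EV / (2 * A^2)) * f * n / 3
}
E <- 0.08; EV <- 0.04; A0 <- 0.5; b <- 0.4   # invented parameter values
n_grid <- seq(3, 5.99, by = 0.01)
n_grid[which.min(FMbar(n_grid, E, EV, A0, b))]
\end{verbatim}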
\section{Extremes of the function FM}
The function FM has extremes. The absolute minimum corresponds to a state in which there is an optimal mix. There exists an extreme of FM, $<n>'$, which is given by:
\begin{equation}
\label{6}
<n>'= 6.\frac{(EV (A_{0}-2b)- 2E(A_{0}-2b)^{2})}{( EV.A_{0}-2E(A_{0}-4b)(A_{0}-2b))}.
\end{equation}
Note that when the contribution of nonreducible subgroups is absent, $b=0$, then $<n>'=6$. Thus only
one cell exists, with many nonreducible subgroups, in this case.
Note that when the crowd and personal contributions to the risk aversion of an agent are present, then $b>0$, $<n>' \neq 6$ and more than one cell may exist in the group. There are several subgroups - cells. Note that
when the crowd risk aversion is zero, $A_{0} = 4.b $, then:
\begin{equation}
\label{6.1}
<n>'= -3(1 + \frac{4bE}{EV})
\end{equation}
which is less than 3, for E and EV positive. The quantity $ <n>' $ tends to 0 when $A_{0}$ tends to $2.b $. Many cells exist in the group - pyramidal structure may exist
for $ <n>' = 3 $. Thus it is necessary to discuss minima of the aim function
FM.
\section{The extreme is minimum}
The extreme may be an absolute minimum for $A_{0} < 2b$. This is the case when the tendency to verify information is stronger. The extreme (\ref{6}) is now minimum if:
\begin{equation}
\label{7}
2E(2b - A_{0})> EV > E \frac{A_{0}}{b} (2b - A_{0})
\end{equation}
and here the state with $<n> = 6$ has lower value of the aim function than the state $<n> = 3$.
The extreme is also minimum if:
\begin{equation}
\label{7.1}
E \frac{A_{0}}{b} (2b - A_{0}) > EV \geq 2E \frac{A_{0}(2b -A_{0})}{4b - A_{0}},
\end{equation}
and here the state with $<n> = 3$ has now lower value of the aim function than the state $<n> = 6$.
In both cases the extreme is a state in which some type of hierarchical structures exists locally in the group, the group is not hierarchical as whole.
The extreme - minimum gives the average number of agents per nonreducible subgroup, which is less than 6 and larger than 3.
The extreme (\ref{6}) is also minimum if:
\begin{equation}
\label{7.2}
EV > 2E(2b - A_{0}).
\end{equation}
Here, however, the state with $<n> = 6$ is the minimum because the condition $3 \leq <n> \leq 6$ is not fulfilled for $<n>'$; this extreme is larger than 6.
In this case the minimum of the aim function with $<n> = 6$ is a state in which there is no type of hierarchical structure locally present in the group.
The extreme (\ref{6}) is also minimum if
\begin{equation}
\label{7.3}
0 \leq EV < E \frac{A_{0}}{b} (2b - A_{0}).
\end{equation}
Here the state with $ <n> = 3 $ is the minimum due to the condition that $ <n> $ is from the interval $ 3 \leq <n> \leq 6 $; the extreme has a lower value than 3. In this case the aim function with $ <n> = 3 $ has its minimum value in this interval. The state $ <n> = 3 $ corresponds to a hierarchical state.
\section{The extreme is maximum}
The extreme may be an absolute maximum for $A_{0} > 2b$. In this case the state $<n> = 6$ has always lower value of the aim function than the state with $<n> = 3$.
The extreme (\ref{6}) is a maximum if, for $A_{0} > 4b$:
\begin{equation}
\label{8}
EV > 2E \frac{A_{0}(A_{0}-2b)}{A_{0}-4b}
\end{equation}
and then there exist two states which minimize the function FM /i.e. maximize return and minimize risk/.
The first one is with $<n> = 3$.
It gives the average number 3 of agents per nonreducible subgroup; this number means that there is a pyramidal /hierarchical/ structure.
The second one is with $<n>=6$.
It gives the average number 6 of agents per nonreducible subgroup; the value of the aim function FM is such that this state is much more stable than the state above /the state corresponding to $<n> = 3$/, which is a metastable state. The number $<n> = 6$ means that there is a one-cell structure. Efficient capital markets are markets which are of the second type /strong stability, conservative and aggressive agents are
present on the market, return is high/ in our paper.
The extreme (\ref{6}) is also a maximum if, for $A_{0} > 4b$:
\begin{equation}
\label{8.1}
2E \frac{A_{0}(A_{0}-2b)}{A_{0}-4b} > EV > 2E \frac{(A_{0}-4b)(A_{0}-2b)}{A_{0}}
\end{equation}
however, then there exists only one state which minimizes the function FM /i.e. maximizes return and minimizes risk/ for $3 \leq <n> \leq 6$. The state with $<n> = 3$, which was a metastable state, is now not metastable. The minimum of the aim function FM is at $<n>=6$.
Efficient capital markets are markets which are of this type.
The extreme (\ref{6}) is a maximum if, for $4b > A_{0} > 2b$:
\begin{equation}
\label{8.2}
EV < 2E \frac{A_{0}(A_{0}-2b)}{A_{0}-4b}
\end{equation}
This inequality cannot be fulfilled because the right-hand side is negative for a positive expected return E and a positive expected risk EV.
The extreme (\ref{6}) is a maximum if, for $4b > A_{0} > 2b$:
\begin{equation}
\label{8.3}
EV > 2E \frac{A_{0}(A_{0}-2b)}{A_{0}-4b}
\end{equation}
This inequality is always fulfilled. Thus the state with $<n>=6$ is now the state with the minimum aim function. There is no metastable state in this case.
\section{Aboav relation for the case of capital markets}
The Aboav relation \cite{4} for the case of capital markets reads:
\begin{equation}
\label{9}
n.m(f,n)= 5.f - 11 - K.(f - 1 - n),
\end{equation}
where K is a parameter of the group independent of f. Aboav's law describes how many agents, /$m(f,n)$/, are
present on average in a nonreducible subgroup of a cell neighbouring a cell with f nonreducible subgroups with average agent number n.
The variance of n for the distribution of agents in nonreducible subgroups of a cell with f nonreducible subgroups is, due to Weaire \cite{4}:
\begin{equation}
\label{10}
<(n - <n>)^{2}> = <n^{2}> - <n>^{2}
\end{equation}
This variance may be calculated from m(f,n), which according to Weaire \cite{4} is equal to n, and we obtain:
\begin{equation}
\label{11}
<n^{2}> = <n.m(f,n)> = 5.<f> - 11 - K.(<f> - 1 - <n>)
\end{equation}
\begin{center}
$= \frac{60}{6 - <n>} - 11 - K.(\frac{12}{6 - <n>} - 1 - <n>)$.
\end{center}
We see that for states in which $<n>$ tends to 6, the variance diverges as $ \frac{12.(5-K)}{6 - <n>}$.
Note that $- 1 \leq K \leq 2$, \cite{3} and \cite{4}.
Note that the structures /stable states/ in both examples on the capital market are such that their /informational/ entropy is maximal /risk is minimal/ and their return is maximal, minimizing the function FM.
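The divergence of the variance as $<n>$ tends to 6 can also be seen numerically; the sketch below plots the variance implied by (\ref{10}) and (\ref{11}) for an invented value of K in the allowed range.
\begin{verbatim}
# Illustration only: variance of n from <n^2> - <n>^2 with <n^2> given by
# the expression above, for an invented K in [-1, 2].
K <- 1
n <- seq(3, 5.99, by = 0.01)
n2 <- 60 / (6 - n) - 11 - K * (12 / (6 - n) - 1 - n)
plot(n, n2 - n^2, type = "l", xlab = "<n>", ylab = "variance of n")
\end{verbatim}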
\section{Conclusions}
We studied the capital market and the social behaviour of agents. Quantity F, and in general $<f>$, is the
quantity which characterizes verification of information and thus contributes to the aversion of an agent with respect to risk. We generalized this relation to every cell: we assumed that the personal diameter r increases when $<f>$, the average number of nonreducible subgroups per cell, decreases; $<f>$ is the average number of faces per cell. Thus we assumed that the aversion of an agent to risk on capital markets also contains a characterization of verification of information. We assumed that the number of nonreducible subgroups F per cell, $<f>$, contributes linearly to this aversion constant A.
When the pyramidal structure appears in the group, $<f> = 4$, then $A = A_{0} $.
This type of structure corresponds to hierarchical economy systems. When the structure
contains topologically only one cell then $<n> = 6$ and A tends to infinity. Better verification of information leads to expectation of higher returns and thus the acceptable risk is larger.
This type of structure corresponds to market-based economy systems. They work via price signals.
We interpret $A_{0} - 4.b$ as a kind of measure of risk aversion of crowd and we interpret $(<f>.b)$
as an average measure of risk aversion of a personal /individual/ contribution to risk aversion different from the crowd contribution of this person in the cell.
In practice both types of system involve interaction of people: in the market system they interact as traders, in the hierarchy as agents within an organization. Both market- and hierarchy- based systems co-exist in modern economy: there are open markets and there are large organizations.
This corresponds to our state for which the aim function has its minimum for $ <n> $ from the interval $ 3 < <n> <6$. In our model the market-based system corresponds to a state with $<n> = 6$ and the hierarchy-based to $<n> = 3$, which are boundary cases of this interval for values of $<n>$.
The mix of investments of an agent in a given cell with an average measure A of risk aversion in the cell is found from
extremes of the function FM, the aim function. We find the minimum of the average /per cell/ function $<FM>$. We assume that in our model the individual investors and agents from financial intermediary firms do not differ. More general case will be studied elsewhere.
When $<f> = 4$ the pyramidal /hierarchical/ structure exists, in which there are only nonreducible subgroups with $n = 3$. Thus we consider this structure as a structure with a uniform risk aversion constant $A_{0}$. The
personal contributions to the risk aversion constant of an agent are then those contributions which correspond
to structures with $<n> > 3$. Thus there is a linear contribution to the risk aversion constant which is proportional to $<f> - 4$. This corresponds to nonuniform risk aversion constants in the group of agents and contains a personal contribution to
the risk aversion constant.
We use $\frac{<f>.<n>}{3}$ as an average number of agents per cell.
The extreme of $<FM>$ is minimum for some conditions on the market, and in the group of agents, see above. This minimum corresponds to $<n>$ between 3 to 6. This is a stable state. The extreme may be a maximum. Then there are two minima of the function FM, one of them is that which corresponds to $<n> = 3$ /the pyramidal hierarchical structure/ which is a metastable state, and the other one corresponds to $<n> = 6$ /the structure with hexagons in average and with one cell/ which is a stable state. Efficient capital markets are markets which are of the second type /strong stability, conservative and aggressive agents are present on the market, returns are high/ in our paper. Note that under some conditions of agents and the expectations
about the capital market the hierarchy economy system may be more stable than the market-based economy system.
Aboav's law describes how many agents, /$m(f,n)$/, are present on average in nonreducible subgroups in a cell neighbouring a cell with f nonreducible subgroups with average agent number n. This enables us to characterize the variance of n for the distribution of nonreducible subgroups. This variance diverges to infinity as $<n>$ tends to 6.
The financial intermediaries play a key role in reducing the risks faced by individual savers, by pooling their savings and using these to assemble diversified portfolios of assets. This diversification requires specialized knowledge and expertise, and a large portfolio. This will be studied using our model with a nonzero $\gamma$ constant corresponding to a firm with a given risk tolerance constant.
\section*{Acknowledgment}
This paper presents a part of the results of the VEGA project 1/0495/03.
\begin{thebibliography}{A}
\bibitem{1}
T. Plummer, The Psychology of Technical Analysis, Rev. Ed., Probus Pub.Comp., Chicago-Cambridge, 1993
\bibitem{2}
D. Lewis, The Secret Language of Success, Carroll and Graf Pub. Inc., USA, 1989
\bibitem{3}
N. Rivier, Journal de Physique, C9 N12 T46 (1985) 155
\bibitem{4}
N. Rivier, Physica, 23D (1986) 129
\bibitem{5}
O. Hudak, Topology and Social Behaviour of Agents, http://arXiv.org/abs/cond-mat/0312723 , 2003
\bibitem{6}
I. Molho, The Economics of Information, Blackwell Pub., Oxford, Malden, 1997
\bibitem{7}
A. Greenspan, Commercial Banks and the Central Bank in a Market Economy, in Readings on Financial Institutions and Markets, P.S. Rose editor, 5th ed., R.D. Irwin Inc., Homewood, Boston, 1993, p. 294
\bibitem{8}
J. von Neumann and O. Morgenstern, Theory of Games and Economic Behaviour, 3rd. ed., Princeton University Press, Princeton, 1953
\bibitem{9}
J. Lintner, The Market Price of Risk, Size of Market and Investor's Risk Aversion, Journal of Business, April (1968)
\bibitem{10}
W.F. Sharpe, Integrated Asset Allocation, Financial Analysts Journal, September-October (1987)
\end{thebibliography}
\end{document}
% Acknowledgments
\chapter*{Acknowledgments}
\label{sec:acknowledgement}
%\addcontentsline{toc}{chapter}{\nameref{sec:acknowledgement}}
I want to thank ...
\chapter{Algorithms}\label{ch:algorithms}
\section{Sample Algorithm}
In Algorithm~\ref{alg1} we show how to calculate $y=x^n$.
\begin{algorithm}
\caption{Calculate $y = x^n$}
\label{alg1}
\begin{algorithmic}
\input{algorithms/yxn.alg}
\end{algorithmic}
\end{algorithm}
\endinput
\section{Matrix Product States}
An MPS on a closed chain:
\begin{align}
\ket{\psi} &=
\sum_{i_1,\ldots,i_N} \Tr\left(
A_{i_1}\cdots A_{i_N}
\right)
\ket{i_1,\ldots,i_N}
\end{align}
where $A_i$ are $D\times D$ matrices, and $i=1,\ldots, d$ where
$d$ is the dimension of the site Hilbert space.
There are $dND^2$ parameters here to describe a quantum state.
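As a minimal illustration (toy dimensions and random matrices, not tied to any particular model), a single amplitude of this closed-chain MPS is just a trace of a product of matrices:
\begin{verbatim}
# Toy closed-chain MPS: amplitude <i_1,...,i_N|psi> = Tr(A[[i_1]] ... A[[i_N]]).
# d = physical dimension, D = bond dimension; random matrices, illustration only.
set.seed(7)
d <- 2; D <- 2; N <- 4
A <- lapply(1:d, function(i) matrix(rnorm(D * D), D, D))
mps_amplitude <- function(config, A) {   # config = c(i_1, ..., i_N), entries in 1..d
  M <- diag(nrow(A[[1]]))
  for (i in config) M <- M %*% A[[i]]
  sum(diag(M))                           # the trace
}
mps_amplitude(c(1, 2, 2, 1), A)
\end{verbatim}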
Gapped ground states of local Hamiltonians satisfy the area law for entanglement
entropy.
Can approximate gapped ground states to accuracy $1/\poly(N)$
with MPS with $D$ sublinear in $N$.
An MPS is injective if there exists a finite $L_0$ such that the block matrices for blocks of $L \ge L_0$ sites,
\begin{align}
\tilde{A}_I &=
A_{i_1} A_{i_2} \cdots A_{i_L},
\end{align}
span the whole space of $D\times D$ matrices.
The question is whether these matrices span the entire space of $D\times D$
matrices.
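One concrete way to test this spanning condition numerically (a sketch with random toy matrices; the choice of d, D and L is arbitrary) is to vectorize all products of length L and check whether they have full rank $D^2$:
\begin{verbatim}
# Does the set {A[[i_1]] %*% ... %*% A[[i_L]]} span all D x D matrices?
# Random toy matrices; illustration of the injectivity check only.
set.seed(11)
d <- 2; D <- 2; L <- 3
A <- lapply(1:d, function(i) matrix(rnorm(D * D), D, D))
configs <- expand.grid(rep(list(1:d), L))       # all d^L index strings
vecs <- apply(configs, 1, function(idx) {
  M <- diag(D)
  for (i in idx) M <- M %*% A[[i]]
  as.vector(M)                                  # vectorized D x D product
})
qr(t(vecs))$rank == D^2                         # TRUE if the products span
\end{verbatim}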
The reason injectivity is important is that you can show that an injective MPS
has the following properties.
\begin{enumerate}
\item Finite correlation length.
\item It is the unique gapped ground state of a frustration-free parent
Hamiltonian.
\end{enumerate}
There is a book by Xiao-Gang Wen, Xie Chen, Bei Zeng and one other author which
has a nice discussion of MPS and in particular the relation between MPS and SPT
phases.
There's another formulation called PEPS,
projected entangled pair states.
Consider a chain of maximally entangled pairs which are then projected.
So you have on each site,
imagine you have two kinds of spins.
And then you put the spins on nearby states into singlets,
and then you do a projection.
So here,
this singlet bond,
what that really means,
you can think of it as a maximally entangled state between the Hilbert space on
the two sides.
\begin{align}
\frac{1}{\sqrt{D}} \sum_{\alpha=1}^{D} \ket{\alpha}\ket{\alpha}
\end{align}
where $D$ is some internal bond dimension.
The idea is that each of these are some virtual spin,
and $D$ would be the dimension of the virtual spin.
And then the projection projects from two virtual spins to a physical degree of
freedom of dimension $d$.
So this projection,
if you write it in the following way
\begin{align}
P &= \sum_{i,\alpha,\beta} A_{i,\alpha\beta} \ket{i}\bra{\alpha\beta}
\end{align}
Then this state is actually equivalent to the MPS $\ket{\psi}$.
So the MPS is exactly equivalent to this PEPS when the projection is written
like above.
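As a brute-force check of this equivalence in a toy setting (random tensors, N = 3 sites, and with the convention, assumed here, that $\alpha$ is the left and $\beta$ the right virtual index), contracting the maximally entangled bonds through the projection reproduces the trace formula up to the overall $D^{-N/2}$ coming from the $1/\sqrt{D}$ normalisation of each bond:
\begin{verbatim}
# Brute-force PEPS contraction on a 3-site ring vs. the MPS trace formula.
# A[i, alpha, beta]: alpha = left virtual index, beta = right virtual index.
set.seed(3)
d <- 2; D <- 2; N <- 3
A <- array(rnorm(d * D * D), c(d, D, D))
peps_amp <- function(i1, i2, i3) {
  amp <- 0
  for (a1 in 1:D) for (b1 in 1:D) for (a2 in 1:D) for (b2 in 1:D)
    for (a3 in 1:D) for (b3 in 1:D) {
      bonds <- (b1 == a2) * (b2 == a3) * (b3 == a1)  # entangled-pair deltas
      amp <- amp + A[i1, a1, b1] * A[i2, a2, b2] * A[i3, a3, b3] * bonds
    }
  amp / D^(N / 2)                                    # one 1/sqrt(D) per bond
}
mps_amp <- function(i1, i2, i3)
  sum(diag(A[i1, , ] %*% A[i2, , ] %*% A[i3, , ]))
all.equal(peps_amp(1, 2, 1), mps_amp(1, 2, 1) / D^(N / 2))  # TRUE
\end{verbatim}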
Something that happened in 2010,
that led people to think all 1D phases are classified by $H^2(G, U(1))$,
concerns what can be done with constant depth circuits
when the bond dimension is held fixed.
It is the following.
One can show that any MPS with fixed bond dimension can be disentangled
to exponential accuracy by a local constant depth circuit.
The fact you can do this is evidence for no bosonic topological order,
meaning no topologically ordered phases of bosons without any symmetry.
Disentangle means take it to the direct product state.
It doesn't prove it of course,
because the bond dimension is assumed to be fixed,
but in general you should consider the bond dimension to grow with system size.
Last time in the paper,
they show the bond dimension is sublinear in $N$,
but not necessarily constant in $N$.
This discovery of disentanglement with fixed bond dimension doesn't prove there
is no topological order in 1D but it does strongly suggest there is no bosonic
topological order in 1D.
Then you could add symmetry in the circuit,
and let the circuit be symmetric,
and then you can show from there that you basically get this PEPS form.
That is,
you can show that any MPS with a fixed bond dimension and symmetry $G$
can be converted into an entangled pair form via a symmetric constant depth
circuit.
Furthermore, each virtual spin is in projective representation of $G$
characterized by this cocycle
$[w] \in H^2(G,U(1))$.
This again,
is evidence that the classification of gapped phases should be
$H^2(G,U(1))$.
The caveat is that we're considering MPS,
and constant depth local circuits that disentangle,
or take any generic MPS to this fixed point form.
I'm not going to go through the exact construction,
but this book has a nice account of it.
I want to describe a different model for thinking about SPT states in terms of
path integrals,
state sums and wave functions that can generalize to every dimension.
This whole discussion about MPS is limited to 1D.
While it's interesting and illuminated,
I'm not going to dwell more on it because it's so specific to 1D.
This is the overview of what happened 10 years ago in 1D that led people to go
further and classify higher dimensions.
\begin{question}
Experimental tests of AKLT?
\end{question}
The spin-1 Heisenberg chain is a gapped phase of matter with dangling edge
modes,
and there are experiments.
The Haldane chain is half the reason why Haldane won the Nobel prize,
even though he didn't realize it's an SPT state at the time.
\begin{question}
Is it because of ground state degeneracy?
\end{question}
Every 1D bosonic topological phase is a trivial phase,
if you forget about symmetry.
For fermions,
it's not the case because we have Majorana chains.
Even though it has no edge modes on a ring,
it's still topological.
It's not exactly because of ground state degeneracy,
because the Majorana chain does not have ground state degeneracy on a closed
chain either.
\begin{question}
Why is the second statement evidence?
\end{question}
Every state,
you can run it through some constant depth symmetric circuit,
and take it into a form like this,
where every one is a projective representation,
with a dangling edge state.
You could run some RG procedure that takes a constant number of steps,
but takes you to this idealized fixed-point form to good accuracy.
\begin{question}
Is the second statement still true for $d=1$?
\end{question}
You can actually get from PEPS to the trivial product state by disentangling
each of these guys,
but the only way to do that is to break the symmetry $G$.
But to fully disentangle,
you need that one extra step to break the symmetry.
I encourage to read the relevant chapter in the book by X.G. Wen.
There's also a book by Ignacio Cirac.
\section{Group Cohomology Model (Dijkgraaf-Witten theory)}
I want to turn to a model for SPT state,
first in 1D,
but this model actually generalized to higher dimensions as well.
The idea is that we're going to construct a TQFT that gives SPT states
by constructing the path integral.
Construct a path integral for a TQFT
\begin{align}
Z(M^2, A)
\end{align}
We have a symmetry $G$, so there is a gauge field $A$ for that $G$ symmetry.
The first step is to triangulate spacetime.
Then the next step is define a branching structure.
A branching structure is a local ordering of the vertices.
Another way of saying what it is is to make all edges directed in a way such
that there are no closed loops.
That's all a branching structure is.
For example, take some triangle,
with vertices labelled 0, 1, 2.
Then arrows point from lower- to higher-numbered vertices on each edge,
and you see there are no loops.
Then we introduce an orientation.
You can reverse the arrows,
and you notice you can't rotate one to make it look like the other,
because if you follow two of the arrows,
your thumb points down,
but it points up on the other orientation.
You call one the $+$ orientation and you call the other the $-$ orientation.
The next thing to do is to assign group elements to vertices.
It effectively introduces a gauge field into the problem,
because if I draw a triangle with
$g_0$, $g_1$ and $g_2$ on vertices,
the links between them will be $g_0^{-1}g_1$ from $g_0$ to $g_1$,
$g_{0}^{-1}g_2$ from $g_0$ to $g_2$ and
$g_1^{-1}g_2$ from $g_1$ to $g_2$.
[picture]
This defines a special kind of gauge field,
it's a \emph{flat} gauge field.
That is,
there is no net flux through a plaquette.
For example,
the flux through one triangle is
$A_{01}A_{12}A_{20}=1$
and in fact the product of any loop is 1 with
\begin{align}
\prod_{\textrm{loop}} A = 1
\end{align}
so in fact $A$ is a \emph{trivial} flat gauge field.
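As a tiny sanity check (an invented example with $G = \mathbb{Z}_5$ written additively, so that the product around a loop becomes a sum mod 5), one can verify the flatness directly:
\begin{verbatim}
# Assign group elements to the three vertices of a triangle and check that
# A_ij = g_i^{-1} g_j has trivial flux around the loop. Here G = Z_5,
# written additively (an invented choice for illustration).
n <- 5
g <- sample(0:(n - 1), 3, replace = TRUE)   # g_0, g_1, g_2
A01 <- (g[2] - g[1]) %% n
A12 <- (g[3] - g[2]) %% n
A20 <- (g[1] - g[3]) %% n
(A01 + A12 + A20) %% n == 0                 # always TRUE: the gauge field is flat
\end{verbatim}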
Then for each 2-simplex (triangle),
we're going to associate a phase factor,
which is an amplitude,
that depends on the group elements.
To each 2-simplex $\Delta^2$, define
\begin{align}
\left[ \nu_2\left( g_0, g_1, g_2 \right) \right]^{S\left( \Delta^2 \right)}
\end{align}
where $\nu_2 \in U(1)$.
I assume the vertices are labelled $g_0,g_1,g_2$.
$S$ is the orientation of $\Delta^2$.
\begin{question}
Is this an invertible TQFT?
\end{question}
This will be an invertible TQFT,
and the fact that this is just a $U(1)$ phase is important for why it's invertible.
You can generalize it,
but it's significantly more complex once it's not just a $U(1)$ phase.
The path integral that we're going to define is defined as follows.
\begin{align}
Z &=
\frac{1}{|G|^{N_V}}
\sum_{\{ g_i \}}
\prod_{\Delta_2 = (i, j, k)}
\left[
\nu_2 \left( g_i, g_j, g_k \right)
\right]^{S(\Delta_2)}
\end{align}
we're taking the product over all 2-simplices $\Delta_2$,
each of which has vertices $i,j,k$ in order.
And I'm going to take the product of all these phases $\nu_2$
and I'm going to raise it to the orientation $S\left( \Delta_2 \right)$.
And then I'm going to sum over all possible choices of vertices $\left\{ g_i
\right\}$
and then I'm going to average by dividing by $|G|^{N_V}$
where $N_V$ is the number of vertices and $|G|$ is the number of elements in
$G$.
We'll come to this,
but if you're a mathematician and see this,
you might think we're crazy,
because you'll see the sum is completely useless for defining the path integral,
but the sum is here for physics.
What you find here is that every term is the same.
I haven't told you what the $\nu_2$ are yet.
It turns out every term in the sum is the same.
We're just summing over all possible labellings.
\begin{question}
$G$ survives permutations?
\end{question}
We have some crazy triangulation,
and I'm just summing over every possible assignment of group elements.
Every vertex has a group element attached to it,
and each vertex can carry a different group element.
\begin{question}
Are there as many group elements as vertices?
\end{question}
There are $N_V$ group elements,
which is different from $|G|$.
For example, if $G=\mathbb{Z}_2$,
then there are $2^{N_V}$ terms in the sum.
\begin{question}
How does it connect to the system we're trying to study?
\end{question}
Maybe you should wait.
If you remember in TQFT,
you can define a path integral for spacetime.
If your spacetime has a boundary,
you have a state on the boundary.
This path integral can give a wave function state on the boundary.
Any boundary is going to be a circle,
or a bunch of disconnected circles,
so we can get a wave function on circles that describes SPTs.
And from those circles,
we deduce an exactly solvable Hamiltonian as well.
This gives a topologically invariant path integral,
from which we extract topological invariants.
\begin{question}
Is branching structure the same as consistent orientation?
\end{question}
No, it's more than that.
It's even more than a specific set of orientations.
It's a specific ordering of the vertices.
\begin{question}
Should neighbouring vertices have the same ordering?
\end{question}
Not necessarily.
\begin{question}
We take all gauge-equivalent configurations into one term?
\end{question}
Yes, this is going to wind up being an average over gauge transformations.
Right now it's gauge-equivalent to no gauge field at all,
though we haven't defined $\nu_2$ yet.
We'll be able to relax this $\prod A = 1$ condition,
so we can introduce twists when things wrap around,
and then it won't be gauge-equivalent to nothing.
It's also useful to think of $Z$ in the following way,
as a normalized sum over all $g$'s
of the exponential of some topological action.
\begin{align}
Z &=
\frac{1}{|G|^{N_V}}
\sum_{\{ g_i \}}
e^{i S_{\mathrm{top}}\left( \left\{ g_i \right\} \right)}
\end{align}
I'm being a bit sloppy here:
when I say $A$,
I really mean the equivalence class $[A]$
under gauge equivalence.
But if we know $Z(A)$ is gauge invariant,
I don't really need to write the square brackets.
\begin{align}
Z = Z\left( M^2 , [A] \right)
\end{align}
This has a global symmetry,
and what I mean is that the amplitude should be the same if everything is multiplied by
some global $g$.
Every term in the sum is explicitly the same if I change all the $g_i$'s at once by
some global $g$.
\begin{align}
e^{iS\left( \left\{ g_i \right\} \right)}
=
e^{iS\left( \left\{ gg_i \right\} \right)}
\end{align}
which also means
\begin{align}
\nu_2\left( gg_0, gg_1, gg_2 \right) &=
\nu_2\left( g_0, g_1, g_2 \right)
\end{align}
We're assuming $g$ is unitary,
but if it's anti-unitary,
you just introduce a complex conjugate in the above equation.
What this allows us to do is define a 2-cochain.
The reason it defines a 2-cochain is because I can define the usual 2-cochain,
which I introduced last lecture,
which is
\begin{align}
\omega_2(g_1, g_2) &=
\nu_2 (1, g_1, g_1 g_2)
\end{align}
Furthermore,
I can multiply every element by $g_0^{-1}$ so
\begin{align}
\nu_2 (g_0, g_1, g_2) &=
\nu_2 \left(
1, g_0^{-1} g_1, g_{0}^{-1} g_1 g_1^{-1} g_2
\right)\\
&= \omega_2\left( g_0^{-1} g_1, g_1^{-1} g_2 \right)
\end{align}
We thought of 2-cochains as something that takes in 2 group elements and spits
out a phase factor.
But here,
we can think of it as taking 3 group elements and spitting out a factor,
but it has some symmetry in the inputs.
$\nu_2$ is called a homogeneous 2-cochain,
whereas $\omega_2$ is an inhomogeneous 2-cochain.
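As a small illustration (not from the lecture, and with an arbitrary choice of 2-cochain on $G=\mathbb{Z}_n$ written additively), the following Python sketch checks the two statements above: the homogeneous cochain built from $\omega_2$ is invariant under a global shift, and $\omega_2$ is recovered as $\nu_2(1, g_1, g_1 g_2)$.
\begin{verbatim}
# Sketch: G = Z_n additive, phases as complex numbers.
import cmath
import itertools

n = 3
G = range(n)

def omega2(a, b):              # an arbitrary inhomogeneous 2-cochain
    return cmath.exp(2j * cmath.pi * a * b / n)

def nu2(g0, g1, g2):           # homogeneous cochain built from omega2
    return omega2((g1 - g0) % n, (g2 - g1) % n)

# global symmetry: nu2(g + g0, g + g1, g + g2) = nu2(g0, g1, g2)
for g0, g1, g2, g in itertools.product(G, repeat=4):
    assert abs(nu2((g + g0) % n, (g + g1) % n, (g + g2) % n)
               - nu2(g0, g1, g2)) < 1e-12

# recover the inhomogeneous cochain: omega2(g1, g2) = nu2(1, g1, g1 g2)
# (written additively: nu2(0, g1, g1 + g2))
for g1, g2 in itertools.product(G, repeat=2):
    assert abs(nu2(0, g1, (g1 + g2) % n) - omega2(g1, g2)) < 1e-12
\end{verbatim}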
\begin{question}
What's inhomogeneous about it?
\end{question}
If you think about it,
\begin{question}
What is symmetric?
\end{question}
I want the action to be symmetric.
Usually when you talk about a symmetric system,
I say the path integral is a sum over all field configurations.
But I say my system is symmetric if my action has the symmetry.
\begin{question}
Can't we have each equality up to a phase
so the phases cancel instead?
\end{question}
I haven't thought about that too carefully.
I'm saying that this implies this,
but not the other way around.
You could relax things and consider it more general,
but I'm not sure how to do that in a way that's compatible with locality.
At the very least,
it gives us a way of getting a symmetric path integral.
\begin{question}
Every group element can be mapped to another group element by another group
element.
Why are the vertices not all connected to each other?
You can always form an edge that connects $g_1$ to $g_3$.
What is the point of the triangulation?
All group elements are connected to each other.
\end{question}
I don't understand the question.
That's the graph of the group.
You can think of this as defining a gauge field on a manifold.
\begin{question}
We're assuming $\nu_2$ to have a global $g$ symmetry,
not a local $g$ symmetry.
\end{question}
We have a global $g$ symmetry,
which means we want every amplitude independent of $g$,
but there's also a gauge symmetry,
in that I can do a gauge transformation of $A$ and not change the path integral.
There's something much stronger that's happening here in that there's also a
local gauge symmetry.
\section{Topological invariance of the path integral}
Now we get to the topological invariance of $Z$.
So far,
we had to triangulate spacetime and add a branching structure.
That's just geometry.
But for topological invariance,
we need it to be independent of geometry and choice of branching structure.
The point is
that we want $Z$ to be independent of triangulation and branching structure.
That's what we want to demand,
but actually we want to demand something even stronger,
that is, that the action $e^{iS}$ is topological,
meaning independent of triangulation.
And the way that you can require that something be independent of triangulation
is that, given any triangulation,
you can always get to another triangulation by a series of moves,
and these moves are called \emph{Pachner moves}.
Suppose I have two triangles like this.
One Pachner move is to have a rhombus with a diagonal,
and change the diagonal to the other diagonal. This is a 2-2 move.
There is a 1-3 move which inserts a vertex on a triangle,
and draws rays from the new vertex.
It's just pasting a simplex of one higher dimension.
In 2D you have a 2-simplex that is a triangle.
But in one-higher dimension,
you have a tetrahedron.
Think of pasting a tetrahedron onto a triangle and flattening it.
So one way of thinking about Pachner moves is pasting $(D+1)$-simplices onto your
$D$-simplices and flattening them out.
But we actually need branched Pachner moves,
which also deals with the ordering.
I need some extra conditions.
For example,
in the 2-2 Pachner move,
there would be
\begin{align}
\nu_2 (g_0, g_1, g_2)
\nu_2 (g_0, g_2, g_3)
=
\nu_2 (g_0, g_1, g_3)
\nu_2 (g_1, g_2, g_3)
\end{align}
[picture]
To be careful,
you should also check the orientation and make sure you put the right complex
conjugation.
For the 1-3 Pachner move,
you would have
\begin{align}
\nu_2(g_0, g_1, g_2) =
\nu_2(g_0, g_1, g_3)
\nu_2(g_1, g_2, g_3)
\left[ \nu_2(g_0, g_2, g_3) \right]^{-1}
\end{align}
But secretly,
they are the same equation,
in fact they are just the 2-cocycle equation that
\begin{align}
d\nu_2 = 1
\end{align}
To make the topological action re-triangulation invariant,
we require $\nu_2$ to be a 2-cocycle.
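For a concrete check (an example added here, not one from the lecture): take $G=\mathbb{Z}_2\times\mathbb{Z}_2$ and the standard non-trivial 2-cocycle $\omega_2\left((a_1,a_2),(b_1,b_2)\right)=(-1)^{a_1 b_2}$. The Python sketch below verifies the inhomogeneous form of the 2-cocycle condition, $\omega_2(g,h)\,\omega_2(gh,k)=\omega_2(h,k)\,\omega_2(g,hk)$, by brute force.
\begin{verbatim}
# Sketch: the non-trivial 2-cocycle of Z_2 x Z_2, checked exhaustively.
import itertools

G = list(itertools.product([0, 1], repeat=2))

def mul(g, h):                     # group law of Z_2 x Z_2
    return ((g[0] + h[0]) % 2, (g[1] + h[1]) % 2)

def omega(g, h):                   # omega((a1,a2),(b1,b2)) = (-1)^(a1*b2)
    return (-1) ** (g[0] * h[1])

for g, h, k in itertools.product(G, repeat=3):
    # inhomogeneous 2-cocycle condition (equivalent to d nu_2 = 1)
    assert omega(g, h) * omega(mul(g, h), k) == omega(h, k) * omega(g, mul(h, k))
\end{verbatim}
This $\omega_2$ is not a coboundary, so it represents the non-trivial class in $H^2(\mathbb{Z}_2\times\mathbb{Z}_2, U(1))$.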
And finally,
let's look at the invariance under 2-coboundaries.
\subsection{Invariance under coboundaries}
Suppose that we take
\begin{align}
\nu_2 (g_0, g_1, g_2)
\to
\nu_2 \cdot
b_1 (g_0, g_1)
b_1 (g_1, g_2)
\left[ b_1 (g_0, g_2) \right]^*
\end{align}
and I'm going to require
\begin{align}
b_1 (g_1, g_2) &= b_1 (gg_1, gg_2)
\end{align}
And $e^{iS}$ is invariant because each edge (1-simplex) appears in exactly two
2-simplices with opposite induced orientation.
The reason it's invariant is that every single 1-simplex appears in 2 neighbouring
triangles.
For example, if I have a triangulation,
every 1-simplex has 2 neighbouring triangles.
And every 1-simplex has opposite orientation relative to each triangle.
That means that if one triangle changes by this factor $b_1(g_1,g_2)$,
the neighbour is going to change by a similar factor,
but it's going to appear complex conjugated in the neighbour so they're all
going to cancel out.
In terms of inhomogeneous 2-cocycles,
writing $g_{ij} := g_i^{-1}g_j$ and applying $g_0^{-1}$ to each argument,
we get
\begin{align}
\nu_2(1, g_0^{-1} g_1, g_{0}^{-1} g_2)
&\to
\nu_2 \cdot
\frac{b_1(1, g_{01}) b_1(1, g_{12})}{b_1(1, g_{02})}
\end{align}
and, writing $\epsilon_1(g_{01}) := b_1(1, g_{01})$,
\begin{align}
\omega_2(g_{01}, g_{12}) \to
\omega_2 \,
\frac{\epsilon_1(g_{01}) \, \epsilon_1\left( g_{12} \right)}{\epsilon_1\left(
g_{02} \right)}
\end{align}
That means, if we change our 2-cocycle by a 2-coboundary,
the topological action doesn't change.
That is,
distinct $G$-symmetric path integrals are classified by
\begin{align}
[\nu_2] \in
H^2\left( G, U(1) \right)
= \frac{Z^2\left( G, U(1) \right)}{B^2\left( G, U(1) \right)}
\end{align}
Actually,
the last step is a bit of a jump.
Let me give more details.
On closed manifolds,
what we have defined so far always gives
$Z(M^2)=1$.
That's because what we have is a trivial $G$-bundle,
meaning that
$\prod_{\textrm{loop}}A=1$.
To get non-trivial results we need non-trivial flat bundles with
$\prod_{\textrm{loop}}A \ne 1$.
We want the bundle to be flat but with holonomy around non-contractible loops.
One way of doing this in the construction we've defined
is to cut our manifold along whatever loop we're interested in having
holonomy around,
then insert some $G$-twist when we glue the pieces back together.
Let me draw a picture.
Suppose we have a cylinder,
and we want to put an $h$ branch cut along the cylinder axis.
So then this loop at the end of the cylinder is $\gamma$,
with $\prod_\gamma A = h$.
Then if you roll out the cylinder flat,
you can draw a triangulation like this.
[picture of unrolled cylinder sheet]
The point is that we have $g_1,g_2,\ldots$ on the bottom edge of the cut,
which we identify with the vertices on the top edge of the cut to get a
cylinder.
To do the twist,
just label the top-edge vertices $hg_1, hg_2, \ldots$.
For a closed $M^2$
with a non-trivial flat bundle,
\begin{align}
|Z(M^2, A)| = 1
\end{align}
but the phase is going to be non-trivial,
with $Z(M^2, A)$ being a gauge-invariant polynomial in $\omega_2$.
For every element of $H^2$,
we're going to get a path integral that will spit out a $U(1)$ phase, and if we
look at all possible closed manifolds and all possible flat bundles,
we find that every element of $H^2(G, U(1))$ corresponds to a distinct path
integral $Z(M^2, A)$.
So there is a one-to-one correspondence between this group and these TQFTs by
picking appropriate fluxes along non-contractible cycles.
\begin{align}
H^2(G, U(1)) \leftrightarrow Z(M^2, A)
\end{align}
I didn't prove that $Z(M^2)=1$ if it's flat and trivial,
because I didn't prove that the sum over $G$ is actually invariant yet.
\begin{question}
What is the definition of $\prod A$?
\end{question}
Say you have a torus.
If you take the product of $A$ around a contractible loop,
you get 1.
But you get a holonomy around a non-contractible loop.
\begin{question}
What if we generalize to cellulations?
\end{question}
Well, this $\nu_2(g_0, g_1, g_2)$ only makes sense on triangles because it has
3 inputs,
but you could consider cellulations;
the framework will be a bit different,
but ultimately you get the same answer.
| {
"alphanum_fraction": 0.7400184843,
"avg_line_length": 32.4437781109,
"ext": "tex",
"hexsha": "52e09f191d153715d4cf96e8ba9fcdc3c0d531f2",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "00e4e2b6aba3d03baaec5caa36903e5135b014de",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ehua7365/umdphysnotes",
"max_forks_repo_path": "phys733/lecture19.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "00e4e2b6aba3d03baaec5caa36903e5135b014de",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ehua7365/umdphysnotes",
"max_issues_repo_path": "phys733/lecture19.tex",
"max_line_length": 80,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "00e4e2b6aba3d03baaec5caa36903e5135b014de",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ehua7365/umdphysnotes",
"max_stars_repo_path": "phys733/lecture19.tex",
"max_stars_repo_stars_event_max_datetime": "2021-12-11T12:53:46.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-12-11T12:53:46.000Z",
"num_tokens": 6115,
"size": 21640
} |
\chapter{Introduction}\label{cha:intro}
This chapter gives the reader an introduction to voxels, its uses and ways to create voxels from a model.
It also serves the purpose of motivating the thesis, as well as describing the problem it is out to solve.
Finally, it defines the delimitations of the thesis and presents the company at which this thesis was conducted.
\section{Background}
In computer graphics, a 3D model is built up of triangles which describe the surface of the model.
This means it does not store any information to represent the inside of the model.
In order to represent the inside, a volumetric data structure is needed.
Usually, this is done by storing \textit{voxels} in a uniform 3D grid.
A voxel can be described as data at a position in a grid.
This can be any form of data, such as occupancy, color, material or density.
% \textit{Voxel} is a word that has a lot of meaning in computer graphics.
% The simplest form of a voxel is a cube in a 3D-environment, but other representations of voxel data also exist.
% One such representation is marching cubes~\cite{marching-cubes}, where voxels are defined as density functions to a surface.
% This can create smooth surfaces from volume data, which has its uses in medical imaging~\cite{marching-cubes} and 3D-terrain~\cite{marching-cubes-terrain}.
As voxels are just a way to represent volumetric data, they have been used in a wide range of applications, such as global illumination~\cite{crassin-VCT}, medical imaging~\cite{marching-cubes,voxel-medicin} and collision culling~\cite{voxel-collision}.
These applications either store a voxel representation of models (especially true for medical imaging) or need to convert models into voxels.
This process is called \textit{voxelization} and has been widely studied in the past.
Some methods for voxelization include triangle-box intersection~\cite{SAT-voxelization}, rasterization~\cite{octree-voxelization} and depth buffer~\cite{depth-buffer}.
Recently, a study was published by \call{scanline-voxelization} proposing a new method to voxelize a model.
The basic idea of the method is to voxelize the model by performing line voxelization at different stages.
This method is the basis of this thesis and will further be called the \textit{optimal scanline}.
% The authors used both real and integer line voxelization based on~\call{voxeltraversal}.
% These two will further be called \textit{Real Line Voxelization} (RLV) and \textit{Integer Line Voxelization} (ILV).
% They mentioned ILV was slightly more efficient ($\sim$3\%), but the actual data was not published.
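To give a flavour of the basic building block, the sketch below shows one possible integer line voxelization in the spirit of a Bresenham-style 3D traversal (only the x-dominant case is shown, and this is not necessarily the exact algorithm evaluated later in the thesis):
\begin{verbatim}
# Sketch: integer 3D line voxelization, x-dominant case only.
def line_voxels_3d(p0, p1):
    (x, y, z), (x1, y1, z1) = p0, p1
    dx, dy, dz = abs(x1 - x), abs(y1 - y), abs(z1 - z)
    sx = 1 if x1 >= x else -1
    sy = 1 if y1 >= y else -1
    sz = 1 if z1 >= z else -1
    assert dx >= dy and dx >= dz, "only the x-dominant case is sketched"
    ey, ez = 2 * dy - dx, 2 * dz - dx      # error terms for y and z
    voxels = [(x, y, z)]
    for _ in range(dx):
        x += sx
        if ey >= 0:
            y += sy
            ey -= 2 * dx
        if ez >= 0:
            z += sz
            ez -= 2 * dx
        ey += 2 * dy
        ez += 2 * dz
        voxels.append((x, y, z))
    return voxels

print(line_voxels_3d((0, 0, 0), (5, 2, 1)))
\end{verbatim}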
\newpage
\section{Aim}
This thesis aims to investigate how the line voxelization algorithm affects the performance of the optimal scanline.
This includes both floating-point and integer line voxelizations.
The thesis also investigates the approximation error caused by using the integer versions of the algorithm.
This was done in \cite{scanline-voxelization}, but the authors only compared the voxel count,
which means that if a voxel is moved somewhere else, it would not register as an error.
As such, this thesis aims to measure this error with another metric, one which can capture such errors.
\section{Research Questions}
With the aim of the thesis defined, two research questions are formed as a baseline of the thesis.
These are presented below:
\begin{enumerate}
\item \label{que:linealg} Which line voxelization algorithm performs best for the optimal scanline?
\item \label{que:erroraprox} How great is the approximation error of the integer versions of the optimal scanline?
% \item \label{que:combine} Can the different line voxelization be combined to improve performance and accuracy?
\end{enumerate}
Better performance in this case is defined as faster execution of the voxelization.
It is not a metric of the error or the memory usage of the algorithms.
\section{Delimitations}
The implementation of the thesis ran on \textit{Amazon Web Services} (AWS), on a computer running Ubuntu 18.04 with an NVIDIA Tesla T4 graphics card with driver version 440.59.
The rendering of the voxelization was done using OpenGL.
As the focus was not on supporting older devices, the OpenGL version 4.6 was used.
The computing language of choice was CUDA, as such, other languages were not considered for the implementation.
Again, as there is no need to support older devices, CUDA 10.2 was used.
The line algorithms that were evaluated were limited to a voxel traversal algorithm by \call{voxeltraversal}, its integer version and Bresenham.
As Bresenham is predominantly a 2D line drawing algorithm, it was extended to 3D using a method proposed by \call{3d-bresenham}.
The evaluations were also limited to three different models and voxel grid resolutions between 128 and 2048.
\newpage
\section{MindRoad}
This master thesis was conducted on behalf of MindRoad.
MindRoad is a software company which specializes in embedded systems, web development and mobile applications.
They also provide courses in software development, such as C++, Python, GPU-programming, Linux and driver development.
| {
"alphanum_fraction": 0.7989851678,
"avg_line_length": 70.1917808219,
"ext": "tex",
"hexsha": "5998f5c84bfe54b9cbacfb8b73f901ca38eb9df7",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "4e4cb94b2a4ee261b2b9974aa4b20f6643eb6595",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Thraix/MasterThesis",
"max_forks_repo_path": "Thesis/Latex/introduction.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "4e4cb94b2a4ee261b2b9974aa4b20f6643eb6595",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Thraix/MasterThesis",
"max_issues_repo_path": "Thesis/Latex/introduction.tex",
"max_line_length": 253,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "4e4cb94b2a4ee261b2b9974aa4b20f6643eb6595",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Thraix/MasterThesis",
"max_stars_repo_path": "Thesis/Latex/introduction.tex",
"max_stars_repo_stars_event_max_datetime": "2021-04-16T10:54:38.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-04-16T10:54:38.000Z",
"num_tokens": 1169,
"size": 5124
} |
\section*{Teaching}
\begin{tabular}{p{0.25\textwidth} p{0.75\textwidth}}
\textbf{Duke University}: & \\
2019 & Social Reception of Ebola in the DRC (Instructor of Record) \\
2014 & Quantitative Methods in the Social Sciences (Instructor of Record)\\
2009 & Research Methods in Global Health (TA) \\
2009 & Social Determinants of U.S. Health Disparities (TA)\\
\textbf{The Kings University, Instructor of Record for}: & \\
2013 & The Sociology of Gangs and Gang Control \hfill Spring 2013\\
2012 & The Sociology of Gender\\
2012 & The Sociology of Religion \\
2012 & Introduction to Sociology \\
2011 & The Sociology of Deviance and Crime \\
\textbf{Fresno Pacific Seminary}: & \\
2002 & Cross Cultural Experience (TA) \\
\end{tabular} | {
"alphanum_fraction": 0.7343324251,
"avg_line_length": 45.875,
"ext": "tex",
"hexsha": "fdace5a93a29301a8a299838fb9a5df8270c4bcb",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "743bae2de87deef01f4202c50f2a005a9fa885fb",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "thebigbird/starter-hugo-academic",
"max_forks_repo_path": "static/files/cv/experience.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "743bae2de87deef01f4202c50f2a005a9fa885fb",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "thebigbird/starter-hugo-academic",
"max_issues_repo_path": "static/files/cv/experience.tex",
"max_line_length": 75,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "743bae2de87deef01f4202c50f2a005a9fa885fb",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "thebigbird/starter-hugo-academic",
"max_stars_repo_path": "static/files/cv/experience.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 207,
"size": 734
} |
% $Id: func.tex,v 1.8 2002/04/17 20:05:59 ellard Exp $
\documentclass[makeidx,psfig]{article}
\usepackage{ifthen}
\usepackage{makeidx}
\usepackage{psfig}
% \input{macros}
\input{dan}
\input{ant-macros}
\newtheorem{alghead}{Algorithm}[section]
\newtheorem{codehead}{Program}[section]
\makeindex
\setlength{\textheight}{8.0in}
\setlength{\textwidth}{6.5in}
\setlength{\oddsidemargin}{0.0in}
\setlength{\evensidemargin}{0.0in}
\raggedbottom
\title{Functions in {\sc Ant-8}}
\begin{document}
In this document we will see how to build modular code and how
functions and methods in higher-level languages can be written in {\sc
Ant-8}.
\section{Avoiding Repeated Code}
Recall the program {\tt hello.asm} (described in Section \ref{hello}).
Now imagine that you had a program that printed two strings. We could
write such a program by making a copy of most of the {\tt hello.asm}
program so that there were two loops, one for printing the first
string, and the other for printing the second, but this seems a little
wasteful. Now imagine that we wanted to print three or four different
strings-- we could make three or four copies of this code, with slight
changes to print each string, but this would soon become ridiculous.
\begin{figure}
\caption{Source code of {\tt print-2.asm}}
\hrule
\input{Tutorial/print-2}
\hrule
\end{figure}
What we would like to do instead is discover how we can write code so
that similar functionality is implemented once, instead of many times.
In this example, one way we could do this would be to make an extra
loop, outside the printing loop, that printed each string in turn.
This approach would work for this example, but it does not work in
general because it assumes that all we are doing is printing strings.
In the more general case, there might be a lot of other things that
are done by the program, and they might be different from loop to
loop.
What we would like is for the printing loop to be something we can use
any time we need to print a string, no matter what else the program
does. In order to accomplish this, we need to learn some new
techniques.
\begin{figure}
\caption{Source code of {\tt print-3.asm}}
\hrule
\input{Tutorial/print-3}
\hrule
\end{figure}
\subsection{The Return Address}
The most important thing to note about this goal is how execution gets
back to where it was before it jumped to the printing code. This is
accomplished by noting that after a {\tt jmp} instruction is executed,
register {\tt r1} contains the address of the instruction that would
have been executed had it been an ordinary instruction (not a branch
or a jump). This means that after the program jumps to the code for
printing out the string, register {\tt r1} contains the address that
we want to branch back to after the printing is finished. This is
called the {\em return address}. It is important to grab the return
address out of register {\tt r1} immediately, since many instructions
change {\tt r1}.
In this code, we grab the return address and store it in register {\tt
r5}. When the code is finished printing the string, it branches
back to this address by using the {\tt beq} instruction.
\subsection{Saving and Restoring State}
The code in {\tt print-3} almost accomplishes our goal, but it has an
important flaw-- it changes the contents of registers {\tt r2} through
{\tt r5}. Therefore, we can't just use this code wherever we like,
because when the function returns the values of these registers may be
changed. Note that register {\tt r1} will also be changed by this
piece of code, but since many operations change {\tt r1} anyway, we
don't care so much about this.
One solution to this problem would be to write the rest of our program so that
it didn't use these registers for anything (as is true in {\tt
print-3}), but this approach quickly becomes unworkable for programs
that have more than a few very simple functions-- there simply aren't
enough registers.
A much more general solution is to {\em preserve} the contents of the
registers used by the function (in this case, registers {\tt r2}
through {\tt r4}) by storing them to memory whenever the function is
called, and then {\em restore} them by loading their values back into
these registers just before the function jumps back to the return
address. To do this, all we need to do is set aside a small amount of
memory to store the contents of these registers in. In this case,
the body of the {\tt print\_str} function uses registers {\tt r2} - {\tt r4},
so we need three bytes of memory to store these values.
This solution still isn't perfect, however, because we need to use one
register to load the address of {\tt print\_str\_mem} into! Since we
use this register to compute where the registers we're saving are
stored in memory, it is overwritten before it can be saved.
Therefore, not {\em all} registers can be preserved and restored using
this scheme.
We have the same problem with storing the return address. This value
must be left in a register, so the program can do the {\tt beq} to
return from the function. We could use another register for this, but
it turns out that this is unnecessary. By being careful and storing
the return address in memory, along with the values of registers {\tt
r2} through {\tt r4}, we can get away with using just one ``scratch''
register, register {\tt r15}.
In program {\tt print-4}, the four bytes of memory after the {\tt
print\_str\_mem} are used to store the preserved values (the return
address, and the contents of registers {\tt r2} - {\tt r4}).
\begin{figure}
\caption{The source code of {\tt print-4.asm}}
\hrule
\input{Tutorial/print-4}
\hrule
\end{figure}
\section{Recursive Functions}
The method of preserving the values of the registers used by a
function in explicit memory locations, as done in {\tt print-4} has
serious drawbacks. First, it requires that memory be set aside to
store the registers for each function, and this memory is always set
aside for this purpose, even when none of the functions are being
called.
More importantly, however, it cannot be used to implement {\em
recursive} functions. A recursive function is a function that calls
itself (either indirectly or directly).
For this section, the recursive function we will use is the function
for computing the $n$'th Fibonacci number. The sequence of Fibonacci
numbers is defined as:
\begin{itemize}
\item Fib(0) = 1
\item Fib(1) = 1
\item Fib(n) = Fib(n-1) + Fib(n-2) if $n > 1$.
\end{itemize}
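To make the recursion concrete, here is the same definition written out in Python (an illustration only; the {\sc Ant-8} version is developed below):
\begin{verbatim}
# Recursive Fibonacci, following the definition above (Fib(0) = Fib(1) = 1).
def fib(n):
    if n < 2:
        return 1
    return fib(n - 1) + fib(n - 2)

print([fib(n) for n in range(8)])    # [1, 1, 2, 3, 5, 8, 13, 21]
\end{verbatim}
Each call above the base case makes two further calls before it can return, which is exactly the pattern that defeats the fixed per-function storage used in {\tt print-4} and motivates the stack introduced next.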
\subsection{Using Memory as a Stack}
Instead of setting aside a specific area of memory for each function, we
will set aside a pool of memory and use it as temporary storage for
all of the functions. At any given moment, we will keep track of what
part of the memory is being used, and what parts are unused.
Keeping track of what parts are used and unused would seem like a
tedious and difficult exercise, but it is not, thanks to a key
observation about the way that functions execute-- if function A calls
function B, then function B must return before function A. Therefore,
we can simply organize our pool of memory as an array. When we call
function A, we can set aside as much of the array as A needs, starting
at the beginning of the array. When function B is called, we can put
its temporary storage immediately after the storage from A. All we
really need to keep track of is how much of the array is in use at any
time.
This data structure, where temporary function records are stacked on
top of each other, is called a {\em stack}. The common operations on
a stack are to {\em push} a value, which means to add it to the end of
the stack, and to {\em pop} a value, which means to remove it from the
end.
\subsection{General Function Linkage}
\subsubsection{Calling a Function}
Before jumping or branching to a function, ...
\subsubsection{Function Preamble}
\begin{enumerate}
\item {\bf Save the return address in register {\tt r4}.}
The {\tt jmp} or branch instruction that invokes the function
saves the return address in register {\tt r1}. Many
instructions overwrite register {\tt r1}, so we must extract
the return address from {\tt r1} before executing any of them,
or else the return address will be lost. To simplify things,
we might as well do this immediately.
By convention, we temporarily save the return address in
register {\tt r4}.
\item {\bf Preserve the return address.}
Store the return address onto the stack. Register {\tt r2}
is used as the stack pointer.
\item {\bf Preserve the registers.}
Store each of the registers that we want to restore later onto
the stack.
\item {\bf Increment the stack pointer.}
Move the stack pointer up, so that if any other functions
are called they start with the stack in the right place.
\end{enumerate}
\subsubsection{Returning From a Function}
\begin{enumerate}
\item {\bf Put the return value (if any) into register {\tt r3}.}
If the function returns a value, by convention the caller will
expect to find it in register {\tt r3}.
\item {\bf Decrement the stack pointer.}
Move the stack pointer back to its previous position,
deallocating the current stack frame.
\item {\bf Restore the return address.}
By convention, we load the return address into register {\tt
r4}.
\item {\bf Restore the registers.}
Load the contents of each of the preserved registers back into
the registers, from the stack. For each {\tt st1} instruction
in the function preamble, there must be a corresponding {\tt
ld1}.
\item {\bf Branch to the return address.}
Using {\tt beq}, branch back to the return address, which
by convention was stored in {\tt r4}.
\end{enumerate}
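The following Python sketch is only an illustration of the stack discipline described above, not {\sc Ant-8} code: a list stands in for the stack memory, each call pushes the values it must preserve on entry, and the matching pops happen in reverse order just before the return.
\begin{verbatim}
# Illustration of the calling convention: push on entry, pop (in reverse
# order) just before returning.  Because calls nest, every pop comes off
# the top of the stack.
stack = []

def fib(n, return_address):
    # preamble: preserve the return address and a value we will reuse
    stack.append(return_address)
    stack.append(n)
    # body
    if n < 2:
        result = 1
    else:
        result = fib(n - 1, "after first call") + fib(n - 2, "after second call")
    # return sequence: restore in reverse order, then "branch" back
    n = stack.pop()
    return_address = stack.pop()
    return result

print(fib(6, "main"))                # prints 13
\end{verbatim}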
\subsection{Optimized Function Linkage}
The general method of building stack frames described in the previous
section is always correct, but frequently it is far from optimal. For
example, in the {\tt fib} function, we always save all of the
registers that {\em might} potentially be used by the function, even
though many of these registers are not used. The base case of the
recursion occurs in more than half of the calls to {\tt fib}, so more
than half of the time this is wasted effort. It would be more
efficient to treat the base case separately from the recursive case,
and only do all the work of preserving the registers when actually
necessary.
An example is shown in {\tt fib-2.asm}. The {\tt fib} function is
very simple, and doesn't preserve many registers, but the basic idea
of handling the base case separately is illustrated.
% \begin{figure}
% \caption{The source code of {\tt fib-2.asm}}
% \hrule
% \input{Tutorial/fib-2}
% \hrule
% \end{figure}
\end{document}
| {
"alphanum_fraction": 0.7602361098,
"avg_line_length": 36.9307958478,
"ext": "tex",
"hexsha": "dc8f333affd2e2913bcb2e8bbfb2ceba6ce5a7b8",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-07-15T04:09:05.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-07-15T04:09:05.000Z",
"max_forks_repo_head_hexsha": "d85952e3050c352d5d715d9749171a335e6768f7",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "geoffthorpe/ant-architecture",
"max_forks_repo_path": "Documentation/func.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d85952e3050c352d5d715d9749171a335e6768f7",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "geoffthorpe/ant-architecture",
"max_issues_repo_path": "Documentation/func.tex",
"max_line_length": 77,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "d85952e3050c352d5d715d9749171a335e6768f7",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "geoffthorpe/ant-architecture",
"max_stars_repo_path": "Documentation/func.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2664,
"size": 10673
} |
\documentclass[12pt]{article}
\usepackage[utf8]{inputenc}
\usepackage{fullpage}
\usepackage{graphicx}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{float}
\usepackage{mathtools}
\usepackage[font=small,labelfont=bf]{caption}
\graphicspath{ {images/} }
\usepackage{listings}
\usepackage{color} %red, green, blue, yellow, cyan, magenta, black, white
\definecolor{mygreen}{RGB}{28,172,0} % color values Red, Green, Blue
\definecolor{mylilas}{RGB}{170,55,241}
\usepackage{pdfpages}
\setlength{\parindent}{1cm}
\DeclarePairedDelimiter\abs{\lvert}{\rvert}%
\DeclarePairedDelimiter\norm{\lVert}{\rVert}%
\newcommand{\overbar}[1]{\mkern 1.5mu\overline{\mkern-1.5mu#1\mkern-1.5mu}\mkern 1.5mu}
\linespread{1.25}
\title{Design and Control of a Photonic Neural Network Applied to Low-Latency Classification}
\author{Ethan Gordon '17 \\ \em{[email protected]} \\ \\ Advisor: Paul R. Prucnal \\ \em{[email protected]} \\ \\ Submitted in partial fulfillment \\ of the requirements for the degree of \\ Bachelor of Science in Engineering \\ Department of Electrical Engineering \\ Princeton University \\ \\}
\date{May 8, 2017}
% Default fixed font does not support bold face
\DeclareFixedFont{\ttb}{T1}{txtt}{bx}{n}{12} % for bold
\DeclareFixedFont{\ttm}{T1}{txtt}{m}{n}{12} % for normal
% Custom colors
\definecolor{deepblue}{rgb}{0,0,0.5}
\definecolor{deepred}{rgb}{0.6,0,0}
\definecolor{deepgreen}{rgb}{0,0.5,0}
% Python style for highlighting
\newcommand\pythonstyle{\lstset{
language=Python,
breaklines=true,
basicstyle=\ttm,
otherkeywords={self}, % Add keywords here
keywordstyle=\ttb\color{deepblue},
emph={MyClass,__init__}, % Custom highlighting
emphstyle=\ttb\color{deepred}, % Custom highlighting style
stringstyle=\color{deepgreen},
frame=tb, % Any extra options here
showstringspaces=false %
}}
% Python for external files
\newcommand\pythonexternal[2][]{{
\pythonstyle
\lstinputlisting[#1]{#2}}}
\begin{document}
\maketitle
\newpage
\section*{Honor Statement}
I hereby declare that this Independent Work report represents my own work in accordance with University regulations.
\begin{flushright}
\includegraphics[width=0.2\textwidth]{Signature} \\
Ethan K. Gordon '17
\end{flushright}
\newpage
\begin{centering}
{\LARGE Design and Control of a Photonic Neural Network Applied to Low-Latency Classification} \\
\vspace*{10px}
{\large Ethan Gordon '17} \\
{\em [email protected]} \\
\end{centering}
\section*{Abstract}
(Abstract)
\newpage
\section*{Acknowledgements}
(Acknowledgements) \\
\noindent {\em (Dedication)}
\tableofcontents
\newpage
\section{Background}
\subsection{Motivation}
(Limitations of Electronic / Software Neural Networks)
\subsection{Operating Principles}
%(Requirements for a Neural Network, Weighted Addition.)
\begin{equation}
\vec{x}_{i+1} = f(\vec{w}\cdot\vec{x}_i + b)
\end{equation}
\begin{equation}
S^\dag S = I \implies \begin{bmatrix}
r_c^* & t_c^* \\
t_c^* & r_c^*
\end{bmatrix}
\begin{bmatrix}
r_c & t_c \\
t_c & r_c
\end{bmatrix} = I \implies \begin{cases}
|r_c|^2 + |t_c|^2 = 1 \\
r_c^* t_c + r_c t_c^* = 0
\end{cases}
\end{equation}
\begin{equation}
\beta = (r\beta + i\sqrt{1 - r^2}\alpha)ae^{i\phi} \implies \beta = \frac{i\sqrt{1-r^2}\alpha a e^{i\phi}}{1 - rae^{i\phi}}
\end{equation}
\begin{equation}
E_{thru} = \frac{1}{\alpha}(r\alpha + i\sqrt{1-r^2}\beta) = r - \frac{(1-r^2)a e^{i\phi}}{1 - rae^{i\phi}} = \frac{r-ae^{i\phi}}{1 - rae^{i\phi}}
\end{equation}
\begin{equation}
P_{thru} = \frac{2r^2(1-cos(\phi))}{1 + r^4 - 2r^2cos(\phi)}
\end{equation}
\begin{equation}
P_{drop} = 1-P_{thru} = \frac{(1-r^2)^2}{1 + r^4 - 2r^2cos(\phi)}
\end{equation}
\begin{equation}
P_{drop} \approx \frac{(\frac{1}{r}-r)^2}{(\Delta\phi)^2 + (\frac{1}{r}-r)^2}
\end{equation}
%(Description of Microring and PIN Physics.)
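As a numerical sanity check of the expressions above (an illustrative sketch; the coupling value $r=0.9$ is an arbitrary assumption and the ring is taken as lossless, as in the formulas), the following Python snippet confirms that $P_{thru}+P_{drop}=1$ and that $P_{drop}$ approaches the Lorentzian approximation near resonance.
\begin{verbatim}
# Sanity check of the through/drop-port expressions for a lossless ring.
import math

r = 0.9                                   # arbitrary self-coupling coefficient
for phi in [0.01 * k for k in range(-300, 301)]:
    den = 1 + r**4 - 2 * r**2 * math.cos(phi)
    p_thru = 2 * r**2 * (1 - math.cos(phi)) / den
    p_drop = (1 - r**2)**2 / den
    assert abs(p_thru + p_drop - 1) < 1e-9      # power conservation

# Near resonance the drop port approaches the Lorentzian form.
for dphi in [1e-3, 5e-3, 1e-2]:
    den = 1 + r**4 - 2 * r**2 * math.cos(dphi)
    exact = (1 - r**2)**2 / den
    w = 1 / r - r
    approx = w**2 / (dphi**2 + w**2)
    assert abs(exact - approx) < 1e-3
\end{verbatim}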
\subsection{Previous Photonic Networks}
(Princeton's First Neural Network (1-Neuraon, 2-Neuron), and limitations.)
\section{Network Design}
\subsection{Axons and Optical Topology}
(Description of Axons, Lorentzian Nonlinearity)
(Star Topology vs. Hairpin Topology, Previous Star Topology)
\subsection{Feed-Forward and Recurrent Networks}
(Feed-Forward and Recurrent Network Descriptions in Star and Hairpin Topologies.)
\subsection{Experimental Design: A 2-3-1 Feed-Forward Network}
(What it says on the tin.)
\section{Calibration, Control, and Training}
\subsection{Thermal Calibration}
(Description of Thermal Calibration Code and Procedure.)
\subsection{Weight Calibration}
(Description of Weight Calibration Code and Procedure.)
\subsection{Modified Backpropagation}
(Brief summary of backprop, and the changes in procedure required to match network dynamics.
\section{Experimental Results}
(TODO: Flesh Out)
(Probably will contain: ThermalCal Results, WeightCal Results, Backprop, and final classification fidelity.)
\section{Future Work: Mode Division Multiplexing}
(Motivation: Increase Neuron Density)
\subsection{Operating Principles}
(Description of Transverse Modes.)
(Simulated Results.)
\subsection{Experimental Validation}
(Description and Presentation of Experimental Results for MDM coupling.)
\subsection{Challenges}
(Topology Change: Must use Hairpin Topology)
(Intermodal Mixing, Calibration Difficulty)
\section{Conclusions}
(RF Applications)
(Need for non-thermal modulation for better plasticity.)
\newpage
\begin{thebibliography}{100}
\bibitem{demo} A. Tait, et al., "Demonstration of a silicon photonic neural network," {\em Photonics Society Summer Topical Meeting Series (SUM)}, IEEE, 2016.
\bibitem{control} A. Tait, et al., "Multi-channel control for microring
weight banks," Opt. Express 24, 8895-8906 (2016).
\bibitem{colah} http://colah.github.io/posts/2014-03-NN-Manifolds-Topology/
\bibitem{image} http://colah.github.io/posts/2015-08-Understanding-LSTMs/
\bibitem{lstm} S. Hochreiter and J. Schmidhuber, "Long Short-Term Memory," {\em Neural Computation}, 9(8), 735-1780 (1997).
\bibitem{hopfield} Hopfield, et al., "'Neural' computation of decisions in optimization problems," Biological cybernetics, 52(3), 141-152 (1985).
\bibitem{qubit} M.D. Reed, "Entanglement and Quantum Error Correction with
Superconducting Qubits," PhD Dissertation, Yale University (2013).
\bibitem{rf} K. E. Nolan, et al. "Modulation scheme classification for 4G software radio wireless networks." Proc. IASTED. 2002.
\end{thebibliography}
\newpage
\section{Appendices}
(Code Appendices)
% THIS IS A COMMENT (not seen in final, compiled, report), indicated by %
%% Example of Paragraphs and Matrices
%In these matrices, the parameters $\theta_i$, $\alpha_i$, $d_i$ and $a_i$ represent the joint angle, link twist, link offset and link length, respectively. Therefore, in order to get the appropriate matrix, one need only substitute the corresponding values into each variable. For this assignment, our matrices are computed directly in our code.
%For example, to find the position of the end of the first member, we would only multiply by its matrix:
%\begin{equation}
%\label{eqn:matrices}
%\begin{bmatrix}
% x \\
% y \\
% z \\
%\end{bmatrix}
%= A_1
%\begin{bmatrix}
% x_0 \\
% y_0 \\
% z_0 \\
%\end{bmatrix}
%\end{equation}
%where $x_0$, $y_0$ and $z_0$ are the coordinates of the origin. However, for further links we have to multiply the matrices in the right order:
%\begin{equation}
%\begin{bmatrix}
% x \\
% y \\
% z \\
%\end{bmatrix}
%= A_1A_2A_3A_4
%\begin{bmatrix}
% x_0 \\
% y_0 \\
% z_0 \\
%\end{bmatrix}
%\end{equation}
%The above formula gives the position of the end of the fourth member (Robonaut's fingertip!).
% Adding a Figure:
%\begin{figure}
%\centering
%\includegraphics[width=0.5\textwidth]{quadcopter}
%\caption{A Crude CAD Model of the Syma X11 Quadcopter}
%\label{quadcopter}
%\end{figure}
% Referencing a Figure
% "Blah Blah as you can see in Figure \ref{quadcopter (figure label)}, Blah Blah"
% Make a New Page
%\newpage
% How to add code:
% \lstinputlisting{<file>.m}
% With lines
% \lstinputlisting[firstline=5, lastline=10]{<file>.m}
%This is the bibliography. To make a new item, write \bibitem{good_name_here}. Add %the actual citation directly after this statement in this section.
%Then, when you want to reference it in your text above, just type %\cite{good_name_here} where you want the in-text citation to go above
%\begin{thebibliography}{100}
%\bibitem{taxonomy} Kang, S. B., "Grasp Taxonomy," http://www.cs.cmu.edu/afs/cs/usr/sbk/www/thesis/taxonomy.html
%\bibitem{Kasdin} Kasdin, N. J., and Paley, D. A. (2011). Engineering Dynamics: A Comprehensive Introduction. Princeton, NJ: Princeton University Press.
%\bibitem{doc} Syma. (n.d.). X11/X11C Instruction Manual. Syma.n
%\end{thebibliography}
\end{document}
| {
"alphanum_fraction": 0.7291761493,
"avg_line_length": 31.3857142857,
"ext": "tex",
"hexsha": "23c0fb4ca418e951619b71fb999c4533fb58a9d2",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "076513e9346f4f5b20a5b1e7b3bc7eda5959bc33",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "egordon/thesis",
"max_forks_repo_path": "main.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "076513e9346f4f5b20a5b1e7b3bc7eda5959bc33",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "egordon/thesis",
"max_issues_repo_path": "main.tex",
"max_line_length": 346,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "076513e9346f4f5b20a5b1e7b3bc7eda5959bc33",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "egordon/thesis",
"max_stars_repo_path": "main.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2685,
"size": 8788
} |
\section{Introduction}
\textbf{Emerging Trends and Challenges.}
Give a one paragraph overview of the domain that
the paper is going to describe a problem in. Make
sure and reference any existing papers that have
named the domain.
Describe in one paragraph why this domain is important.
It is nice if you can give specific examples of other
research or things that are being done in this domain
that are important to society.
\textbf{Open Problem $\Rightarrow$ Name the problem
you are going to address.}
Now, tell the reader what important problem from
this domain has not been solved that you are going
to attack. Stick to one paragraph. Be VERY specific about what general
problem you are solving.
Briefly discuss and cite in a single paragraph other
research done in this domain. Make sure and explain
why the existing research does not address the
problem that you are describing and solving in the
paper.
If you have some metrics that you are going
to use to claim superiority over prior work, you
can introduce them in a single paragraph here.
Explain why the metrics are important and why
when you evaluate the existing work using these
metrics it motivates your new work.
\textbf{Solution Approach $\Rightarrow$ A pithy
heading for your solution.} To address XYZ
problem, we have done QRS. Describe what you
have done and why it is novel.
In Section~\ref{results} we present empirical
data that we have gathered from experiments
showing QRS. Give a 1-paragraph overview of
what experiments you ran and how they showed
you were superior to existing solutions.
This paper provides the following contributions
to the study of XYZ:
\begin{itemize}
\item Pithy sentence describing contribution.
\item Pithy sentence describing contribution 2.
\item Pithy sentence describing contribution 3.
\item We present empirical results that show QRS.
\end{itemize}
% Make sure and update this!
%
The remainder of this paper is organized as follows:
Section~\ref{motivation} describes PQR, which we
use as a motivating example throughout the paper;
Section~\ref{challenges} discusses the challenges that
we faced when ..... ; Section~\ref{solution} covers our solution to XYZ; Section~\ref{results} presents empirical
results from analyzing TUV; and Section~\ref{conclusion}
presents concluding remarks and lessons learned.
| {
"alphanum_fraction": 0.8,
"avg_line_length": 33.8405797101,
"ext": "tex",
"hexsha": "060aaea9cd12d00416206bfa22dd68dc02b54f5c",
"lang": "TeX",
"max_forks_count": 78,
"max_forks_repo_forks_event_max_datetime": "2022-01-21T10:26:15.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-01-17T20:24:29.000Z",
"max_forks_repo_head_hexsha": "68b7446399f3669d3dfec11eafd07a8a8a84c249",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "CBR250/latex-templates",
"max_forks_repo_path": "latex-templates/ieee-1.8/sections/introduction.tex",
"max_issues_count": 3,
"max_issues_repo_head_hexsha": "68b7446399f3669d3dfec11eafd07a8a8a84c249",
"max_issues_repo_issues_event_max_datetime": "2019-05-12T17:03:20.000Z",
"max_issues_repo_issues_event_min_datetime": "2015-03-31T20:41:11.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "CBR250/latex-templates",
"max_issues_repo_path": "latex-templates/ieee-1.8/sections/introduction.tex",
"max_line_length": 117,
"max_stars_count": 190,
"max_stars_repo_head_hexsha": "68b7446399f3669d3dfec11eafd07a8a8a84c249",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "CBR250/latex-templates",
"max_stars_repo_path": "latex-templates/ieee-1.8/sections/introduction.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-01T19:11:23.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-01-02T19:15:15.000Z",
"num_tokens": 519,
"size": 2335
} |
\chapter{Inference for Statistical Experiments}\label{S:StatExps}
\section{Introduction}\label{S:ExpsIntro}
We formalize the notion of a statistical experiment. Let us first motivate the need for a statistical experiment. Recall that statistical inference or learning is the process of using observations or data to infer the distribution that generated it. A generic question is:
\[
\text{Given realizations from $X_1, X_2, \ldots, X_n \sim$ some unknown DF $F$, how do we infer $F$} ?
\]
However, to make this question tractable or even sensible it is best to restrict ourselves to a particular class or family of DFs that may be assumed to contain the unknown DF $F$.
\begin{definition}[Experiment]
A statistical experiment $\EE{E}$ is a set of probability distributions (DFs, PDFs or PMFs)
$\Pz := \{\P_{\theta} : \theta \in \BB{\Theta} \}$ associated with a RV $X$ and indexed by the set $\BB{\Theta}$.
We refer to $\BB{\Theta}$ as the parameter space or the index set and $d:\BB{\Theta} \rightarrow \Pz$ that associates to each $\theta \in \BB{\Theta}$ a probability $\P_{\theta} \in \Pz$ as the index map:
\[ \BB{\Theta} \ni \theta \mapsto \P_{\theta} \in \Pz \enspace .\]
\end{definition}
\section{Some Common Experiments}
Next, let's formally consider some experiments we have already encountered.
\begin{Exp}[The Fundamental Experiment]\label{Exp:Uniform01}
The `uniformly pick a number in the interval $[0,1]$' experiment is the following singleton family of DFs :
\[
\Pz = \{ \, F(x) = x \BB{1}_{[0,1]}(x) \, \}
\]
where, the only distribution $F(x)$ in the family $\Pz$ is a re-expression of~\eqref{E:Uniform01DF} using the indicator function $\BB{1}_{[0,1]}(x)$. The parameter space of the fundamental experiment is a singleton whose DF is its own inverse, i.e.~$F(x) = F^{[-1]}(x)$.
Recall from Exercise~\ref{underMPSA} that this is equivalent to infinitely many independent and identical $\bernoulli(1/2)$ trials, i.e., independently tossing a fair coint infinitely many times.
%The two dimensional parameter space or index set for this experiment is $\BB{\Theta} = \{ -\infty < a < b < \infty \} = \{ (a,b) \in \Rz \times \Rz : a < b \}$, a half-plane.
\end{Exp}
\begin{Exp}[Bernoulli]\label{Exp:Bernoulli}
The `toss 1 time' experiment is the following family of densities (PMFs) :
\[
\Pz = \{ \, f(x; \theta) : \theta \in [0,1] \, \}
\]
where, $f(x; \theta)$ is given in~\eqref{E:Bernoullipdf}. The one dimensional parameter space or index set for this experiment is $\BB{\Theta} = [0,1] \subset \Rz$.
\end{Exp}
\begin{Exp}[Point~Mass]\label{Exp:PointMass}
The `deterministically choose a specific real number' experiment is the following family of DFs :
\[
\Pz = \{ \, F(x; a) : a \in \Rz \, \}
\]
where, $F(x; a)$ is given in~\eqref{E:PointMasscdf}. The one dimensional parameter space or index set for this experiment is $\BB{\Theta} = \Rz$, the entire real line.
\end{Exp}
Note that we can use the PDF's or the DF's to specify the family $\Pz$ of an experiment. When an experiment can be parametrized by finitely many parameters it is said to be a {\bf parametric} experiment. \hyperref[Exp:Bernoulli]{Experiment~\ref*{Exp:Bernoulli}} involving discrete RVs as well as \hyperref[Exp:PointMass]{Experiment \ref*{Exp:PointMass}} are {\bf parametric} since they both have only one parameter (the parameter space is one dimensional for Experiments \ref*{Exp:Bernoulli} and \ref*{Exp:PointMass}). The \hyperref[Exp:Uniform01]{Fundamental Experiment \ref*{Exp:Uniform01}} involving the continuous RV of \hyperref[M:Uniform01]{Model \ref*{M:Uniform01}} is also parametric since its parameter space, being a point, is zero-dimensional. The next example is also parametric and involves $(k-1)$-dimensional families of discrete RVs.
\begin{Exp}[{de~Moivre[k]}]\label{Exp:GenDiscrete}
The `pick a number from the set $[k] := \{1,2,\ldots,k\}$ somehow' experiment is the following family of densities (PMFs) :
\[
\Pz = \{ \, f(x; \theta_1,\theta_2,\ldots,\theta_k) : (\theta_1,\theta_2,\ldots,\theta_k) \in \bigtriangleup_k \, \}
\]
where, $f(x; \theta_1,\theta_2,\ldots,\theta_k)$ is any PMF such that
\[
f(x; \theta_1,\theta_2,\ldots,\theta_k) = \theta_x, \qquad x \in \{1,2,\ldots,k\} \ .
\]
The $k-1$ dimensional parameter space $\BB{\Theta}$ is the $k$-Simplex $\bigtriangleup_k$. This is an `exhaustive' experiment since all possible densities over the finite set $[k] := \{1,2,\ldots,k\}$ are being considered; it can be thought of as ``the outcome of rolling a convex polyhedral die with $k$ faces and an arbitrary center of mass specified by the $\theta_i$'s.''
\begin{figure}
\caption{Geometry of the $\BB{\Theta}$'s for $\demoivre[k]$ Experiments with $k \in \{1, 2, 3, 4\}$.}
\vspace{5cm}
\end{figure}
\end{Exp}
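For instance (an illustrative sketch with an arbitrary choice $k=3$ and $\theta=(0.2,0.5,0.3) \in \bigtriangleup_3$), one draw from the $\demoivre[k]$ experiment can be simulated from a single draw of the fundamental experiment via the inverse DF:
\begin{verbatim}
# Sketch: simulate draws from a de Moivre[k] experiment with a given theta
# in the k-simplex, using only Uniform(0,1) samples (the fundamental
# experiment) and the inverse-DF method.
import random

theta = [0.2, 0.5, 0.3]                  # an arbitrary point in the 3-simplex
cdf = [sum(theta[:i + 1]) for i in range(len(theta))]

def de_moivre_sample():
    u = random.random()                  # one draw from the fundamental experiment
    for x, F_x in enumerate(cdf, start=1):
        if u <= F_x:
            return x                     # smallest x with F(x) >= u

draws = [de_moivre_sample() for _ in range(10000)]
print([draws.count(x) / len(draws) for x in (1, 2, 3)])   # approximately theta
\end{verbatim}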
An experiment with an infinite dimensional parameter space $\BB{\Theta}$ is said to be {\bf nonparametric}. Next we consider two nonparametric experiments.
\begin{Exp}[All DFs]\label{Exp:AllDFs}
The `pick a number from the Real line in an arbitrary way' experiment is the following family of distribution functions (DFs) :
\[
\Pz = \{ \, F(x; F) : F~is~a~DF \, \} = \BB{\Theta}
\]
where, the DF $F(x; F)$ is indexed or parameterized by itself. Thus, the parameter space
\[
\BB{\Theta}=\Pz=\{ \text{all DFs}\}
\]
is the infinite dimensional space of ``{\bf All DFs}''.
\end{Exp}
Next we consider a {\bf nonparametric} experiment involving continuous RVs.
\begin{Exp}[Sobolev Densities]\label{Exp:Sob}
The `pick a number from the Real line in some reasonable way' experiment is the following family of densities (pdfs) :
\[
\Pz = \left\{ \, f(x; f) : \int(f''(x))^2 < \infty \, \right\} = \BB{\Theta}
\]
where, the density $f(x; f)$ is indexed by itself. Thus, the parameter space $\BB{\Theta}=\Pz$ is the infinite dimensional {\bf Sobolev space} of ``not too wiggly functions''.
\end{Exp}
\section{Typical Decision Problems with Experiments}
Some of the concrete problems involving experiments include:
\begin{itemize}
\item {\bf Simulation:} Often it is necessary to simulate a RV with some specific distribution to gain insight into its features or simulate whole systems such as the air-traffic queues at `London Heathrow' to make better management decisions.
\item {\bf Estimation:}
\begin{enumerate}
\item {\bf Parametric Estimation:} Using samples from some unknown DF $F$ parameterized by some unknown $\theta$, we can estimate $\theta$ from a statistic $T_n$ called the estimator of $\theta$ using one of several methods (maximum likelihood, moment estimation, or parametric bootstrap).
\item {\bf Nonparametric Estimation of the DF:} Based on $n$ IID observations from an unknown DF $F$, we can estimate it under the general assumption that $F \in \{ \text{all DFs} \}$.
\item {\bf Confidence Sets:} We can obtain a $1-\alpha$ confidence set for the point estimates, of the unknown parameter $\theta \in \BB{\Theta}$ or the unknown DF $F \in \{ \text{all DFs} \}$
\end{enumerate}
\item {\bf Hypothesis Testing:} Based on observations from some DF $F$ that is hypothesized to belong to a subset $\BB{\Theta}_0$ of $\BB{\Theta}$ called the space of null hypotheses, we will learn to test (attempt to reject) the falsifiable null hypothesis that $F \in \BB{\Theta}_0 \subset \BB{\Theta}$.
\item $\ldots $
\end{itemize}
\section{Decision Problems and Procedures for Actions}\label{S:Decisions}
Write down the Table from lectures 1 \& 2 giving examples of decision problems, procedures and action spaces for typical estimation, hypothesis testing and prediction problems with associated principles (Maximum Likelihood, Empirical Risk Minimisation where risk is expectation of specific loss functions, etc.) and algorithms (including optimisation (Stochastic)Newton/gradient-descent, etc.).
\vspace{10cm}~\\
\cleardoublepage
\addcontentsline{toc}{chapter}{Declaration}
\chapter*{Declaration}
We, Group CSC16-41, do hereby declare that this Project Report is original and has not been published and/or submitted for any other degree award to any other University before.
\begin{table}[!ht]
\centering
\resizebox{\textwidth}{!}{%
\begin{tabular}{|l|l|l|l|}
\hline
\textbf{\#} & \textbf{Names} & \textbf{Registration Number} & \textbf{Signature} \\ \hline
1 & Spice Diana & 16/U/2345/PS & \\ \hline
2 & Ibrahim Abiriga & 16/T/8702/EVE & \\ \hline
3 & Moses Golola & 16/U/9867/PS & \\ \hline
4 & Kim Kardashian & 16/U/324 & \\ \hline
\end{tabular}%
}
\end{table}
\vspace{1.0in}
\noindent
Date: \\
-----------------------------------------------------------------------------------
\newpage
%
% File acl2020.tex
%
%% Based on the style files for ACL 2020, which were
%% Based on the style files for ACL 2018, NAACL 2018/19, which were
%% Based on the style files for ACL-2015, with some improvements
%% taken from the NAACL-2016 style
%% Based on the style files for ACL-2014, which were, in turn,
%% based on ACL-2013, ACL-2012, ACL-2011, ACL-2010, ACL-IJCNLP-2009,
%% EACL-2009, IJCNLP-2008...
%% Based on the style files for EACL 2006 by
%%[email protected] or [email protected]
%% and that of ACL 08 by Joakim Nivre and Noah Smith
\documentclass[11pt,a4paper]{article}
\usepackage[hyperref]{acl2020}
\usepackage{times}
\usepackage{latexsym}
\usepackage{amsfonts}
\usepackage{amstext}
\usepackage{amsmath}
\usepackage{natbib}
\usepackage{float}
\usepackage[all]{xy}
\renewcommand{\UrlFont}{\ttfamily\small}
% This is not strictly necessary, and may be commented out,
% but it will improve the layout of the manuscript,
% and will typically save some space.
\usepackage{microtype}
%\aclfinalcopy % Uncomment this line for the final submission
%\def\aclpaperid{***} % Enter the acl Paper ID here
%\setlength\titlebox{5cm}
% You can expand the titlebox if you need extra space
% to show all the authors. Please do not make the titlebox
% smaller than 5cm (the original size); we will check this
% in the camera-ready version and ask you to change it back.
\newcommand\BibTeX{B\textsc{ib}\TeX}
\def\QQ{\mathbb Q}
\def\ZZ{\mathbb Z}
\def\RR{\mathbb R}
\def\CC{\mathbb C}
\def\FF{\mathbb F}
\def\NN{\mathbb N}
\def\AA{\mathbb A}
\def\II{\mathbb I}
\def\Cc{\mathcal C}
\def\Dd{\mathcal D}
\def\Pp{\mathcal P}
\title{Graph Convolution Networks and Graph Attention Networks in Relation Classification : Semeval2018 Task 7}
\author{Eli Goldner}
\begin{document}
\maketitle
\begin{abstract}
Intuitively, syntactic information in sentences should provide valuable cues
towards relations between entities in a sentence. We take dependency
parses as graphs to provide this syntactic information and leverage this
for relation extraction via different neural networks that operate directly on
graph structures.
\end{abstract}
\section{Introduction}
Dependency parses provide the kind of syntactic information that should be
useful for relation extraction, going beyond a treatment of sentential data
as purely sequential. They present the syntactic information purely as
relations between the words in a sentence, which affords a graph structure
with the words as nodes and the dependencies as edges. Unlike a
constituency parse, this does not add lower-information nodes in the form of
constituency labels, and it establishes shorter paths between the relevant nodes.
These semantic and syntactic considerations become more pressing in datasets
like the one we consider, namely the SemEval-2018 task 7 dataset, which
involves relation extraction and classification in sentences from scientific paper
abstracts. We evaluate the efficacy of specialized word embeddings and dependency
parses of these sentences using different {\bf DGL} implementations of
networks that can leverage the dependency graph structure while using
word embeddings as node features, namely graph convolutional networks
and graph attention networks.
\section{Dataset}
The SemEval-2018 task 7 dataset consists of
two subtasks, both sharing the same training data
of 350 abstracts annotated for
relation instances and categories, with different test
sets consisting of 150 scientific abstracts \cite{gabor-etal-2018-semeval}.
The first subtask, 1.1, provides manually annotated entities and the relation
between them for the training data, and the relevant entities in the test data.
For subtask 1.2, the goal is to perform the extraction on ``messier'' data,
in particular with automatically annotated entity occurrences.
\cite{macavaney-etal-2018-gu}
\section{Approach}
We borrowed the code for reading and parsing sentences,
evaluating model performance, and passing parameters
from \citet{macavaney-etal-2018-gu}\footnote{https://github.com/Georgetown-IR-Lab/semeval2018-task7}; the differences are the
way we represent dependency graphs to the model, the models we
implemented, and the features provided to the models in addition to the
dependency graphs. Using the pre-existing code, we extracted
dependency parses of the sentences with spaCy and converted them to
undirected graphs with NetworkX; similarly, we obtained the NetworkX
subgraphs induced by the entities in the dependency parse, along with the
shortest path between them.
Converting these to DGL graphs, we use different specialized
word embeddings (trained on Wiki News and arXiv, also borrowed from the
\citet{macavaney-etal-2018-gu} code) as node features for graph convolutional networks (GCNs)
and graph attention networks (GATs), and observe the results while varying different
hyperparameters. Our code is in this repository\footnote{https://github.com/Eli-Goldner/semeval2018-task7}
with detailed set-up instructions.
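For concreteness, the following is a rough sketch of the graph construction step (it is not the repository code; the spaCy model name, the DGL calls and the placeholder node features are assumptions about the environment):
\begin{verbatim}
# Sketch: spaCy dependency parse -> undirected NetworkX graph -> DGL graph.
import spacy
import networkx as nx
import dgl
import torch

nlp = spacy.load("en_core_web_sm")          # assumed model name
doc = nlp("Graph networks classify relations between entities.")

g = nx.Graph()
g.add_nodes_from(range(len(doc)))
g.add_edges_from((tok.i, tok.head.i) for tok in doc if tok.i != tok.head.i)

dg = dgl.from_networkx(g)                   # undirected edges become both directions
dg.ndata["feat"] = torch.randn(dg.num_nodes(), 300)   # placeholder embeddings
\end{verbatim}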
\subsection{Graph Convolutional Networks}
GCNs borrow the idea of convolution from CNNs but
apply it over the graph structure, representing the graph
by its adjacency matrix and maintaining a feature
representation for each node \cite{zhang-etal-2018-graph}.
For node features we take word embeddings
(oriented towards scientific data) trained on
Wiki News \cite{mikolov-etal-2018-advances}
and arXiv \cite{cohan-etal-2018-discourse}.
We experiment with the number of convolution
layers along with considering the entity subgraphs.
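As an illustration, a small GCN relation classifier over such a graph could look as follows (a sketch only, with placeholder dimensions and class count rather than the settings used in our experiments):
\begin{verbatim}
# Sketch of a small GCN relation classifier (placeholder hyperparameters).
import torch
import torch.nn as nn
from dgl.nn import GraphConv

class GCNClassifier(nn.Module):
    def __init__(self, in_dim=300, hid_dim=64, n_relations=6, n_layers=3):
        super().__init__()
        dims = [in_dim] + [hid_dim] * n_layers
        self.convs = nn.ModuleList(
            [GraphConv(dims[i], dims[i + 1], allow_zero_in_degree=True)
             for i in range(n_layers)])
        self.out = nn.Linear(hid_dim, n_relations)

    def forward(self, g, feats):
        h = feats
        for conv in self.convs:
            h = torch.relu(conv(g, h))   # message passing over the dependency graph
        return self.out(h.mean(dim=0))   # mean-pool node states, classify relation
\end{verbatim}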
\subsection{Graph Attention Network}
{\bf DGL}'s GATConv replaces a GCN's
normalized convolution operation with an attention mechanism
\cite{v2018graph}.
We start with a larger number of heads and narrow
them down to one across the layers.
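A sketch of this head-narrowing scheme with DGL's GATConv (again with placeholder dimensions, not our tuned settings) is:
\begin{verbatim}
# Sketch: concatenate heads in early layers, average down to one at the end.
import torch
import torch.nn.functional as F
from dgl.nn import GATConv

heads = [4, 4, 1]                                   # narrow the heads down to one
gat1 = GATConv(300, 64, num_heads=heads[0], allow_zero_in_degree=True)
gat2 = GATConv(64 * heads[0], 64, num_heads=heads[1], allow_zero_in_degree=True)
gat3 = GATConv(64 * heads[1], 64, num_heads=heads[2], allow_zero_in_degree=True)

def gat_forward(g, feats):
    h = gat1(g, feats).flatten(1)                   # (N, heads*64): concatenate heads
    h = gat2(g, F.elu(h)).flatten(1)
    h = gat3(g, F.elu(h)).mean(1)                   # (N, 64): single remaining head
    return h
\end{verbatim}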
\subsection{Features}
Again, for features we take the undirected dependency graph of the sentence
with word embeddings as node features and, optionally, the entity subgraphs
(we could not get the shortest-path subgraph working).
\section{Results}
To start, we trained for 50 epochs with a learning rate of 0.004.
Unless otherwise specified, all results reported are on the test set using the GCN model,
and all are on subtask 1.1, since I had trouble with model performance, perhaps due to timing.
\subsection{Varying Embeddings}
Of Wiki News, arXiv, and merged embeddings, merged performed the best
and was the first to make classifications outside of the ``use'' category.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|}
\hline
Macro-averaged & Precision & Recall & F1 \\ \hline
Wiki News & 12.68\% & 16.66\% & 12.07\% \\ \hline
arXiv & 11.29\% & 16.60\% & 11.78\% \\ \hline
Combined & 36.92\% & 18.46\% & 15.70\% \\ \hline
\end{tabular}
\end{table}
\subsection{Varying Epochs and Learning Rate}
Doubling the epochs on the combined embeddings performed worse, so, aside from later time issues,
we stuck to 50.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|}
\hline
Macro-averaged & Precision & Recall & F1 \\ \hline
100 epochs & 15.95\% & 17.71\% & 13.90\% \\ \hline
\end{tabular}
\end{table}
Doubling and halving the learning rate both resulted in worse performance,
so we stayed with 0.004 from here on.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|}
\hline
Macro-averaged & Precision & Recall & F1 \\ \hline
LR 0.008 & 8.22\% & 16.67\% & 11.01\% \\ \hline
LR 0.002 & 11.67\% & 16.90\% & 11.56\% \\ \hline
\end{tabular}
\end{table}
\subsection{Varying Convolution Layers}
For the GCN we increased the number of layers to five
which resulted in worse performance so we stuck with three.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|}
\hline
Macro-averaged & Precision & Recall & F1 \\ \hline
5 layers & 8.22\% & 16.67\% & 11.01\% \\ \hline
\end{tabular}
\end{table}
\subsection{Adding Subgraph Features}
Testing crashed when I added these, for reasons I could not determine,
but before training quit the scores converged more rapidly than for the
current best model, i.e. by epoch 5 on the training set.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|}
\hline
Macro-averaged & Precision & Recall & F1 \\ \hline
Entity subgraphs & 19.08\% & 18.78\% & 14.46\% \\ \hline
No subgraphs & 15.84\% & 16.79\% & 12.02\% \\ \hline
\end{tabular}
\end{table}
\subsection{GAT}
Due to time constraints I only trained the GAT models for 10 epochs,
first using just the main dependency graph, and second with the
added entity subgraphs. The crash happened again with the latter
model at 5 epochs; it seemed to be an issue caused by having to switch
machines for training. These are the results of both models at 5 epochs.
\begin{table}[H]
\begin{tabular}{|l|l|l|l|}
\hline
Macro-averaged & Precision & Recall & F1 \\ \hline
Entity subgraphs & 19.08\% & 18.78\% & 14.46\% \\ \hline
No subgraphs & 16.56\% & 16.80\% & 12.86\% \\ \hline
\end{tabular}
\end{table}
\section{Future Work}
\begin{itemize}
\item Training edge embeddings for the dependencies. Georgetown IR found a way to train their embeddings; we wanted to do the same for the graph nodes but could not.
\item Training weights for the node feature average instead of a naive average.
\item Additional features for nodes and edges:
\begin{itemize}
\item Nodes: word embedding, part of speech (?), dependency distance from the head of entity A, dependency distance from the head of entity B, dependency distance from the dependency tree root, whether the node is on the shortest dependency path between the entities.
\item Edges: an embedding for the dependency if one exists, direction, the label of the dependency if there is no direct embedding, whether the edge is on the shortest path between the entities.
\end{itemize}
Many of these are inspired by the Georgetown model but add more information to the graph interpretation.
\item More difficult: using constituency parses as trees/graphs. There would be no edge labels, so no edge embeddings, and constituent labels would probably have to be trained as well, but this would be better for keeping track of entities.
\end{itemize}
\section{Conclusion}
\bibliographystyle{acl_natbib}
\bibliography{references}
\end{document}
% Options for packages loaded elsewhere
\PassOptionsToPackage{unicode}{hyperref}
\PassOptionsToPackage{hyphens}{url}
\PassOptionsToPackage{dvipsnames,svgnames*,x11names*}{xcolor}
%
\documentclass[
11pt,
]{article}
\usepackage{amsmath,amssymb}
\usepackage[]{mathpazo}
\usepackage{ifxetex,ifluatex}
\ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{textcomp} % provide euro and other symbols
\else % if luatex or xetex
\usepackage{unicode-math}
\defaultfontfeatures{Scale=MatchLowercase}
\defaultfontfeatures[\rmfamily]{Ligatures=TeX,Scale=1}
\fi
% Use upquote if available, for straight quotes in verbatim environments
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
\IfFileExists{microtype.sty}{% use microtype if available
\usepackage[]{microtype}
\UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts
}{}
\makeatletter
\@ifundefined{KOMAClassName}{% if non-KOMA class
\IfFileExists{parskip.sty}{%
\usepackage{parskip}
}{% else
\setlength{\parindent}{0pt}
\setlength{\parskip}{6pt plus 2pt minus 1pt}}
}{% if KOMA class
\KOMAoptions{parskip=half}}
\makeatother
\usepackage{xcolor}
\IfFileExists{xurl.sty}{\usepackage{xurl}}{} % add URL line breaks if available
\IfFileExists{bookmark.sty}{\usepackage{bookmark}}{\usepackage{hyperref}}
\hypersetup{
pdftitle={Readings},
pdfauthor={Alex Stephenson},
colorlinks=true,
linkcolor=Maroon,
filecolor=Maroon,
citecolor=Blue,
urlcolor=blue,
pdfcreator={LaTeX via pandoc}}
\urlstyle{same} % disable monospaced font for URLs
\usepackage[margin=1in]{geometry}
\usepackage{graphicx}
\makeatletter
\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi}
\def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi}
\makeatother
% Scale images if necessary, so that they will not overflow the page
% margins by default, and it is still possible to overwrite the defaults
% using explicit options in \includegraphics[width, height, ...]{}
\setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio}
% Set default figure placement to htbp
\makeatletter
\def\fps@figure{htbp}
\makeatother
\setlength{\emergencystretch}{3em} % prevent overfull lines
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\setcounter{secnumdepth}{-\maxdimen} % remove section numbering
\ifluatex
\usepackage{selnolig} % disable illegal ligatures
\fi
\title{Readings}
\author{Alex Stephenson}
\date{Fall 2021}
\begin{document}
\maketitle
\hypertarget{readings}{%
\subsection{Readings}\label{readings}}
\hypertarget{week-1-research-design}{%
\subsubsection{Week 1: Research Design}\label{week-1-research-design}}
Lundberg, Ian, Rebecca Johnson, and Brandon M Stewart. 2021. ``What Is
Your Estimand? Defining the Target Quantity Connects Statistical
Evidence to Theory.'' American Sociological Review: 34.
\url{https://doi.org/10.1177/00031224211004187}
\hypertarget{week-2-potential-outcomes-and-estimands}{%
\subsubsection{Week 2: Potential Outcomes and
Estimands}\label{week-2-potential-outcomes-and-estimands}}
Testa, Paul. 2021. ``10 Types of Treatment Effect You Should Know
About.'' EGAP.
\url{https://egap.org/resource/10-types-of-treatment-effect-you-should-know-about/}
(August 8, 2021).
\hypertarget{week-3-why-randomizestatistical-review}{%
\subsubsection{Week 3: Why Randomize/Statistical
Review}\label{week-3-why-randomizestatistical-review}}
Cunningham, Scott. 2021. Causal Inference: The Mixtape Chapter 2. New
Haven: Yale University Press.
\url{https://mixtape.scunning.com/probability-and-regression.html}
\hypertarget{week-4-regression}{%
\subsubsection{Week 4: Regression}\label{week-4-regression}}
Morgan, Stephen L., and Christopher Winship. 2014. Counterfactuals and
Causal Inference: Methods and Principles for Social Research Chapter 6.
2nd ed.~Cambridge: Cambridge University Press.
\url{http://ebooks.cambridge.org/ref/id/CBO9781107587991} (September 8,
2020).
\hypertarget{week-5-regression}{%
\subsubsection{Week 5: Regression}\label{week-5-regression}}
Wilfahrt, Martha. 2018. ``Precolonial Legacies and Institutional
Congruence in Public Goods Delivery: Evidence from Decentralized West
Africa.'' World Politics 70(2): 239--74.
\hypertarget{week-6-field-experiments}{%
\subsubsection{Week 6: Field
Experiments}\label{week-6-field-experiments}}
Kalla, Joshua L., and David E. Broockman. 2020. ``Reducing Exclusionary
Attitudes through Interpersonal Conversation: Evidence from Three Field
Experiments.'' American Political Science Review 114(2): 410--25.
\hypertarget{week-7-survey-experiments}{%
\subsubsection{Week 7: Survey
Experiments}\label{week-7-survey-experiments}}
Mattes, Michaela, and Jessica L. P. Weeks. 2019. ``Hawks, Doves, and
Peace: An Experimental Approach.'' American Journal of Political Science
63(1): 53--66.
\hypertarget{week-8-spillovers-and-non-compliance}{%
\subsubsection{Week 8: Spillovers and
non-compliance}\label{week-8-spillovers-and-non-compliance}}
Banerjee, Abhijit, Arun G Chandrasekhar, Esther Duflo, and Matthew O
Jackson. 2019. ``Using Gossips to Spread Information: Theory and
Evidence from Two Randomized Controlled Trials.'' The Review of Economic
Studies 86(6): 2453--90.
\url{https://web.stanford.edu/~arungc/BCDJ_gossip.pdf}
Gerber, Alan S., and Donald P. Green. 2011. ``Field Experiments and
Natural Experiments. Parts 5-6'' The Oxford Handbook of Political
Science.
\url{https://www.oxfordhandbooks.com/view/10.1093/oxfordhb/9780199604456.001.0001/oxfordhb-9780199604456-e-050}
(August 8, 2021).
\hypertarget{week-9-selection-on-observables}{%
\subsubsection{Week 9: Selection on
Observables}\label{week-9-selection-on-observables}}
Imai, Kosuke, and James Lo. 2021. ``Robustness of Empirical Evidence for
the Democratic Peace: A Nonparametric Sensitivity Analysis.''
International Organization 75(3): 901--19.
\url{https://imai.fas.harvard.edu/research/files/dempeace.pdf}
\hypertarget{week-10-fixed-effects}{%
\subsubsection{Week 10: Fixed Effects}\label{week-10-fixed-effects}}
Kropko, Jonathan, and Robert Kubinec. 2020. ``Interpretation and
Identification of Within-Unit and Cross-Sectional Variation in Panel
Data Models.'' PLOS ONE 15(4): e0231349.
\hypertarget{week-11-what-is-a-natural-experiment}{%
\subsubsection{Week 11: What is a ``Natural''
Experiment}\label{week-11-what-is-a-natural-experiment}}
Titiunik, Rocio. 2020. ``Natural Experiments.'' arXiv:2002.00202
{[}econ, stat{]}. \url{http://arxiv.org/abs/2002.00202} (February 16,
2020).
Sekhon, Jasjeet S., and Rocío Titiunik. 2012. ``When Natural Experiments
Are Neither Natural nor Experiments.'' American Political Science Review
106(1): 35--57.
\url{https://www.cambridge.org/core/journals/american-political-science-review/article/abs/when-natural-experiments-are-neither-natural-nor-experiments/E8A67829C2EEBC429CDD671B4C9313F9}
Hyde, Susan D. 2007. ``The Observer Effect in International Politics:
Evidence from a Natural Experiment.'' World Politics 60(1): 37--63.
\url{https://www.cambridge.org/core/journals/world-politics/article/abs/observer-effect-in-international-politics-evidence-from-a-natural-experiment/B72409C4FB717F72CAB765024468511F}
\hypertarget{week-12-regression-discontinuity-designs}{%
\subsubsection{Week 12: Regression Discontinuity
Designs}\label{week-12-regression-discontinuity-designs}}
Cattaneo, Matias D., Nicolás Idrobo, and Rocío Titiunik. 2019. A
Practical Introduction to Regression Discontinuity Designs: Foundations.
1st ed.~Cambridge University Press.
\url{https://www.cambridge.org/core/product/identifier/9781108684606/type/element}
(December 11, 2019).
\hypertarget{week-13-instrumental-variables}{%
\subsubsection{Week 13: Instrumental
Variables}\label{week-13-instrumental-variables}}
van der Windt, Peter. 2021. ``10 Things to Know About the Local Average
Treatment Effect.'' EGAP.
\url{https://egap.org/resource/10-things-to-know-about-the-local-average-treatment-effect/}
(August 8, 2021).
\hypertarget{week-14-difference-in-differences}{%
\subsubsection{Week 14: Difference in
Differences}\label{week-14-difference-in-differences}}
Wing, Coady, Kosali Simon, and Ricardo A. Bello-Gomez. 2018. ``Designing
Difference in Difference Studies: Best Practices for Public Health
Policy Research.'' Annual Review of Public Health 39(1): 453--69.
\end{document}
\documentclass[12pt]{article}
\usepackage[margin=1cm,left=2cm,includefoot]{geometry}
\usepackage{graphicx}
\usepackage{indentfirst} %indent fisrt line in section
\usepackage{subfig} %for mutiple figure strcutures
\usepackage[nottoc]{tocbibind}
\usepackage[numbers,sort&compress]{natbib} %for cite to give ranges
\usepackage[onehalfspacing]{setspace} %1.5 linespacing (according to stackexchange - not really 1.5...)
\usepackage{listings}
\begin{document}
\begin{titlepage}
\begin{center}
\begin{figure}[h]
\centering
\includegraphics[scale=1]{img/asd}
\end{figure}
\vspace{2cm}
\huge{\bfseries Brain lesion detection using neural networks}\\
\vspace{2cm}
\large{Neural networks course project}\\
\vspace{5cm}
\end{center}
\begin{flushright}
\large Done by: EKSfmu-16 gr. st. Arūnas Butkus
\linebreak
\large Checked by: prof. dr. Artūras Serackis
\end{flushright}
\vspace{6cm}
\begin{center}
\textsc{Vilnius, 2017}
\end{center}
\end{titlepage}
\section{Introduction}
\label{sec:intro}
Magnetic resonance imaging (MRI) scans allow seeing the situation within the body and are the primary means of checking whether there are any issues with the brain. The scan in itself does not tell if there is an issue; a trained medic needs to determine that. One such issue is a blood spill in the brain – a lesion for short. In some cases lesions are small, hard to notice and require an expert to spot them; sometimes they are a huge glob.
The goal here is to see whether it is possible to apply neural networks to at least classify whether there is an issue, and ideally to mark the lesion area. To that end I look through some MATLAB solutions to similar problems proposed by others, looking for a method that might work to some degree.
I explore existing methods for MRI scan segmentation and/or delineation, starting with a method utilizing the Statistical Parametric Mapping (SPM12) toolbox for MATLAB, which works on 3-dimensional MRI scans, and then a few other methods working on 2D images (slices) of MRI scans.
\section{Analysis of existing volumetric delineation algorithm}
\label{sec:griffisLesion}
There is a variety of automatic and semi-automatic algorithms for delineating lesions or other specific brain regions \cite{griffis2016voxel, ashton1997novel, de2015fast, li2015local, harmouche2015probabilistic, petoe2014template, gillebert2014automated, elliott2013temporally, llado2012automated, renz2011accuracy, chen2008voxelwise}. I started off by analysing Joseph C. Griffis' algorithm, \texttt{lesion\_gnb.m}, for lesion area delineation \cite{griffis2016voxel}. It is run using MATLAB™ and, as provided \cite{griffisSrcDLweb}, works with any MRI scan presented as a .nii file, with extra configuration variables supplied at runtime. The algorithm utilizes functions provided by the SPM12 toolbox, which is available for free on the internet \cite{spm12DL}. When the script is run, it first throws an input box asking whether segmentation should be performed on the MRI scan. The input is either "Y" or "N"; if "N", MRI scan segmentation is skipped.
In general, when running for the first time, the user has to perform the segmentation, thus selecting "Y". The next dialogue asks to pick a directory where all the files will be put, and the one after that asks to provide the MRI scan file. Processing of the MRI scan then begins with its segmentation into grey matter, white matter and cerebrospinal fluid (CSF). For this purpose the unified segmentation algorithm provided by the SPM12 toolbox is used.
\subsection{Unified segmentation}
\label{ssec:unifiedSeg}
When attempting to segment brain images into certain classes, two approaches can usually be taken: tissue classification or registration with a template. The tissue classification method assigns voxels to a tissue class according to their intensities. For this to work, the intensities of each tissue class need to be characterized, which is usually achieved by choosing specific voxels to represent each class \cite{zijdenbos1993brain, alfano2000automated, ballester2000segmentation, van2001automated, kwan1996extensible, kwan1999mri, taylor1994image}. An automatic way to do this is to first map the brain to some standard space and then automatically select voxels that have a high probability of belonging to each class. A similar approach is to model the intensity distribution by a mixture of Gaussians, while using tissue probability maps to weight the classification according to Bayes rule \cite{domingos1997optimality, raizada2013smoothness, rish2001analysis}. The registration-with-a-template method involves a specific type of registration where a template brain is warped to match the T1w brain scan to be segmented \cite{collins1995automatic, crinion2007spatial, ripolles2012analysis}. Volume-matching methods are not the only option here, as methods based on matching surfaces \cite{macdonald2000automated, pitiot2004expert, ashton2001automated} also fall into this category. These methods work by overlaying regions that are predefined on the templates, thus allowing different structures to be identified automatically. Unified segmentation uses both tissue classification and registration with a template for more accurate segmentation of an MRI scan.
To start tissue classification, the images need to be registered with tissue probability maps \cite{ashburner1999nonlinear}. After registration, these maps represent the prior probability of different tissue classes being found at each location in an image. Bayes rule can then be used to combine these priors with tissue-type probabilities derived from voxel intensities to provide posterior probabilities \cite{Ashburner2005}. This procedure is circular – registration requires an initial tissue classification and tissue classification requires an initial registration. To resolve this, a single generative model is used. This model also includes parameters accounting for image intensity nonuniformity in both segmentation and registration. To find these parameters, the algorithm alternates between classification, bias correction and registration steps, which provides better results than serial application of each component.
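As a toy illustration of this per-voxel Bayes rule (a sketch only, not the actual SPM12 implementation, which additionally interleaves bias correction and registration), the posterior tissue probabilities could be computed as follows; the class means, standard deviations and priors below are made up:
\begin{lstlisting}[language=Python]
# Toy per-voxel Bayes rule: combine TPM priors with Gaussian intensity
# likelihoods (illustration only, not the SPM12 implementation).
import numpy as np

def posterior_tissue_probs(intensity, priors, means, stds):
    priors = np.asarray(priors, dtype=float)
    means = np.asarray(means, dtype=float)
    stds = np.asarray(stds, dtype=float)
    lik = np.exp(-0.5 * ((intensity - means) / stds) ** 2) / (stds * np.sqrt(2 * np.pi))
    post = priors * lik                      # Bayes rule, up to normalization
    return post / post.sum()

# made-up class parameters for (gray matter, white matter, CSF)
print(posterior_tissue_probs(0.6, [0.5, 0.3, 0.2], [0.6, 0.8, 0.2], [0.1, 0.1, 0.1]))
\end{lstlisting}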
To account for image nonuniformity, parametric bias correction is used. Many bias correction models are based on modelling the intensities of different tissues as a mixture of Gaussians. There are three commonly used models of how the bias interacts with noise. In the first, the resulting signal ($y_i$) is the original signal ($\mu_i$), scaled by some bias ($\rho_i$), with added Gaussian noise ($n_i$) that does not depend on the bias \cite{shattuck2001magnetic}. This assumes that the noise comes from the MRI scanner itself (\ref{eq:bias1}).
\begin{equation}
\label{eq:bias1}
y_i=\mu_i/\rho_i+n_i
\end{equation}
The second model, which is used by the unified segmentation, is similar to the first one except that the noise is added before the signal is scaled, which implies that the noise is due to variation in tissue properties (\ref{eq:bias2}). There is also the option of accounting for both tissue and scanner noise, which is likely a better choice, especially for images that have large amounts of bias. However, the unified segmentation in SPM12 uses the single-source model.
\begin{equation}
\label{eq:bias2}
y_i=(\mu_i+n_i)/\rho_i
\end{equation}
The third method applies a logarithmic transformation to the data first, which allows the multiplicative bias to be modelled as an additive effect in logarithmic space. The cost function for these approaches is related to the entropy of the distribution of log-transformed, bias-corrected data. As with the non-parametric model based on log-transformed data, low-intensity voxels have to be excluded to avoid numerical problems. The generative model is of a form similar to the one given in equation (\ref{eq:biasLog}), which then gives an exponential factor in the resulting signal function (\ref{eq:bias3}).
\begin{equation}
\label{eq:biasLog}
\log y_i = \log \mu_i - \log \rho_i + n_i
\end{equation}
\begin{equation}
\label{eq:bias3}
y_i = \mu_i e^{n_i} / \rho_i
\end{equation}
Another ingredient of segmentation is the determination of prior probabilities. Rather than using stationary values based on mixing proportions, additional information from other subjects' brain images is used. Usually priors are generated by registering a large set of brain images and averaging the resulting tissue classes. This gives a set of tissue probability maps – white matter, gray matter and CSF – representing the probability of each tissue type being present in specific areas of a brain.
\begin{figure}[!htb]
\centering
\includegraphics[width=0.8\textwidth]{img/segm}
\caption{Research results obtained using unified segmentation: patient MRI scan slice (A), gray matter TPM (B), white matter TPM (C) and CSF TPM (D)}
\label{fig:segm}
\end{figure}
\subsection{Generation of lesion probability map}
\label{ssec:lesionGen}
After unified segmentation is applied to the brain, multiple images are saved to the provided directory – 20 files to be exact, including the original MRI scan. Of these, initially 3 files are used: the warped gray matter tissue probability map (TPM), the warped white matter TPM and the warped CSF TPM. These TPMs are warped to match a default template provided by the SPM toolbox. Warping is used because subsequent processing requires a comparison with the healthy brain half and/or a standard template, and for that the rough shapes of the scans must align. This is part of the reason why this algorithm does not work for small lesions.
The next dialogue asks to provide the directory containing the MRI scan segments. This dialogue is thrown because it is the next piece of code to be executed after segmentation, or after skipping segmentation in the initial dialogue; this is the first of many optimization issues present in the script. In any case, selecting the directory with the segments throws a dialogue with the main settings selection for the algorithm.
This new dialogue has 6 fields to be edited. The first one names the folder in which results will be placed. Next is a selection of whether to search for the lesion in the left hemisphere, indicated by providing the character "L", or in the right hemisphere, indicated by "R". This points out one more limitation of this algorithm – it can neither automatically determine which hemisphere has the lesion, nor find lesions that span both hemispheres. The third field is for the smoothing kernel full width at half maximum (FWHM) selection; the default suggested value is 8, which is used as a performance/results reference later. The next field asks whether the unaffected hemisphere should be used for lesion area detection. The fifth field is for selecting prior lesion probabilities. The last field is the implicit mask for smoothing, which is 0 by default.
After gathering these values and shuffling them into a single class variable, the feature extraction subscript is run. Processing in this subscript starts with smoothing of all the elements to be used (the TPMs and the SPM12 template/PPM), applying the smoothing kernel FWHM value provided in the earlier dialogue. Following more variable shuffling, the brain mask from the SPM12 toolbox is loaded. This mask is used to filter out noise that can appear outside the brain before and after some processing steps; for example, to remove data points appearing outside the brain after smoothing the TPMs. Next, all required layers are made for the affected hemisphere. As an example, assuming that the left hemisphere is affected, three left-hemisphere probability maps are created for each of gray matter, white matter and CSF. The first is the left hemisphere only (all other data points filtered out) of the smoothed TPMs made by segmentation. The second is the right hemisphere only, with the smoothed TPMs flipped to overlay the left hemisphere; this one is used only if selected in the previous settings dialogue, but it is created either way. The third is the left hemisphere of the smoothed SPM12 template/PPM.
Once all the TPMs and PPMs are ready, feature maps for missing tissue ($F_1$) and abnormal tissue ($F_2$) are created. The missing tissue map provides information about areas where brain tissues are missing. To find this area, a characteristic of SPM12 segmentation is exploited – segmentation algorithms tend to classify chronic stroke lesions as CSF, because missing tissue voxels are assigned low gray or white matter probability values \cite{seghier2008lesion, wilke2011manual}. In the end the missing tissue area is obtained from the average of two image volumes, using Eq. \ref{eq:f1diffEq1} and Eq. \ref{eq:f1diffEq2}.
\begin{equation}
\label{eq:f1diffEq1}
(CSF_{Affected}-CSF_{Unaffected})*((GM_{Unaffected}+WM_{Unaffected})-(GM_{Affected}+WM_{Affected}))
\end{equation}
\begin{equation}
\label{eq:f1diffEq2}
(CSF_{Affected}-CSF_{Prior})*((GM_{Prior}+WM_{Prior})-(GM_{Affected}+WM_{Affected}))
\end{equation}
Both of these equations are used if the user chose to use the unaffected hemisphere in the previous dialogue; otherwise only Eq. \ref{eq:f1diffEq2} is used. When using both equations, their results are averaged before being saved as the missing tissue map. Averaging is justified on two points: using them as separate predictors would be sub-optimal, since both volumes contain highly redundant information (both are expected to have lesion values in the same areas), and averaging them retains the values of concordant voxels while reducing the values of discordant voxels (e.g. false positives due to inter-hemispheric or inter-individual variability) \cite{griffis2016voxel}.
Once the missing tissue map is created, the abnormal tissue map ($F_2$) is next. It provides information about abnormal tissue and is motivated by the observation that SPM12 segmentation tends to classify such tissue as gray matter, because its T1w signal intensities are similar to those observed in healthy gray matter \cite{mehta2003evaluation}. The overall scheme is the same as for missing tissue extraction, just with different equations (\ref{eq:f2diffEq1}, \ref{eq:f2diffEq2}). An example of the resulting missing and abnormal tissue maps is shown in figure \ref{fig:f1f2brain}.
\begin{equation}
\label{eq:f2diffEq1}
(GM_{Affected}-GM_{Unaffected})*(WM_{Unaffected}-WM_{Affected})
\end{equation}
\begin{equation}
\label{eq:f2diffEq2}
(GM_{Affected}-GM_{Prior})*(WM_{Prior}-WM_{Affected})
\end{equation}
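A rough NumPy sketch of these feature-map computations is given below (it assumes the smoothed subject TPMs and the prior TPMs are already loaded as arrays with the left–right axis first, and omits the hemisphere masking; the actual script operates on .nii volumes through SPM12):
\begin{lstlisting}[language=Python]
# Rough sketch of the missing-tissue (F1) and abnormal-tissue (F2) maps.
# gm, wm, csf: smoothed subject TPMs; gm_p, wm_p, csf_p: prior/template TPMs.
# The left-right axis is assumed to be axis 0; hemisphere masking is omitted.
import numpy as np

def feature_maps(gm, wm, csf, gm_p, wm_p, csf_p, use_unaffected=True):
    # "unaffected" maps: the volume mirrored onto the affected hemisphere
    gm_u, wm_u, csf_u = (np.flip(v, axis=0) for v in (gm, wm, csf))

    f1 = (csf - csf_p) * ((gm_p + wm_p) - (gm + wm))
    f2 = (gm - gm_p) * (wm_p - wm)
    if use_unaffected:
        f1 = 0.5 * (f1 + (csf - csf_u) * ((gm_u + wm_u) - (gm + wm)))
        f2 = 0.5 * (f2 + (gm - gm_u) * (wm_u - wm))
    return f1, f2
\end{lstlisting}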
\begin{figure}[!htb]
\centering
\includegraphics[width=0.8\textwidth]{img/f1f2brians}
\caption{Patient MRI scan slice (A), with missing tissue map overlaid (B) and abnormal tissue map overlaid(C)}
\label{fig:f1f2brain}
\end{figure}
At the end of the feature extraction subscript, both feature maps are saved to files along with a MATLAB variable holding the feature magnitudes and a variable with their corresponding indexes. This is followed by the classification subscript. By default this uses a previously trained classifier to predict the full area affected by the lesion \cite{pereira2009machine}. Training was performed with 29 cases and tested on a single case left out of the training \cite{griffis2016voxel}. Prediction is executed using the MATLAB function "predict", supplying it with the trained system and the feature magnitudes variable saved in the feature extraction subscript; in this case the function returns labels and posterior probabilities. However, the resulting lesion maps are noisy and imprecise (Fig. \ref{fig:shitylesions}B). To refine the results, some post-processing algorithms are employed next.
The next dialogue informs that delineation is complete and suggests applying post-processing. A standard "Y/N" choice is provided, and if no post-processing is requested the script exits. Results at that point can be found in the \texttt{f1.nii}, \texttt{f2.nii}, \texttt{lesion\_labels.nii} and \texttt{lesion\_posterior.nii} files. If post-processing is selected, the next dialogue asks whether FWHM smoothing should be performed and what size smoothing kernel should be applied (default 8). Then comes a dialogue for choosing whether implicit masking should be used. A third dialogue configures the clustering step – whether the minimum lesion cluster size algorithm should be applied and what the minimum size per cluster is. This comes with a recommendation of 100 voxels per cluster, which is another reason why this algorithm, using default values, cannot find small lesions.
These dialogues are thrown intermittently throughout the code executed in the post-processing step. First to be executed is smoothing: the lesion probability map (\texttt{lesion\_label.nii}) is smoothed with the user-supplied smoothing kernel FWHM and then thresholded to retain values in voxels with magnitudes above 0.25. This is intended to close gaps, smooth rough edges and degrade small isolated lesion clusters given by the predictor \cite{griffis2016voxel}. This is followed by the clustering algorithm, which further removes small clusters of lesions. By default the minimum cluster size is 100, meaning that if the total number of voxels in a cluster is less than 100, those voxels are removed from the lesion map.
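The following sketch reproduces the gist of these post-processing steps with SciPy (illustrative only; the original script uses SPM12/MATLAB routines, and the FWHM here is taken in voxel units):
\begin{lstlisting}[language=Python]
# Sketch of post-processing: Gaussian smoothing by FWHM, thresholding at 0.25
# and removal of connected clusters smaller than a minimum size.
import numpy as np
from scipy import ndimage

def postprocess(lesion_prob, fwhm_vox=8.0, threshold=0.25, min_cluster=100):
    sigma = fwhm_vox / (2.0 * np.sqrt(2.0 * np.log(2.0)))   # FWHM -> Gaussian sigma
    smoothed = ndimage.gaussian_filter(lesion_prob, sigma)
    mask = smoothed > threshold

    labels, n = ndimage.label(mask)                          # connected components
    sizes = ndimage.sum(mask, labels, index=np.arange(1, n + 1))
    keep = np.isin(labels, np.flatnonzero(sizes >= min_cluster) + 1)
    return keep.astype(np.uint8)
\end{lstlisting}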
\begin{figure}[!htb]
\centering
\includegraphics[width=0.8\textwidth]{img/shitylesions}
\caption{Griffis lesion delineation algorithm results: depiction of hand drawn lesion area (A), lesion area pre post-processing (B), lesion area after post-processing (C)}
\label{fig:shitylesions}
\end{figure}
This concludes a run of the Griffis lesion delineation algorithm with default values using his trained predictor system. For the first test case, upon visual inspection, it marks excessively large areas as affected by the lesion (Fig. \ref{fig:shitylesions}C). The following subsection analyzes the results obtained when running the algorithm "as is", how the algorithm can be improved, and the comparison between the two.
\subsection{Default results, algorithm improvement and comparison between them}
\label{ssec:griffisResults}
The Griffis lesion delineation algorithm is neither accurate nor optimized. Accuracy depends on the type of data fed to the algorithm and on experience in picking settings such as the smoothing kernel FWHM. I call the script unoptimized because it often performs calculations that are irrelevant to subsequent calculations and even saves some of this extraneous data onto the hard drive. This significantly increases processing time and wastes storage space – in the above-mentioned case, all data generated by this algorithm for one patient takes 334 MB (including 8 MB for the initial MRI scan).
\begin{equation}
\label{eq:dsc}
DSC=\frac{2|X\cap Y|}{|X|+|Y|}
\end{equation}
The main metric used for evaluating these results is the Dice-Sorensen Coefficient, otherwise known as the Dice Similarity Coefficient, or DSC for short \cite{dice1945measures}, which shows the similarity between two sets of data. $X$ and $Y$ in Eq. \ref{eq:dsc} refer to the two sets. The DSC is obtained by multiplying the number of overlapping points by two and dividing by the sum of all data points in both sets. In the analysis of the lesion delineation results, the first data set is the manually delineated lesion area – a three-dimensional matrix with values of either 0 or 1, where 1 indicates that the lesion is present in that voxel. The second data set is the algorithm-delineated lesion area matrix, which is thresholded with an arbitrarily chosen 20\% cutoff, meaning that values smaller than 20\% of the maximum value in the whole set are assigned 0 and the others are assigned 1. Since both sets are binary, with a maximum magnitude of 1, the resulting DSC value ideally should be equal to 1, which would mean that all compared data points coincide. For the analysis, the DSC is calculated for each MRI scan slice going vertically, instead of one DSC value for the whole volume (a minimal sketch of this per-slice computation follows below). This allows identifying the slices where the algorithm found the lesion area accurately, and where it produced only false data.
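A minimal sketch of this per-slice DSC computation (assuming the manual and automatic lesion maps are loaded as 3-D NumPy arrays and slices are taken along the last axis) is:
\begin{lstlisting}[language=Python]
# Per-slice Dice-Sorensen coefficient for a manual and an automatic lesion map.
import numpy as np

def dsc(x, y):
    x, y = x.astype(bool), y.astype(bool)
    denom = x.sum() + y.sum()
    return np.nan if denom == 0 else 2.0 * np.logical_and(x, y).sum() / denom

def per_slice_dsc(manual, automatic, threshold=0.20):
    auto_bin = automatic >= threshold * automatic.max()      # 20% cutoff
    return [dsc(manual[..., k], auto_bin[..., k]) for k in range(manual.shape[-1])]
\end{lstlisting}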
For testing, 124 MRI scans were prepared; however, due to limitations of the algorithm, the majority were rejected. Because of the smoothing involved and because brains are not symmetrical, this algorithm cannot find small lesions – it can find only lesion areas that damaged a significant portion of the brain. To pick out which brains have small lesions, their manually delineated lesion regions were used, and MRI scans with a total lesion area of less than 5000 voxels were rejected. Another criterion for rejection is lesion location: this algorithm cannot delineate a lesion that spans both hemispheres, which is why only lesions affecting a single hemisphere were selected. After this filtering, 49 MRI scans were left and the algorithm was run for all of them.
DSC results are plotted as a graph over the slices. Slices where there is no data in either set have no line, as the DSC equation (Eq. \ref{eq:dsc}) produces a division by zero. In all plots there are slices with a DSC of 0; this is due to the automatic algorithm finding larger, or just different, areas than in the manually delineated case (Fig. \ref{fig:worstDefaultDSC}). These plots also have a red dashed line representing the relative size of the manually delimited lesion area in each slice and a green dashed line showing the relative size of the automatically delimited lesion area in that slice. These lines help identify MRI scans which got seemingly good DSC values in some slices only because the automatic algorithm delimited huge areas of the brain as affected by lesion.
\begin{figure}[!htb]
\centering
\includegraphics[width=0.8\textwidth]{img/P1268_L}
\caption{Best attained per slice DSC results after running algorithm for 49 patients.}
\label{fig:bestDefaultDSC}
\end{figure}
\begin{figure}[!htb]
\centering
\includegraphics[width=0.8\textwidth]{img/P0836_R}
\caption{Worst attained per slice DSC results after running algorithm for 49 patients.}
\label{fig:worstDefaultDSC}
\end{figure}
None of the MRI scans attained a DSC value of 1 in any slice, which would be the ideal case; however, in one case a DSC of 0.9 was attained in a slice (Fig. \ref{fig:bestDefaultDSC}). 13 more got DSC values around 0.8 for a slice, which shows that this algorithm can delineate some lesions with a degree of precision. However, these 14 patients constitute only 29\% of the test cases. The DSCs for 3 patients do not reach magnitudes of even 0.4 per slice; in those cases the algorithm was unable to determine precisely where the lesion is and just marked large swaths of the brain as affected. Patient MRI scans in between these two extremes get some decent DSC values in some slices for the same reason – the algorithm delimits large areas as affected by lesions, and the marked voxels in the two sets at a particular slice just happen to coincide. In such cases the total DSC value shows that the algorithm failed. As an example, for patient P0743 (Fig. \ref{fig:defaultDSC0743}) one of the slices has a DSC over 0.7, but the automatic algorithm delimited large areas of the brain as lesion even though there was none, which brings the total DSC to 0.17. The same issue is present in a few of the results which, looking only at the per-slice DSC, would appear "good". In the case of patient P1712 (Fig. \ref{fig:defaultDSC1712}), the maximum per-slice DSC reaches 0.8, but the total DSC is only 0.28, implying a bad result; indeed, in that case the automatic algorithm finds large areas of lesioned brain where there is none (Fig. \ref{fig:tooMuchDelineated}). Finally, there are cases where the automatic algorithm fails completely (Fig. \ref{fig:worstDefaultDSC}). This case is an illustration of the algorithm being incapable of detecting small lesions (Fig. \ref{fig:0836defaultDelineation}). Analysing the MRI scan and the manual lesion delineation, one can see that only small areas scattered around the right hemisphere are affected by the lesion. That is the worst case for the algorithm – lesions that are small and at the edges of the brain. After smoothing and removal of areas outside the presumed brain area, the information becomes indistinguishable.
\begin{figure}[!htb]
\centering
\includegraphics[width=0.8\textwidth]{img/P0743_L}
\caption{DSC plots showing results of lesion delineation for patient P0743.}
\label{fig:defaultDSC0743}
\end{figure}
\begin{figure}[!htb]
\centering
\includegraphics[width=0.8\textwidth]{img/P1712_L}
\caption{DSC plots showing results of lesion delineation for patient P1712.}
\label{fig:defaultDSC1712}
\end{figure}
\begin{figure}[!htb]
\centering
\includegraphics[width=0.7\textwidth]{img/javaw_2017-01-27_13-55-31}
\caption{Manual delineation (A) comparison with result of automatic delineation (B) for patient P1712.}
\label{fig:tooMuchDelineated}
\end{figure}
\begin{figure}[!htb]
\centering
\includegraphics[width=0.9\textwidth]{img/javaw_2017-01-27_14-04-52}
\caption{Patient P0836 MRI scan (A) manual delineation (B) comparison with result of automatic delineation (D). Also showing found missing tissue TPM (C).}
\label{fig:0836defaultDelineation}
\end{figure}
So in the end, according to the algorithm's author, a lesion delineation with a total DSC of 0.60 or higher can be considered “good” \cite{griffis2016voxel}. By this estimate, out of my 49 tested cases only 7 delineations are "good" – a mere 14\% of cases. This is likely because the predictor system trained by the original author simply does not work with the MRI scans I have tested with. By training one's own predictor system it should be possible to increase the accuracy and improve the DSC scores, but that is not likely to generalize later, when running on other cases. The author had 30 MRI scans to work with, used 29 for training and 1 to test the obtained prediction system. However, later on, when testing the system, he presumably applied it to the same 30 MRI scans. This is why his results are better – 66\% of the lesion delineations are "good" \cite{griffis2016voxel} – which is to be expected, as the system was trained on those MRI scans. That is also why the predictor trained there does not work on many of the 49 cases I have tested, since these vary greatly in placement, size, clustering and intensity.
However, the results and the overall algorithm operation can be tuned and improved by editing the script and the default values. At this point I have created an edited version of the author's original \texttt{lesion\_gnb.m} script, naming it \texttt{et\_lesion.m}. The main focus of this script is to increase the number of things done automatically and to make the script run for multiple patients without user interaction. This was especially appealing as, even without any user interaction, using the default parameters and scripts, \texttt{lesion\_gnb.m} runs around 3.76 hours for the 49 test cases (Table \ref{tbl:speeds}). \texttt{lesion\_gnb.m} requires a variety of settings for each MRI scan, but only one depends on the MRI scan itself, so they can be set at the beginning and reused for all cases. The one setting where this does not apply is the indication of which hemisphere of the brain is lesioned. My code determines this from the MRI scan filename – the last symbol in the filename is either "L", signifying that the left hemisphere is lesioned, or "R", signifying that the right hemisphere is lesioned. Patient MRI scans are taken from a single directory, which is expected to contain only patient MRI scans. Results are placed in one directory, set at the beginning, which in the end contains subfolders, named after the patients' MRI scan filenames, into which all resulting segmentations, feature maps and delineations are placed.
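The batch logic itself is simple; the following Python sketch only illustrates the control flow (\texttt{et\_lesion.m} itself is a MATLAB script, and the \texttt{delineate} worker below is a hypothetical placeholder):
\begin{lstlisting}[language=Python]
# Purely illustrative sketch of the batch logic: loop over every .nii scan in
# one input directory, read the lesioned hemisphere from the last character of
# the file name and reuse a fixed settings structure for every patient.
import os

def delineate(scan_path, out_dir, hemisphere, **settings):
    # hypothetical placeholder for the segmentation + delineation pipeline
    print("processing", scan_path, "hemisphere", hemisphere, "->", out_dir)

def run_batch(scan_dir, out_dir, settings):
    for fname in sorted(os.listdir(scan_dir)):
        if not fname.endswith(".nii"):
            continue
        stem = fname[:-len(".nii")]
        hemisphere = stem[-1]                      # "L" or "R" encoded in the name
        patient_out = os.path.join(out_dir, stem)
        os.makedirs(patient_out, exist_ok=True)
        delineate(os.path.join(scan_dir, fname), patient_out,
                  hemisphere=hemisphere, **settings)
\end{lstlisting}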
In terms of storage, 8.4 GB of space were saved (Table \ref{tbl:speeds}), roughly 40\%. However, saving storage space was not the real focus of the optimization; it is more a side product of improving execution speed. The most time-consuming part is the MRI scan segmentation into TPMs using the SPM12 toolbox. Complex segmentation algorithms strive for high accuracy \cite{Ashburner2005}, hence all the lengthy repeated calculations that are used. Another big contributor to the long processing times is that these algorithms cannot fully utilize multi-core/multi-thread CPUs: the usual load of the MATLAB process during processing on an Intel i5-4460 (4 cores, 4 threads) is 40\% most of the time, and 80\% in some parts. So segmentation is a time-consuming process, which means the amount of processing done during segmentation should be minimized.
Originally, the Griffis lesion delineation algorithm saves to storage during segmentation all native-space TPMs – gray matter, white matter, CSF, bone, soft tissue and air/background. In addition it also saves warped modulated and unmodulated TPMs of gray matter, white matter, CSF and bone. Each TPM requires a different, but significant, amount of processing. In the end, the lesion delineation script utilizes only the warped unmodulated TPMs of gray matter, white matter and CSF, thus my first improvement is generating, and saving, only the required warped TPMs – gray matter, white matter and CSF. When saving only these, the segmentation function throws a warning in the cleanup process stating that it cannot be performed; cleanup can be performed only when at least one of each TPM is generated. Thus, my code also saves native-space TPMs for bone, soft tissue and air/background. An initial test suggested that cleanup is not necessary – tested by a simple difference comparison, subtracting a TPM generated by the Griffis-configured segmentation algorithm from the TPM generated by the segmentation configured by me. The maximum absolute amplitude difference equals 0, meaning the cleanup algorithm makes no difference to the segmentation results. Omitting cleanup and the calculation of those extra native-space TPMs would save 2 minutes per patient. However, upon manual inspection of a few other cases, it became apparent that sometimes the TPMs returned from segmentation have false positive values in the air/background, bone and soft tissue regions. To avoid these issues, I kept the cleanup process. In the end, the segmentation configured by me, running for all 49 patients, saves on average only 19 seconds (Table \ref{tbl:speeds}) of processing time per patient.
\begin{table}[!h]
\centering
\caption{\texttt{et\_lesion.m} vs \texttt{lesion\_gnb.m} execution times and results size comparison.}
\vspace{1ex}
\begin{tabular}{|c|c|c|c|c|c|}
\hline
& \shortstack{average \\ segmentation \\ time, s} & \shortstack{average other\\processing\\time, s} & \shortstack{average full\\processing\\time, s} & \shortstack{full run\\time, h} & \shortstack{results\\size, GB} \\ \hline
\texttt{et\_lesion.m} & 245 & 4 & 249 & 3.58 & 13.3 \\ \hline
\texttt{lesion\_gnb.m} & 264 & 12 & 276 & 3.76 & 21.7 \\ \hline
\end{tabular}
\label{tbl:speeds}
\end{table}
More time can be saved by implementing a feature that is hinted at in a comment in the original code, but not actually used – testing whether the smoothed prior/template already exists before attempting to smooth it. In the feature map calculation equations (Eq. \ref{eq:f1diffEq2} and Eq. \ref{eq:f2diffEq2}) mentioned before (Section \ref{ssec:lesionGen}), the prior TPM, which is referred to as the template in parts of the code, is smoothed with the smoothing kernel FWHM defined by the user. As the original non-smoothed prior file used for the calculations does not change, smoothing could be performed once per FWHM value and, upon code reruns, the previously smoothed TPM could be reused. Checking whether a previously smoothed TPM exists, and skipping the smoothing if it does, saves 8 seconds per patient in my optimized algorithm – execution time drops from 12 seconds to 4 seconds (Table \ref{tbl:speeds}). This 4-second average is attained when all runs already had a smoothed prior TPM and skipped the smoothing process. The average time of \texttt{lesion\_gnb.m} indicates how long processing takes when the prior TPM has not yet been smoothed with a particular FWHM, effectively showing how long it takes when running the script for the first time.
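The caching check amounts to the following pattern (an illustrative Python sketch; the real check is done in MATLAB on the SPM12 prior .nii file, and \texttt{smooth\_fn} is a hypothetical stand-in for the smoothing routine):
\begin{lstlisting}[language=Python]
# Sketch of the "smooth the prior TPM only once per FWHM" check.
import os

def smoothed_prior_path(prior_path, fwhm, smooth_fn):
    root, ext = os.path.splitext(prior_path)
    cached = "{}_s{}{}".format(root, fwhm, ext)
    if not os.path.exists(cached):          # smooth only when not cached yet
        smooth_fn(prior_path, cached, fwhm)
    return cached
\end{lstlisting}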
Further script optimization has a comparatively small impact on processing speed or storage space, and mostly deals with increasing code readability and minimizing redundancy in variables. However, other parameters can be optimized. My focus was on 3 of them – the smoothing kernel FWHM, the prior coefficient and the thresholding limit of the post-processed lesion delineation. Both the full DSC value and the maximum per-slice DSC are used for result comparison purposes. Higher priority is given to the full DSC as it shows full lesion delineation accuracy rather than the maximum partial, per-slice, accuracy.
I started with the most obvious one – the smoothing kernel FWHM, which controls the three-dimensional Gaussian blur applied to each voxel. Tests were run for three patients: P1857, which had a "good" lesion delineation using default values; P1712, which had a "good" maximum per-slice DSC but a full DSC of just 0.28; and P0836, whose lesion delineation had a full DSC of 0.0089 – the worst of all test cases. FWHM values were taken in the range from 2 to 32 with a step of 2, except for P1857 and P1712, for whom FWHM values of 9 and 11 are included to better pinpoint the curve peak. This gives a sizeable range which also includes the default FWHM value of 8. The resulting plots of DSC dependence on FWHM show that an optimal FWHM value can be found (Fig. \ref{fig:fwhms}).
\begin{figure}[!htb]
\centering
\subfloat[P1857]{
\includegraphics[width=0.49\textwidth]{img/fwhm/P1857}}
\subfloat[P1712]{
\includegraphics[width=0.49\textwidth]{img/fwhm/P1712}}
\subfloat[P0836]{
\includegraphics[width=0.49\textwidth]{img/fwhm/P0836}}
%\subfloat[P0089]{
%\includegraphics[width=0.4\textwidth]{img/fwhm/P0089}}
\caption{FWHM impact on DSC testing results for three patients; (a) - "good" lesion delineation, (b) - erroneous lesion delineation, (c) - completely failed lesion delineation.}
\label{fig:fwhms}
\end{figure}
Analyzing the first case, the P1857 MRI scan, already shows that an FWHM of 8 does not give the best possible results (Fig. \ref{fig:fwhms}A). The total DSC peak is at a smoothing kernel FWHM of 11. The maximum per-slice DSC peaks at an FWHM of 10 and quickly dips below 0.8 at FWHM 11, so this gives two values to pick from. The full lesion area DSC is more indicative of overall lesion delineation accuracy, thus an FWHM of 11 can be stated to be the best in the current testing setup. Using an FWHM of 11 instead of 8 increases the full DSC from 0.6181 to 0.6966 – a relative improvement of about 13\%. The hypothesis that an FWHM of 11 is better than 8 is consistent with the curves obtained for the P1712 MRI scan (Fig. \ref{fig:fwhms}B): the maximum per-slice DSC curve peaks at an FWHM of 10 and the full DSC curve peaks at FWHM 11. A few extra checks were carried out; the best DSC values vary between FWHM 10 and 11, with 11 more often than not being the better one. Thus \texttt{et\_lesion.m} will be using a smoothing kernel FWHM of 11.
Further tests show that no FWHM value can produce "good" results in cases where the algorithm fails to find decent results using the default parameters. In the case of the P0836 MRI scan, at a smoothing kernel FWHM of 6 the maximum per slice DSC does reach values greater than 0.5, however the overall DSC is still less than 0.05. As mentioned before, delineation fails here because this patient's MRI scan contains mostly small lesions located at the edges of the brain. To my untrained eye they were mostly invisible. Even in the segmentations, though somewhat visible when knowing where the lesions should be, they are hard to distinguish (Fig. \ref{fig:shittysegment}), so it is no surprise that blurring the image further obfuscates the useful information.
\begin{figure}[!htb]
\centering
\includegraphics[width=0.8\textwidth]{img/inconclusiveSegmentation}
\caption{The most indicative slice of the segmentations for the P0836 MRI scan, with emphasis on the lesion area: gray matter TPM with the manually delineated lesion area overlaid (A), white matter (B), CSF (C).}
\label{fig:shittysegment}
\end{figure}
The next user-set parameter that could improve the result is the prior probability coefficient. This value is used to build a two element vector, which is then passed as one of the parameters to the prediction system. The first value in the vector is 1 minus the given coefficient, the second value is the coefficient itself. Analyzing the source code of the script I found that the original author uses a default prior probability coefficient of 0.1064. However, he suggests 0.5, which is the value I used in the control run testing how his script performs on my 49 patient MRI scans. Testing of the prior coefficient is performed on the same three patients' MRI scans from the smoothing kernel FWHM impact tests, plus P1419, who had somewhat different curves. The FWHM value for these tests is 11, in the hope of further improving the result given the best FWHM found so far. The range of prior probability coefficients is from 0 to 1 with a step size of 0.1, including extra points at both ends (0.025, 0.05, 0.95, 0.975) to better determine the point where the DSC value drops off. The plots show the maximum per slice DSC and the full lesion delineation DSC dependence on the prior probability coefficient. The resulting curves are in Figure \ref{fig:priors}.
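In code this amounts to something like the following; the variable names and the interpretation of the two entries as priors of the non-lesion and lesion classes are my assumptions:
\begin{lstlisting}[language=Matlab]
% The user-set prior probability coefficient becomes a two-element vector
% passed to the prediction step; class interpretation is an assumption.
priorCoeff = 0.5;                        % value suggested by the original author
priors     = [1 - priorCoeff, priorCoeff];
\end{lstlisting}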
\begin{figure}[!htb]
\centering
\subfloat[P1857]{
\includegraphics[width=0.49\textwidth]{img/priors/P1857}}
\subfloat[P1712]{
\includegraphics[width=0.49\textwidth]{img/priors/P1712}}
\subfloat[P0836]{
\includegraphics[width=0.49\textwidth]{img/priors/P0836}}
\subfloat[P1419]{
\includegraphics[width=0.49\textwidth]{img/priors/P1419}}
\caption{Prior probability coefficient impact on DSC testing results for four patients.}
\label{fig:priors}
\end{figure}
Firstly, the prior probability coefficient should not be 0 or 1. For example, for patient P1857 a prior probability coefficient of 0 gives an empty lesion delineation – no results; a coefficient of 1 marks the whole hemisphere as lesion delineation area – far too many results. Thus, values in between must be taken.
The curves themselves are quite stable, showing only a small variation in results over a wide range of prior probability coefficient values, from 0.025 to 0.7. For patient P1857, with "good" results, the full lesion DSC varies from 0.8311 to 0.8383 – a maximum difference of 0.0072, which is not significant. In the case of patient P1712 the maximum difference is 0.0087 – still not a significant difference over a wide range. This insignificant difference persists for many other patients who got "good" or at least decent lesion delineations with the default parameters. However, some patients have more pronounced "better" regions – where either the maximum per slice DSC or the full lesion delineation DSC values are higher than elsewhere (Fig. \ref{fig:priors}D). The full DSC and per slice DSC regions do not overlap in this case; however, following the same logic as in the FWHM testing, the full lesion delineation DSC values are more important, so values should be picked from that range. Consequently, in \texttt{et\_lesion.m} the prior probability coefficient is kept at 0.5 – the same as in the default case – since it falls within the "good" range.
Finally, the prior probability coefficient does not help in the case of patient P0836, where lesion delineation is a complete failure. In the presumed "good" range, 0.025 to 0.7, all values, both per slice and full DSC, are flat zeroes – no result. Some data did happen to overlap at a prior probability coefficient of 0.975, but even then the DSC values are bad, so this data is not considered influential on the choice of prior probability coefficient used in \texttt{et\_lesion.m}. As a final check of whether specially tailored FWHM and prior probability coefficient values could produce a decent result, another quick test was run. A maximum per slice DSC of 0.7 is attained at a prior probability coefficient of 0.05, which coincides with other results, implying a better maximum per slice DSC in the lower ranges of the prior probability coefficient. But, in the end, the maximum full lesion delineation DSC was still only 0.0398.
The third, and final, parameter whose impact on the resulting lesion delineation I have tested is the threshold used to convert the results from floating point to binary values. The original \texttt{lesion\_gnb.m} script produces a binary valued lesion delineation, but after post-processing this delineation once again contains gradients resulting from the given smoothing kernel FWHM. Before the DSC calculations this smoothed lesion delineation is converted back into binary values. In the algorithm, the threshold value is dynamic and varies per patient. Up to this test the value was 20\% of the maximum value in the whole lesion delineation. For example, if the maximum value is 1, then all values below 0.2 are assigned 0 and all values at or above 0.2 are assigned 1. In practice, however, the maximum value is never 1 and in some cases (patient P0836 – failed delineation) it is as low as 0.18.
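A minimal sketch of this dynamic thresholding step, with placeholder variable names:
\begin{lstlisting}[language=Matlab]
% Dynamic thresholding: the cut-off is a percentage of the maximum value in
% the smoothed lesion delineation. Variable names are placeholders.
thresholdPct = 0.20;                                  % 20% in the original runs
peak = max(smoothedDelineation(:));                   % rarely equal to 1 in practice
binaryDelineation = smoothedDelineation >= thresholdPct * peak;
\end{lstlisting}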
The test was performed by changing the threshold percentage over the full range, from 0\% to 100\%, with a step of 2.5\%. All other test parameters are kept the same as in the default algorithm, except for the FWHM, which is set to 11. The test objects are the post-processed lesion delineations of all 49 patients. For result accuracy evaluation the maximum per slice DSC and the full lesion delineation DSC were once again used.
\begin{figure}[!htb]
\centering
\subfloat[P1857]{
\includegraphics[width=0.49\textwidth]{img/cutoff/P1857}}
\subfloat[P1712]{
\includegraphics[width=0.49\textwidth]{img/cutoff/P1712}}
\caption{Thresholding point impact on DSC testing results for two patients from before.}
\label{fig:cutoffs}
\end{figure}
Firstly, I checked the results for the same three patients as in the previous tests, as they represent three distinct lesion delineation outcomes (Fig. \ref{fig:cutoffs}). Patient P0836 is not included in the figure because, as seen in the previous test, with an FWHM of 11 and a prior probability coefficient of 0.5 the resulting DSCs are 0; thresholding cannot change this, as there is no overlapping data to begin with. The only point where the DSC rises above zero is at 0\%. This is because with a threshold of 0\% the whole data set becomes ones – all values at or above the threshold are set to 1 – and so inevitably some data does overlap. This property of the algorithm is noticeable for all patients – everyone has the same small DSC values at a threshold of 0\%. Conversely, both DSC values are near zero with the threshold at 100\%, because, in theory, only one voxel, the one with the highest magnitude, would remain in the comparison set.
The other two patients do have peaks in their curves, implying that there is an optimal thresholding value. For patient P1857 this peak is at 22.5\%. Compared to the previously used threshold of 20\% this gives an increase in overall DSC of $2\times10^{-4}$, which is not a significant difference. A slightly larger difference appears for patient P1712: the optimal threshold is at 27.5\% and the magnitude difference is 0.01 (a better result by 2\%). These two optimal thresholds do not coincide, so a different approach is necessary to pick the best overall threshold.
\begin{figure}[!htb]
\centering
\includegraphics[width=0.8\textwidth]{img/cutoff/06cutofftest}
\caption{Curves of the 24 patients' full lesion delineation DSC dependence on the thresholding point.}
\label{fig:ctfcurves}
\end{figure}
The DSC dependence on the threshold was calculated for all patients, however not all results should be considered further. Results that never reach "good" values \cite{griffis2016voxel} are omitted. This leaves 24 patients – half of all patients. The resulting curves still have highly varied peaks (Fig. \ref{fig:ctfcurves}). To glean a usable result I averaged the curves of these 24 patients at each threshold and looked for a peak in the resulting curve (Fig. \ref{fig:ctfsum}); a sketch of this step is shown below. The maximum is reached at a 32.5\% threshold, which, compared to the value attained using the 20\% threshold in the previous tests, gives an increase in DSC of 0.025 (3.6\%).
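The averaging step is straightforward; here \texttt{dscCurves} is assumed to be a 24-by-41 matrix holding one DSC curve per retained patient:
\begin{lstlisting}[language=Matlab]
% Average the per-patient DSC curves and pick the threshold at the peak of
% the mean curve. dscCurves: patients x thresholds (0:2.5:100).
thresholds = 0:2.5:100;
meanCurve  = mean(dscCurves, 1);          % average over the 24 retained patients
[bestDSC, idx] = max(meanCurve);
bestThreshold  = thresholds(idx);         % 32.5% in the tests reported here
\end{lstlisting}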
\begin{figure}[!htb]
\centering
\includegraphics[width=0.8\textwidth]{img/cutoff/06ctfsum}
\caption{Average of the per-patient curves shown in Figure \ref{fig:ctfcurves}.}
\label{fig:ctfsum}
\end{figure}
Running my modified \texttt{et\_lesion.m} script with the two changed parameters did improve the end results. For the comparison, the full lesion delineation DSCs of all patients were used. In the roughest terms, the summed DSC improved by 6.28, meaning an average increase of 0.128 DSC per patient. The largest increase in lesion delineation DSC was for patient P1476, with a DSC increase of 0.476 and a maximum per slice DSC increase of more than 0.2 (Fig. \ref{fig:etvsgnb1476}). This significant increase appears because the algorithm effectively finds the actual lesion area during feature extraction, and after prediction the lesion delineation is similar to the manual delineation. For 44 patients the DSC increased by some amount. For five patients the lesion delineation DSC fell compared to the DSCs obtained with the default parameters. The largest DSC loss was for patient P0053, with a magnitude of 0.05 (Fig. \ref{fig:etvsgnb0053}). This loss occurs because the script now produces significantly smaller lesion volumes, which in this case were not more precise than with the default parameters. For patient P0053 the total number of lesion voxels labeled by \texttt{lesion\_gnb.m} is 65 141, while the number labeled by \texttt{et\_lesion.m} is 27 344, as can also be seen in the graph comparison. These smaller lesion volumes are also what give better results for other patients' lesion delineations.
\begin{figure}[!htb]
\centering
\subfloat[]{
\includegraphics[width=0.49\textwidth]{img/etvsgnb/P1476_Rgnb}}
\subfloat[]{
\includegraphics[width=0.49\textwidth]{img/etvsgnb/P1476_Ret}}
\caption{DSC graphs for patient P1476 obtained running \texttt{lesion\_gnb.m} (a) and \texttt{et\_lesion.m} (b).}
\label{fig:etvsgnb1476}
\end{figure}
\begin{figure}[!htb]
\centering
\subfloat[]{
\includegraphics[width=0.49\textwidth]{img/etvsgnb/P0053_Lgnb}}
\subfloat[]{
\includegraphics[width=0.49\textwidth]{img/etvsgnb/P0053_Let}}
\caption{DSC graphs for patient P0053 obtained running \texttt{lesion\_gnb.m} (a) and \texttt{et\_lesion.m} (b).}
\label{fig:etvsgnb0053}
\end{figure}
The total number of voxels marked as lesion area by \texttt{et\_lesion.m} decreased by at least 25.9\% for every patient; on average it decreased by 61.6\%, with a maximum decrease of 93.2\%. This decrease is mostly caused by the higher thresholding level, but it is also affected by the prediction algorithm behaving differently when given the slightly different feature maps produced by the new smoothing kernel FWHM. This decrease in lesion delineation volume gives higher precision, and a better DSC, in cases where the extracted missing and abnormal tissue probability maps manage to find the cores of the lesions. Otherwise the prediction system further muddles the information, marking large swaths as lesion area, and at that point the removal of voxels decreases the DSC.
So in the end, \texttt{et\_lesion.m} manages to get "good" \cite{griffis2016voxel} lesion delineations for 21 patients, compared to 7 obtained using the script with default parameters. That is still only 43\% of the patients in the specially selected test group. As such it is still not usable for general application on arbitrary MRI scans, and I would like to implement some self-evaluation algorithms that would give the user an indication of what accuracy to expect in the result.
\section{Algorithms attempting segmentation in 2D images}
\label{sec:poMaAl}
\subsection{Image Segmentation tutorial}
\label{ssec:imSegTut}
I started off by looking at what is available online as far as MATLAB implementations are concerned. Specifically for lesion delineation there are few options, and fewer still that implement neural networks. The ones I looked at do not really use neural networks either, but I reviewed them anyway.
First off was the Image Segmentation Tutorial \cite{matlabSegmentationTutorial}, which is the one I went into most deeply. It shows region growing methods to find nickels and dimes in an image.
\begin{figure}[!htb]
\centering
\includegraphics[width=0.8\textwidth]{img/coinsThresholdedSegmentation}
\caption{Results shown by the algorithm}
\label{fig:coinsThresholdedSegmentation}
\end{figure}
It starts off by finding minima in the image. These points are then grown using binary images, which are obtained by thresholding, since the grayscale image histogram clearly shows that some objects have a distinctly higher luminosity than others. In this case the coins have a higher luminosity than the wooden background.
Once the regions are obtained, they are classified using overall blob area – whether the area is larger or smaller than a pre-picked value; a sketch of this thresholding and area classification is shown below. This part could be done with a neural network instead. In either case, though, the algorithm would fail if a new image were taken from a different distance: neither the hardcoded size thresholds nor a trained neural network would be able to say with certainty whether a given blob is a nickel or a dime. This points out that area alone is not a sufficient feature for a neural network even in this simple case.
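A rough sketch of that pipeline, assuming the Image Processing Toolbox, using MATLAB's bundled \texttt{coins.png} demo image and a placeholder area cut-off instead of the tutorial's exact values:
\begin{lstlisting}[language=Matlab]
% Tutorial-style pipeline: threshold a grayscale image into a binary mask,
% label the connected blobs and classify them by area.
gray  = im2double(imread('coins.png'));   % demo image bundled with MATLAB
bw    = gray > graythresh(gray);          % Otsu threshold into a binary mask
bw    = imfill(bw, 'holes');              % close holes inside the coins
stats = regionprops(bw, 'Area', 'Centroid');

areaCutoff  = 2000;                       % pre-picked value in pixels (placeholder)
isLargeCoin = [stats.Area] > areaCutoff;  % e.g. nickels vs. dimes by blob area
\end{lstlisting}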
\subsection{Tumor detectors}
\label{ssec:tumors}
Of the ones that worked there are two – Automatic segmentation of brain tumor in MR images \cite{matlabTumor} and, seemingly a rip-off of the aforementioned, Brain tumor detection from MRI images using anisotropic filter and segmentation image processing \cite{matlabTumor2}. As for why I call it a rip-off: the second one is two years newer and uses an identical custom helper function.
\begin{figure}[!htb]
\centering
\includegraphics[width=0.8\textwidth]{img/tumor2}
\caption{Older tumor detection algorithm\cite{matlabTumor} results for lesion detection.}
\label{fig:tumor1}
\end{figure}
\begin{figure}[!htb]
\centering
\includegraphics[width=0.8\textwidth]{img/tumerFull}
\caption{Newer tumor detection algorithm\cite{matlabTumor2} results for lesion detection.}
\label{fig:tumor2}
\end{figure}
In theory these try to find tumors in the brain, so it is perhaps no surprise that neither the first one (Fig.~\ref{fig:tumor1}) nor the second one (Fig.~\ref{fig:tumor2}) is able to find the lesion area. So I moved on.
\section{Feature extraction for neural network}
\label{sec:featnn}
This is where I spent most of my time. I created four functions that would be useful if I ever return to this approach. The first one is \texttt{NNCP\_Image\_Segmentation\_edgetech.m} – a customized version of the Image Segmentation Tutorial mentioned before. In the end it creates three structures that I intended to use as inputs when training a neural network. These structures contain the definition of the largest-area blob as given by the MATLAB function \texttt{regionprops}. That gives a lot of numbers (Fig.~\ref{fig:matalbNumbers}), which is good when going for precision in neural networks, not so much when thinking of training speed.
\begin{figure}[!htb]
\centering
\includegraphics[width=0.3\textwidth]{img/MATLAB_2017-12-17_15-38-27}
\caption{Properties of the largest-area blob as returned by \texttt{regionprops}.}
\label{fig:matalbNumbers}
\end{figure}
As to why three structures: I threshold the binary image at three different normalized levels~-~0.3, 0.4 and 0.6. These are fairly arbitrary; I eyeballed them, since a brain grayscale image is not nearly as clear cut as coins on a table. These thresholds give varying blobs in some cases, which is good enough. I am thinking of one case in particular that I noticed while testing: at one threshold level a large lesioned area is within the blob, at another level it is entirely outside. My idea was that a neural network would see this sudden disappearance of a large blob area and conclude that it is due to a lesion.
\begin{figure}[!htb]
\centering
\includegraphics[width=0.8\textwidth]{img/meh}
\caption{Optional graphs drawn by the script: blob centers (left column), image histogram with the threshold level (middle column), blob boundaries and numbers (right column).}
\label{fig:meh}
\end{figure}
I also made the script optionally draw some graphs (Fig.~\ref{fig:meh}). These show the blob centers in the left column, the image histogram with the threshold level marked in the middle column, and the blob boundaries and numbers in the right column. Here I show the idea mentioned above about a threshold making the blob encompass the lesion or not.
The other functions deal with variable/data processing, shuffling data around, and generally speeding up and automating repetition. The second one is \texttt{NNCP\_ImageCycling.m}. In essence it takes a .nii file, which is 3D, and makes a series of 2D slices, saving them as separate images (thanks to \texttt{nifti2slices.m}). Then it goes through each of those slices and sends them off to \texttt{NNCP\_Image\_Segmentation\_edgetech.m} to extract features. It concatenates those features for all slices and returns them to the caller; a rough sketch of this loop is shown below. There are other bells and whistles that ease the work, but they are not essential.
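A rough sketch of that loop; \texttt{niftiread} is used here instead of my \texttt{nifti2slices.m} for brevity, the file name is a placeholder, and the slice-level extractor is assumed to return one numeric row of features per slice:
\begin{lstlisting}[language=Matlab]
% Per-slice feature-extraction loop (illustrative only).
vol = niftiread('patient.nii');                       % 3D volume (placeholder name)
features = [];
for z = 1:size(vol, 3)
    slice = mat2gray(vol(:, :, z));                   % normalized 2D slice
    f = NNCP_Image_Segmentation_edgetech(slice);      % blob features for this slice
    features = [features; f];                         %#ok<AGROW>
end
\end{lstlisting}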
The third function, which chronologically came first of these four, is \texttt{nifti2slices.m}. It returns the image data of a specified slice or slices back to the calling function. It was a lot of fun with automation, but, effectively, an ineffective use of time.
And lastly the intended front-end function: \texttt{NNCP\_ET\_NN.m}. This is what initiates feature gathering from, at this point, 6 .nii files and then parses the features into a matrix of double values, since that is what I thought the neural network trainers would want. It also generates the goals vector; currently I gave it values by visual inspection, but I might add a script for deriving the values from the lesion .nii files that I have – a possible sketch of that is shown below.
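One possible way to derive the goals from the manual lesion volumes instead of visual inspection – a slice is labeled 1 if it contains any lesion voxels; the file name is a placeholder:
\begin{lstlisting}[language=Matlab]
% Derive a per-slice goal vector from a binary lesion volume.
lesion = niftiread('patient_lesion.nii') > 0;         % binary lesion volume
goals  = double(squeeze(any(any(lesion, 1), 2))');    % 1 x numSlices vector of 0/1
\end{lstlisting}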
\iffalse
\begin{figure}[!htb]
\centering
\includegraphics[width=0.8\textwidth]{img/MATLAB_2017-12-17_13-42-07}
\caption{How far I've got.}
\label{fig:fuckItImOut}
\end{figure}
\fi
And at this point I hit \texttt{nntool}. At best I got an error saying that it does not like my inputs matrix. I formatted the data, input and output, like in the apples-and-pears example from way back when; the functions here did not accept that, and I did not have the motivation or energy left to bash my head against it until something eventually worked.
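For reference, a generic sketch of the data shape that MATLAB's network training functions usually expect – features as rows, samples as columns – assuming the Neural Network / Deep Learning Toolbox is available. This is a direction I would try next, not a verified fix for the exact error above:
\begin{lstlisting}[language=Matlab]
% Generic shape for MATLAB network training: features x samples.
X = double(features');           % numFeatures x numSamples
T = double(goals);               % 1 x numSamples (0 = no lesion, 1 = lesion)

net  = feedforwardnet(10);       % one hidden layer with 10 neurons
net  = train(net, X, T);
pred = net(X) > 0.5;             % threshold the network outputs
\end{lstlisting}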
\section{Conclusions}
\label{sec:conclusions}
So this is not even like the embedded systems laboratory works, where in the conclusion I could just write "it works". Here nothing works, and I do not have in me whatever is needed to make it work. Working on images of neural networks with neural networks destroys too many of my own neural networks by way of stress and, at times, anger.
Here is the thing – neural networks are a difficult construct, especially so when trying to find a region within an image. Even more so when said image is grayscale and asymmetric and the region to look for may or may not be there, may be nearly a circle, or may be a small noodle on the side. What I am getting at is that, in my situation, neural networks are too difficult a concept for me to implement in a way that would yield conclusive results. I admit being at fault here, since I left myself two days instead of working intermittently over the whole semester, but either way, from what I understand, I did not have enough data to train a proper image recognition neural network. Utilizing some great MATLAB toolboxes, like SPM12, gives better results with less hassle and also allows working in 3D instead of 2D (referring to J.C. Griffis' approach \cite{griffis2016voxel}).
\clearpage
\section{Source Codes}
\lstset{
numbers=left,
breaklines=true,
tabsize=2,
basicstyle=\ttfamily,
}
\begin{footnotesize}
\lstinputlisting[language=Matlab]{codes/NNCP_ET_NN.m}
\lstinputlisting[language=Matlab]{codes/nifti2slices.m}
\lstinputlisting[language=Matlab]{codes/NNCP_ImageCycling.m}
\lstinputlisting[language=Matlab]{codes/NNCP_Image_Segmentation_edgetech.m}
\lstinputlisting[language=Matlab]{codes/et_dsc.m}
\lstinputlisting[language=Matlab]{codes/et_lesion.m}
\end{footnotesize}
\clearpage
\clearpage
\nocite{*}
\bibliographystyle{unsrt}
\bibliography{References}
\end{document}
\documentclass[a4paper,10pt]{article}
\usepackage[utf8]{inputenc}
\input{included_packages}
%opening
\title{NUR Handin Two}
\author{Jacob Bieker}
\begin{document}
\maketitle
\begin{abstract}
In this document solutions for the Numerical Recipes for Astrophysics Handin Two are presented.
\end{abstract}
\section{Setup}
The random seed for this run is:
\lstinputlisting{seed.txt}
The script that runs the rest of the code and created that seed is:
\lstinputlisting{main.py}
\input{part_1}
\input{part_2}
\input{part_3}
\input{part_4}
\input{part_5}
\input{part_6}
\input{part_7}
\input{part_8}
\end{document}
\paragraph{}
\label{THREADS:CURRENT-THREAD}
\index{CURRENT-THREAD}
--- Function: \textbf{current-thread} [\textbf{threads}] \textit{}
\begin{adjustwidth}{5em}{5em}
Returns a reference to the invoking thread.
\end{adjustwidth}
\paragraph{}
\label{THREADS:DESTROY-THREAD}
\index{DESTROY-THREAD}
--- Function: \textbf{destroy-thread} [\textbf{threads}] \textit{}
\begin{adjustwidth}{5em}{5em}
not-documented
\end{adjustwidth}
\paragraph{}
\label{THREADS:GET-MUTEX}
\index{GET-MUTEX}
--- Function: \textbf{get-mutex} [\textbf{threads}] \textit{mutex}
\begin{adjustwidth}{5em}{5em}
Acquires a lock on the `mutex'.
\end{adjustwidth}
\paragraph{}
\label{THREADS:INTERRUPT-THREAD}
\index{INTERRUPT-THREAD}
--- Function: \textbf{interrupt-thread} [\textbf{threads}] \textit{thread function \&rest args}
\begin{adjustwidth}{5em}{5em}
Interrupts THREAD and forces it to apply FUNCTION to ARGS.
When the function returns, the thread's original computation continues. If multiple interrupts are queued for a thread, they are all run, but the order is not guaranteed.
\end{adjustwidth}
\paragraph{}
\label{THREADS:MAILBOX-EMPTY-P}
\index{MAILBOX-EMPTY-P}
--- Function: \textbf{mailbox-empty-p} [\textbf{threads}] \textit{mailbox}
\begin{adjustwidth}{5em}{5em}
Returns non-NIL if the mailbox can be read from, NIL otherwise.
\end{adjustwidth}
\paragraph{}
\label{THREADS:MAILBOX-PEEK}
\index{MAILBOX-PEEK}
--- Function: \textbf{mailbox-peek} [\textbf{threads}] \textit{mailbox}
\begin{adjustwidth}{5em}{5em}
Returns two values. The second is non-NIL when the mailbox
is empty. The first is the next item to be read from the mailbox.
Note that, due to multi-threading, the first value returned upon
peek may be different from the one returned upon the next read in the
calling thread.
\end{adjustwidth}
\paragraph{}
\label{THREADS:MAILBOX-READ}
\index{MAILBOX-READ}
--- Function: \textbf{mailbox-read} [\textbf{threads}] \textit{mailbox}
\begin{adjustwidth}{5em}{5em}
Blocks on the mailbox until an item is available for reading.
When an item is available, it is returned.
\end{adjustwidth}
\paragraph{}
\label{THREADS:MAILBOX-SEND}
\index{MAILBOX-SEND}
--- Function: \textbf{mailbox-send} [\textbf{threads}] \textit{mailbox item}
\begin{adjustwidth}{5em}{5em}
Sends an item into the mailbox, notifying 1 waiter
to wake up for retrieval of that object.
\end{adjustwidth}
\paragraph{}
\label{THREADS:MAKE-MAILBOX}
\index{MAKE-MAILBOX}
--- Function: \textbf{make-mailbox} [\textbf{threads}] \textit{\&key ((queue g284367) NIL)}
\begin{adjustwidth}{5em}{5em}
not-documented
\end{adjustwidth}
\paragraph{}
\label{THREADS:MAKE-MUTEX}
\index{MAKE-MUTEX}
--- Function: \textbf{make-mutex} [\textbf{threads}] \textit{\&key ((in-use g284630) NIL)}
\begin{adjustwidth}{5em}{5em}
not-documented
\end{adjustwidth}
\paragraph{}
\label{THREADS:MAKE-THREAD}
\index{MAKE-THREAD}
--- Function: \textbf{make-thread} [\textbf{threads}] \textit{function \&key name}
\begin{adjustwidth}{5em}{5em}
not-documented
\end{adjustwidth}
\paragraph{}
\label{THREADS:MAKE-THREAD-LOCK}
\index{MAKE-THREAD-LOCK}
--- Function: \textbf{make-thread-lock} [\textbf{threads}] \textit{}
\begin{adjustwidth}{5em}{5em}
Returns an object to be used with the `with-thread-lock' macro.
\end{adjustwidth}
\paragraph{}
\label{THREADS:MAPCAR-THREADS}
\index{MAPCAR-THREADS}
--- Function: \textbf{mapcar-threads} [\textbf{threads}] \textit{}
\begin{adjustwidth}{5em}{5em}
not-documented
\end{adjustwidth}
\paragraph{}
\label{THREADS:OBJECT-NOTIFY}
\index{OBJECT-NOTIFY}
--- Function: \textbf{object-notify} [\textbf{threads}] \textit{object}
\begin{adjustwidth}{5em}{5em}
Wakes up a single thread that is waiting on OBJECT's monitor.
If any threads are waiting on this object, one of them is chosen to be awakened. The choice is arbitrary and occurs at the discretion of the implementation. A thread waits on an object's monitor by calling one of the wait methods.
\end{adjustwidth}
\paragraph{}
\label{THREADS:OBJECT-NOTIFY-ALL}
\index{OBJECT-NOTIFY-ALL}
--- Function: \textbf{object-notify-all} [\textbf{threads}] \textit{object}
\begin{adjustwidth}{5em}{5em}
Wakes up all threads that are waiting on this OBJECT's monitor.
A thread waits on an object's monitor by calling one of the wait methods.
\end{adjustwidth}
\paragraph{}
\label{THREADS:OBJECT-WAIT}
\index{OBJECT-WAIT}
--- Function: \textbf{object-wait} [\textbf{threads}] \textit{object \&optional timeout}
\begin{adjustwidth}{5em}{5em}
Causes the current thread to block until object-notify or object-notify-all is called on OBJECT.
Optionally unblock execution after TIMEOUT seconds. A TIMEOUT of zero
means to wait indefinitely.
A non-zero TIMEOUT of less than a nanosecond is interpreted as a nanosecond wait.
See the documentation of java.lang.Object.wait() for further
information.
\end{adjustwidth}
\paragraph{}
\label{THREADS:RELEASE-MUTEX}
\index{RELEASE-MUTEX}
--- Function: \textbf{release-mutex} [\textbf{threads}] \textit{mutex}
\begin{adjustwidth}{5em}{5em}
Releases a lock on the `mutex'.
\end{adjustwidth}
\paragraph{}
\label{THREADS:SYNCHRONIZED-ON}
\index{SYNCHRONIZED-ON}
--- Special Operator: \textbf{synchronized-on} [\textbf{threads}] \textit{}
\begin{adjustwidth}{5em}{5em}
not-documented
\end{adjustwidth}
\paragraph{}
\label{THREADS:THREAD}
\index{THREAD}
--- Class: \textbf{thread} [\textbf{threads}] \textit{}
\begin{adjustwidth}{5em}{5em}
not-documented
\end{adjustwidth}
\paragraph{}
\label{THREADS:THREAD-ALIVE-P}
\index{THREAD-ALIVE-P}
--- Function: \textbf{thread-alive-p} [\textbf{threads}] \textit{thread}
\begin{adjustwidth}{5em}{5em}
Boolean predicate whether THREAD is alive.
\end{adjustwidth}
\paragraph{}
\label{THREADS:THREAD-JOIN}
\index{THREAD-JOIN}
--- Function: \textbf{thread-join} [\textbf{threads}] \textit{thread}
\begin{adjustwidth}{5em}{5em}
Waits for thread to finish.
\end{adjustwidth}
\paragraph{}
\label{THREADS:THREAD-NAME}
\index{THREAD-NAME}
--- Function: \textbf{thread-name} [\textbf{threads}] \textit{}
\begin{adjustwidth}{5em}{5em}
not-documented
\end{adjustwidth}
\paragraph{}
\label{THREADS:THREADP}
\index{THREADP}
--- Function: \textbf{threadp} [\textbf{threads}] \textit{}
\begin{adjustwidth}{5em}{5em}
not-documented
\end{adjustwidth}
\paragraph{}
\label{THREADS:WITH-MUTEX}
\index{WITH-MUTEX}
--- Macro: \textbf{with-mutex} [\textbf{threads}] \textit{}
\begin{adjustwidth}{5em}{5em}
Acquires a lock on `mutex', executes the body
and releases the lock.
\end{adjustwidth}
\paragraph{}
\label{THREADS:WITH-THREAD-LOCK}
\index{WITH-THREAD-LOCK}
--- Macro: \textbf{with-thread-lock} [\textbf{threads}] \textit{}
\begin{adjustwidth}{5em}{5em}
Acquires a lock on the `lock', executes `body' and releases the lock.
\end{adjustwidth}
\paragraph{}
\label{THREADS:YIELD}
\index{YIELD}
--- Function: \textbf{yield} [\textbf{threads}] \textit{}
\begin{adjustwidth}{5em}{5em}
A hint to the scheduler that the current thread is willing to yield its current use of a processor. The scheduler is free to ignore this hint.
See java.lang.Thread.yield().
\end{adjustwidth}
\documentclass[a4paper,11pt,notitlepage]{report}
% Henrik Kramselund, January 2020
% [email protected],
% My standard packages
\usepackage{tex/pentest-report}
\begin{document}
\rm
\selectlanguage{english}
\input{customer.tex}
\mytitle{Penetration Testing Report}{Example A/S}
{\bf this is a sample report, may be copied and reused by anyone! See LICENSE file}
\renewcommand{\baselinestretch}{1}
{\color{titlecolor}\tableofcontents}
%\listoffigures - not used
%\listoftables - not used
\pagestyle{fancyplain}
\normal
\begin{versionhistory}
\vhEntry{1.0-draft}{2020-09-24}{HKJ}{Created}
% \vhEntry{1.0}{2020-05-28}{HKJ}{Færdig udgave kunde}
% \vhEntry{1.1}{\today}{HLK}{Customer final - appendix A errors}
\end{versionhistory}
%\renewcommand{\baselinestretch}{2}
\chapter{Introduction}
\markboth{Preface}{}
This report is the result of a penetration test performed by Zencurity ApS (Zencurity) against the server IP addresses provided by the customer Template A/S. The activity was performed from February 9 until February 20, 20xx.
The primary goal of this test has been to uncover vulnerabilities in the environment which may allow attackers to gain unauthorised access to the network and servers. No denial-of-service (DoS) attacks have been performed. The report contains a digested summary of the vulnerabilities found in this environment. Further, we have included raw data from some of the tools used in this testing activity.
This is an example report created using the template\\
\link{https://github.com/kramse/pentest-report}
\eject
% Main chapters
\chapter{Target Overview}
Security testing was performed against 6 IP addresses in use by Template A/S. The targets have been the following IP addresses:

\begin{tabular}{lll}
IP address & Host name & Description \\
\hline
10.0.60.74 & No name found & Web server \\
10.0.60.122 & No name found & Web server \\
10.0.60.123 & No name found & VPN server / Netscaler gateway \\
10.0.60.140 & No name found & Web server \\
10.0.60.194 & No name found & Web server \\
10.0.60.195 & No name found & Web server \\
\end{tabular}

These addresses are routed by Global Connect and we have no further comments about routing.
The whois information currently states that the subnet should be announced with another origin - the AS number AS25111 or AS2830 - which may have been correct previously.
The object shown in appendix A which should be removed is:
\begin{alltt}
% Information related to '10.0.60.0/24AS2830'
route: 10.0.60.0/24
descr: UUNET
origin: AS1234
mnt-by: AS4321-MNT
source: RIPE # Filtered
\end{alltt}
Since this object is maintained by AS4321-MNT, removing it probably requires contacting XXX Networks, which operates AS4321.
We also noticed that the main domain Template.dk and the host name www.Template.dk do not point to the same IP:
\begin{alltt}
Template.dk has address xx.26.xx.130
Template.dk mail is handled by 10 Template-dk.mail.eo.outlook.com.
www.Template.dk has address xxx.114.xxx.74
\end{alltt}
The IP xxx.26.xxx.130 also redirects to www.Template.dk, so we recommend updating the record to point directly at the real web site and server.
\section{Goal and Strategy}
The review performed is based on data from customer and active testing methods.
We have been given full insight into firewall configuration, Wi-Fi administration, VLAN information, IP address plan etc.
The review contains the following parts and items:
LAN Security Review:
\begin{itemize}
\item Software version of the devices
\item Basic settings NTP, DNS, Syslog, SNMP
\item Networks and VLAN isolation
\end{itemize}
Wi-Fi Security Review:
\begin{itemize}
\item Software version of the devices
\item Networks and managed SSIDs
\item LAN connection settings, VLAN isolation
\item Unmanaged SSIDs
\item Encryption settings
\item Authentication settings
\end{itemize}
VPN Security Review:
\begin{itemize}
\item Software version of the devices
\item Site-2-site VPN
\item Client VPN
\item Encryption settings
\item Authentication settings
\end{itemize}
Firewall review:
\begin{itemize}
\item Software version of the devices
\item Basic settings NTP, DNS, Syslog
\item Management of system and users
\item Management settings including encryption settings and users
\item Firewall Zones
\item Hosts and host groups
\item Services and service groups
\item Firewall Rules
\end{itemize}
\section{Test actions performed}
We have also carried out the following active sub-tasks in this test:
\begin{itemize}
\item Full TCP port scan of outside network - including ports 1-65535
\item Sample TCP port scans of internal network
\item Nmap service scanning - attempted service identification on open ports
\item Sample UDP port scanning and service scan using Nmap UDP probes
\item Metasploit Discovery and port scan tasks of outside network
\item Metasploit Penetration task of outside network
\item TLS scanning using multiple tools for identifying the supported server ciphers
\item DNS lookups, traceroute, ICMP testing and other basic tasks
\item Manual test cases against systems found
\end{itemize}
Port scan includes protocols TCP, UDP and IP scanning utilizing various scanning techniques. The service scan performs deeper identification by sending valid requests for the services, to try to identify the actual open service in more detail.
Due to the nature of the UDP protocol it cannot be fully scanned, and results for UDP are more uncertain. Instead probes have been sent for the most popular protocols in use on top of UDP.
Exploitation has been attempted against open ports using known exploits and specialized scanning for the protocols identified.
\chapter{Executive Summary}
\label{exec-summary}
This report is the result of review activities performed by \company{}. The activities were performed in \duration. Scope has been to perform a \projecttitle{} of the network at the main site.
The primary goal for this test has been to uncover weaknesses and vulnerabilities in the environment which may allow attackers to gain unauthorised access to the network and servers. We have discovered minor to high risk vulnerabilities in the tested systems.
{\bf examples only, hopefully no customer has all of these!}
The main conclusions are:
\begin{itemize}
\item Port scan and visual inspection have revealed a number of older, insecure and outdated LAN network devices. Single points of failure - single critical devices - are also found in the current network\\
We recommend eliminating single points of failure for critical systems and services
\item Port scan has identified a number of unmanaged devices or devices with default vendor credentials
\item The network has a lot of management interfaces that can be attacked from the LAN
\item Port scan has shown that traffic flows internally are almost unrestricted from LAN segments to other parts, and from on-site and into remote segments connected through VPN\\
We recommend implementing basic filtering, to restrict data flows, and to ensure they are in place when needed in the future
\item A malware incident and/or hacker activity in this network would have a high risk of infecting many parts of the infrastructure.\\
We highly recommend creating new zones/VLANs for isolation and segregation
\item Wireless networks managed by \customer{} are encrypted using up-to-date protocols
\item Wireless scan has identified a number of unmanaged wireless networks, or test networks with unknown security levels
\item We have observed that Wi-Fi solutions are configured with multiple SSIDs sharing VLANs.\\
We recommend reconfiguration of the VLAN settings
\item Some wireless networks use shared key systems (WPA-Personal) -- with keys known by former employees who have left the organisation\\
We recommend setting dates for removal of the wireless networks using WPA-Personal shared keys
\item VPN solutions are configured with older and insecure encryption and integrity algorithms.\\
We recommend reconfiguration of the VPN settings
\item Firewall review has identified few problems with the firewall itself, as the firewall was reconfigured recently.
\item Firewall review has shown the current firewall policies to be very open. Current policies allow devices in network segments used by employees, wired and wireless, to communicate freely -- even across production, development and testing facilities. This is not according to best current practice and we recommend creating new zones/VLANs for isolation and segregation
\item Firewall review shows that networks are not separated and there is a high likelihood that problems in one area of the network will affect the whole network and all users
\item ...
\end{itemize}
The overall conclusion is that the current network is not sufficiently protected from attacks due to almost no segregation of the zones used. This, coupled with a user base that is allowed to connect and disconnect a number of devices, servers and systems, creates a high risk of security incidents involving large parts of the organisation.
We recommend that the following initiatives regarding networks at \customer{} are put into place, with priority given to isolating and improving the networks and devices:
\begin{enumerate}
\item Unmanaged devices should be removed or controlled
\item Single point of failure should be eliminated to avoid disruptions to normal business
\item Best current security practices dictate the placement of the management ports on a dedicated management LAN or VLAN restricted to trusted Administrators.
\item VPN site-2-site settings should be updated, and requires few resources. This includes implementing basic filtering, that can be expanded in case of incidents
\item Client VPN solutions should have updated settings, and would benefit from a single solution. Client VPN should also have basic filtering implemented, that can be expanded in case of incidents
\item Traffic flows between segregated networks should be monitored closely
\item Port-security should be used for limiting the use of unmanaged devices, and limiting the effect of connecting other equipment that may affect the networks. Port-security can also be used for automatically joining a connecting user to the right network
\item ...
\end{enumerate}
Finally, Zencurity recommends that \customerlong{} performs periodic security testing of its business to verify that mechanisms and processes implemented to protect critical company assets are working as expected. Especially the processes and mechanisms that will detect and respond to an attack.
OR
We have no further recommendations for the current environment, and can only recommend that the instructions from the vendors are followed regarding the administration, software upgrading and control of this environment.
The following sections describe in more detail the information uncovered during this testing.
\eject
\chapter{Scanning}
During this testing project we have uncovered open TCP ports and other services, as to be expected from such an environment. The open ports and services have been identified further and examined by tools known as service scanning. We have also concluded that a firewall/filtering device is in place, which can be seen in the responses received - and responses not received for port requests sent.
The open ports and services are shown below. The firewall in place is reported with the port status filtered, which is according to best current practice. Most ports are filtered which is good.
Due to the nature of the UDP protocol it cannot be fully scanned, and results for UDP are more uncertain. Instead probes have been sent for the most popular protocols in use on top of UDP. We have tried sending UDP probes for the 100 most popular UDP ports found on the internet. A full UDP port scan could not be completed as the firewall discards the probes without sending any indication if the UDP ports are open or not.
We have also performed invasive intrusion attempts at the services.
\chapter{Overview of Open Ports}
The found servers and open ports are shown below:
\begin{alltt}\footnotesize
Hosts
=====
address mac name os_name os_flavor os_sp purpose info comments
------- --- ---- ------- --------- ----- ------- ---- --------
10.0.60.74 10.0.60.74 Windows device
10.0.60.122 10.0.60.122 Windows 7 client
10.0.60.123 10.0.60.123 embedded device
10.0.60.140 10.0.60.140 Windows Vista client
10.0.60.194 10.0.60.194 Windows 7 client
10.0.60.195 10.0.60.195 Windows 2008 server
\end{alltt}
\begin{alltt}\footnotesize
Services
========
host port proto name state info
---- ---- ----- ---- ----- ----
10.0.60.74 80 tcp http open Microsoft-HTTPAPI/2.0
10.0.60.122 80 tcp http open ( 403-Forbidden ( The server denied the specified Uniform Resource Locator (URL). Contact the server administrator. ) )
10.0.60.122 443 tcp https open ( 403-Forbidden ( The server denied the specified Uniform Resource Locator (URL). Contact the server administrator. ) )
10.0.60.123 443 tcp https open ( 302-/vpn/tmindex.html )
10.0.60.140 80 tcp http open ( 302-https://10.0.60.140/ )
10.0.60.140 443 tcp https open ( 403-Forbidden ( The page requires a client certificate as part of the authentication process. If you are using a smart card, you will need to insert your smart card to select an appropriate certificate. Otherwise, contact your server administrator. ) )
10.0.60.194 80 tcp http open ( 403-Forbidden ( The server denied the specified Uniform Resource Locator (URL). Contact the server administrator. ) )
10.0.60.194 443 tcp https open ( 403-Forbidden ( The server denied the specified Uniform Resource Locator (URL). Contact the server administrator. ) )
10.0.60.195 80 tcp http open ( 403-Forbidden ( The server denied the specified Uniform Resource Locator (URL). Contact the server administrator. ) )
10.0.60.195 443 tcp https open ( 403-Forbidden ( The server denied the specified Uniform Resource Locator (URL). Contact the server administrator. ) )
\end{alltt}
\chapter{Host 10.0.60.74}
We have the following basic information about this host:
\begin{alltt}
Nmap scan report for 10.0.60.74
Host is up (0.017s latency).
Not shown: 65534 filtered ports
PORT STATE SERVICE VERSION
80/tcp open http Microsoft ISA httpd
|_http-methods: No Allow or Public header in OPTIONS response (status code 404)
|_http-title: Not Found
Service Info: OS: Windows; CPE: cpe:/o:microsoft:windows
\end{alltt}
The ports and services found are as expected for a web server. The ports identified are probably used for:
\begin{list2}
\item port 80 HTTP Hypertext Transfer Protocol unencrypted
\end{list2}
The protocols are expected in this environment.
\section{Vulnerabilities}
We have not uncovered vulnerabilities for this server and have no further recommendations.
\chapter{Host 10.0.60.122}
We have the following basic information about this host:
\begin{alltt}
Nmap scan report for 10.0.60.122
Host is up (0.017s latency).
Not shown: 65533 filtered ports
PORT STATE SERVICE VERSION
80/tcp open http Microsoft IIS
|_http-methods: No Allow or Public header in OPTIONS response (status code 403)
|_http-title: The page cannot be displayed
443/tcp open ssl/http Microsoft IIS
|_http-methods: No Allow or Public header in OPTIONS response (status code 403)
|_http-title: The page cannot be displayed
| ssl-cert: Subject: commonName=*.Template.com/organizationName=Template A/S/stateOrProvinceName=Copenhagen/countryName=DK
| Not valid before: 2010-12-14T11:50:26+00:00
|_Not valid after: 2015-12-14T11:50:24+00:00
|_ssl-date: 2015-02-20T07:49:51+00:00; 0s from local time.
| sslv2:
| SSLv2 supported
| ciphers:
| SSL2_RC4_128_WITH_MD5
|_ SSL2_DES_192_EDE3_CBC_WITH_MD5
Service Info: OS: Windows; CPE: cpe:/o:microsoft:windows
\end{alltt}
The ports and services found are as expected for a web server. The ports identified are probably used for:
\begin{list2}
\item port 80 HTTP Hypertext Transfer Protocol unencrypted
\item port 443 HTTPS Hypertext Transfer Protocol Secure encrypted
\end{list2}
The protocols are expected in this environment.
\section{Vulnerabilities}
This server supports SSL version 2 and SSL version 3, both of which should be turned off.
\begin{alltt}
Testing SSL server 10.0.60.122 on port 443
Supported Server Cipher(s):
Accepted SSLv2 128 bits RC4-MD5
Accepted SSLv2 112 bits DES-CBC3-MD5
Accepted SSLv3 128 bits RC4-SHA
Accepted SSLv3 128 bits RC4-MD5
Accepted SSLv3 112 bits DES-CBC3-SHA
\end{alltt}
We have not uncovered further vulnerabilities for this server and have no further recommendations.
\chapter{Host 10.0.60.123}
We have the following basic information about this host:
\begin{alltt}
Nmap scan report for 10.0.60.123
Host is up (0.018s latency).
Not shown: 65534 filtered ports
PORT STATE SERVICE VERSION
443/tcp open ssl/https
|_http-methods: No Allow or Public header in OPTIONS response (status code 302)
| http-title: NetScaler Gateway
|_Requested resource was /vpn/tmindex.html
| ssl-cert: Subject: commonName=*.Template.com/organizationName=Template A/S/stateOrProvinceName=Copenhagen/countryName=DK
| Not valid before: 2010-12-14T11:50:26+00:00
|_Not valid after: 2015-12-14T11:50:24+00:00
|_ssl-date: 2015-02-20T07:50:04+00:00; +13s from local time.
\end{alltt}
The ports and services found are as expected for this VPN gateway. The port identified is probably used for:
\begin{list2}
\item port 443 HTTPS Hypertext Transfer Protocol Secure encrypted
\end{list2}
The protocol is expected in this environment.
\section{Vulnerabilities}
This server supports SSL version 3, which should be turned off.
\begin{alltt}
Testing SSL server 10.0.60.123 on port 443
Supported Server Cipher(s):
Accepted SSLv3 256 bits ECDHE-RSA-AES256-SHA
Accepted SSLv3 256 bits AES256-SHA
Accepted SSLv3 128 bits ECDHE-RSA-AES128-SHA
Accepted SSLv3 128 bits AES128-SHA
Accepted SSLv3 128 bits ECDHE-RSA-RC4-SHA
Accepted SSLv3 128 bits RC4-SHA
Accepted SSLv3 128 bits RC4-MD5
Accepted SSLv3 112 bits ECDHE-RSA-DES-CBC3-SHA
Accepted SSLv3 112 bits DES-CBC3-SHA
\end{alltt}
We have not uncovered further vulnerabilities for this server and have no further recommendations.
\appendix
\rhead{\fancyplain{}{\bf \leftmark}}
%\setlength{\parskip}{5pt}
\normal
\chapter{Whois}
This section contains the whois information about the customer range. We always perform this lookup to ensure we are targeting the correct customer.
NOTE: in this test we observed that an extra route object exists, which points to another origin AS number for this range. The customer should instruct the network department to fix this to avoid future routing problems.
NOTE: we can see that ZENCURITY-MNT is allowed to make changes to these objects, which is probably not relevant anymore and should be removed.
\begin{alltt}
inetnum: 193.0.56.0 - 10.0.60.255
netname: Template
descr: Template A/S
country: DK
% Information related to '10.0.56.0/22AS1234'
route: 10.0.56.0/22
descr: Template Network
origin: AS12345
mnt-by: AS12345-MNT
source: RIPE # Filtered
\end{alltt}
\chapter{DNS and Name Servers}
We have performed lookups with regards to the main domain Template.dk
Name servers for the domain (host -t ns Template.dk):
\begin{alltt}
Template.dk name server ns1.ascio.net.
Template.dk name server ns2.ascio.net.
ns1.ascio.net has address 185.26.230.9
ns2.ascio.net has address 80.237.153.102
\end{alltt}
We have performed lookups with regards to the main domain Template.com
Name servers for the domain (host -t ns Template.com):
\begin{alltt}
Template.com name server ns2.ascio.net.
Template.com name server ns1.ascio.net.
Template.com name server ns4.ascio.net.
Template.com name server ns3.ascio.net.
ns1.ascio.net has address 185.26.230.9
ns2.ascio.net has address 80.237.153.102
ns3.ascio.net has address 54.183.16.145
ns4.ascio.net has address 72.32.149.232
\end{alltt}
This shows at least two name servers for domains, and these are placed in separate subnets.
No further comments about domains.
\chapter{Test Servers}
We have performed the testing from the IP addresses below:
\begin{list2}
\item 91.102.91.16/28 main test range
\item 185.27.115.0/24 main test range
\item 109.238.48.20 manual verification via VPN
\end{list2}
\bibliographystyle{alpha}
%\bibliography{../ipv6-reference/security6-net.bib,../ipv6-reference/rfc.bib,../ipv6-reference/std.bib,../ipv6-reference/fyi.bib}
%\bibliography{kramse.bib,rfc.bib,std.bib,fyi.bib}
%,internet.bib}
%\printindex
\label{LastPage}
\end{document}
% zen 'for beeing included' snippet template
%
% (c) Karsten Reincke, Frankfurt a.M. 2012, ff.
%
% This text is licensed under the Creative Commons Attribution 3.0 Germany
% License (http://creativecommons.org/licenses/by/3.0/de/): Feel free to share
% (to copy, distribute and transmit) or to remix (to adapt) it, if you respect
% how you must attribute the work in the manner specified by the author(s):
% \newline
% In an internet based reuse please link the reused parts to zen.fodina.de
% and mention the original author Karsten Reincke in a suitable manner. In a
% paper-like reuse please insert a short hint to zen.fodina.de and to the
% original author, Karsten Reincke, into your preface. For normal quotations
% please use the scientific standard to cite
%
%% use all entries of the bibliography
%\nocite{*}
\section{Context Summary}
\section{Central Thesis}
\section{Commentary}
\section{Bibliographic References Under Test}
Full citation\footnote{\cite[cf.][p.~1]{Covey2006a}}.
Immediate follow-up reference\footcite[cf.][p.~2]{Covey2006a}.
Intermediate reference\footcite[cf.][p.~3]{KantKdU1974}.
Indirect follow-up reference\footcite[cf.][p.~4]{Covey2006a}.
\bibliography{../bibfiles/zenResourcesDe}
\hypertarget{inference}{%
\chapter{Inference}\label{inference}}
\href{https://colab.research.google.com/github/AllenDowney/ElementsOfDataScience/blob/master/11_inference.ipynb}{Click
here to run this notebook on Colab} or
\href{https://github.com/AllenDowney/ElementsOfDataScience/raw/master/11_inference.ipynb}{click
here to download it}.
This chapter introduces \textbf{statistical inference}, which is the
process of using a sample to make inferences about a population.
The \textbf{population} is the group we are interested in. Sometimes it
is a group of people, but in general it can be any kind of group.
If we can observe the entire group, we might not need statistical
inference. If not, sometimes we can observe a \textbf{sample} (or
subset) of the population and use those observations to make claims
about the population.
If you have studied statistics before, you might have encountered some
of these ideas before: hypothesis testing, p-values, estimation,
standard error, and confidence intervals.
In this chapter we'll approach these topics using computation and
simulation, as opposed to mathematical analysis. I hope this approach
makes the ideas clearer.
If you have not seen these ideas before, don't worry. That might even be
better.
We'll look at three examples:
\begin{itemize}
\item
Testing whether a coin is ``fair''.
\item
  Testing whether first babies are more likely to be born early (or late).
\item
Estimating the average height of men in the U.S., and quantifying the
precision of the estimate.
\end{itemize}
\hypertarget{the-euro-problem}{%
\section{The Euro problem}\label{the-euro-problem}}
In \emph{Information Theory, Inference, and Learning Algorithms}, David
MacKay writes, "A statistical statement appeared in \emph{The Guardian}
on Friday January 4, 2002:
\begin{quote}
When spun on edge 250 times, a Belgian one-euro coin came up heads 140
times and tails 110. `It looks very suspicious to me', said Barry
Blight, a statistics lecturer at the London School of Economics. `If the
coin were unbiased the chance of getting a result as extreme as that
would be less than 7\%'.
\end{quote}
But do these data give evidence that the coin is biased rather than
fair?"
Before we answer MacKay's question, let's unpack what Dr.~Blight said:
``If the coin were unbiased the chance of getting a result as extreme as
that would be less than 7\%''.
To see where that comes from, I'll simulate the result of spinning an
``unbiased'' coin, meaning that the probability of heads is 50\%.
Here's an example with 10 spins:
\begin{lstlisting}[language=Python,style=source]
import numpy as np
spins = np.random.random(10) < 0.5
spins
\end{lstlisting}
\begin{lstlisting}[style=output]
array([ True, True, True, False, True, True, True, True, True,
False])
\end{lstlisting}
\passthrough{\lstinline!np.random.random!} returns numbers between 0 and
1, uniformly distributed. So the probability of being less than 0.5 is
50\%.
The sum of the array is the number of \passthrough{\lstinline!True!}
elements, that is, the number of heads:
\begin{lstlisting}[language=Python,style=source]
np.sum(spins)
\end{lstlisting}
\begin{lstlisting}[style=output]
8
\end{lstlisting}
We can wrap that in a function that simulates
\passthrough{\lstinline!n!} spins with probability
\passthrough{\lstinline!p!}.
\begin{lstlisting}[language=Python,style=source]
def spin(n, p):
return np.sum(np.random.random(n) < p)
\end{lstlisting}
Here's an example with the actual sample size (250) and hypothetical
probability (50\%).
\begin{lstlisting}[language=Python,style=source]
heads, tails = 140, 110
sample_size = heads + tails
\end{lstlisting}
\begin{lstlisting}[language=Python,style=source]
hypo_prob = 0.5
spin(sample_size, hypo_prob)
\end{lstlisting}
\begin{lstlisting}[style=output]
119
\end{lstlisting}
Since we are generating random numbers, we expect to see different
values if we run the experiment more than once.
Here's a loop that runs \passthrough{\lstinline!spin!} 10 times.
\begin{lstlisting}[language=Python,style=source]
n = 250
p = 0.5
for i in range(10):
print(spin(n, p))
\end{lstlisting}
\begin{lstlisting}[style=output]
114
121
116
131
141
139
135
129
127
133
\end{lstlisting}
As expected, the results vary from one run to the next.
Now let's run the simulated experiment 1000 times and store the results
in a NumPy array.
\begin{lstlisting}[language=Python,style=source]
outcomes = np.empty(1000)
for i in range(len(outcomes)):
outcomes[i] = spin(n, p)
\end{lstlisting}
\passthrough{\lstinline!np.empty!} creates an empty array with the given
length. Each time through the loop, we run
\passthrough{\lstinline!spin!} and assign the result to an element of
\passthrough{\lstinline!outcomes!}.
The result is an array of 1000 integers, each representing the number
of heads in a simulated experiment.
The mean of \passthrough{\lstinline!outcomes!} is about 125:
\begin{lstlisting}[language=Python,style=source]
np.mean(outcomes)
\end{lstlisting}
\begin{lstlisting}[style=output]
124.967
\end{lstlisting}
Which makes sense. On average, the expected number of heads is the
product of the hypothetical probability and the sample size:
\begin{lstlisting}[language=Python,style=source]
expected = hypo_prob * sample_size
expected
\end{lstlisting}
\begin{lstlisting}[style=output]
125.0
\end{lstlisting}
Now let's see how much the values in \passthrough{\lstinline!outcomes!}
differ from the expected value:
\begin{lstlisting}[language=Python,style=source]
diffs = outcomes - expected
\end{lstlisting}
\passthrough{\lstinline!diffs!} is an array that contains the deviation
of each experiment from the expected value, 125.
Here's the mean of the absolute deviations:
\begin{lstlisting}[language=Python,style=source]
np.mean(abs(diffs))
\end{lstlisting}
\begin{lstlisting}[style=output]
6.673
\end{lstlisting}
So a typical experiment deviates from the expected value by about 6 or 7 heads.
To see the whole distribution of deviations, we can plot a histogram.
The following function uses Matplotlib to plot a histogram and adjust
some of the settings.
\begin{lstlisting}[language=Python,style=source]
import matplotlib.pyplot as plt
def plot_hist(values):
xs, ys, patches = plt.hist(values,
density=True,
histtype='step',
linewidth=2,
alpha=0.5)
plt.ylabel('Density')
plt.tight_layout()
return patches[0]
\end{lstlisting}
Here's what the distribution of deviations looks like:
\begin{lstlisting}[language=Python,style=source]
plot_hist(diffs)
plt.title('Sampling distribution (n=250)')
plt.xlabel('Deviation from expected number of heads');
\end{lstlisting}
\begin{figure}
\centering
\includegraphics{11_inference_files/11_inference_29_0.pdf}
\caption{Sampling distribution of deviations from the expected number of heads ($n=250$).}
\end{figure}
This is the ``sampling distribution'' of deviations. It shows how much
variation we should expect between experiments with this sample size (n
= 250).
\hypertarget{p-values}{%
\section{P-values}\label{p-values}}
Getting back to the Euro example, Dr.~Blight reported:
``If the coin were unbiased the chance of getting a result as extreme as
that would be less than 7\%''.
The article doesn't say so explicitly, but this is a ``p-value''. To
understand what that means, let's count how many times, in 1000
attempts, the outcome is ``as extreme as'' the observed outcome, 140
heads.
The observed deviation is the difference between the observed and
expected number of heads:
\begin{lstlisting}[language=Python,style=source]
observed_diff = heads - expected
observed_diff
\end{lstlisting}
\begin{lstlisting}[style=output]
15.0
\end{lstlisting}
Let's see how many times the simulated \passthrough{\lstinline!diffs!}
exceed the observed deviation:
\begin{lstlisting}[language=Python,style=source]
np.mean(diffs >= observed_diff)
\end{lstlisting}
\begin{lstlisting}[style=output]
0.039
\end{lstlisting}
It's about 4\%. But Dr.~Blight said 7\%. Where did that come from?
So far, we only counted the cases where the outcome is \emph{more} heads
than expected. We might also want to count the cases where the outcome
is \emph{fewer} than expected.
Here's the probability of falling below the expected number by 15 or
more.
\begin{lstlisting}[language=Python,style=source]
np.mean(diffs <= -observed_diff)
\end{lstlisting}
\begin{lstlisting}[style=output]
0.033
\end{lstlisting}
To get the total probability of a result ``as extreme as that'', we can
use the absolute value of the simulated differences:
\begin{lstlisting}[language=Python,style=source]
np.mean(abs(diffs) >= observed_diff)
\end{lstlisting}
\begin{lstlisting}[style=output]
0.072
\end{lstlisting}
So that's consistent with what Dr.~Blight reported.
To show what that looks like graphically, I'll use the following
function, which fills in the histogram between
\passthrough{\lstinline!low!} and \passthrough{\lstinline!high!}.
\begin{lstlisting}[language=Python,style=source]
def fill_hist(low, high, patch):
fill = plt.axvspan(low, high,
clip_path=patch,
alpha=0.5,
color='C0')
\end{lstlisting}
The following plot shows the sampling distribution of
\passthrough{\lstinline!diffs!} with two regions shaded. These regions
represent the probability that an unbiased coin yields a deviation from
the expected as extreme as 15.
\begin{lstlisting}[language=Python,style=source]
patch = plot_hist(diffs)
# fill the right tail of the hist
low = observed_diff
high = diffs.max()
fill_hist(low, high, patch)
# fill the left tail of the hist
low = diffs.min()
high = -observed_diff
fill_hist(low, high, patch)
plt.title('Sampling distribution (n=250)')
plt.xlabel('Deviation from expected number of heads')
plt.ylabel('Density');
\end{lstlisting}
\begin{figure}
\centering
\includegraphics{11_inference_files/11_inference_42_0.pdf}
\caption{Sampling distribution ($n=250$) with both tails shaded, showing deviations at least as extreme as the observed difference of 15.}
\end{figure}
These results show that there is a non-negligible chance of getting a
result as extreme as 140 heads, even if the coin is actually fair.
So even if the results are ``suspicious'' they don't provide compelling
evidence that the coin is biased.
\textbf{Exercise:} There are a few ways to make ``crooked'' dice, that
is, dice that are more likely to land on one side than the others.
Suppose you run a casino and you suspect that a patron is using a die
that comes up 3 more often than it should.
You confiscate the die, roll it 300 times, and 63 times it comes up 3.
Does that support your suspicions?
\begin{itemize}
\item
To answer this question, use \passthrough{\lstinline!spin!} to
simulate the experiment, assuming that the die is fair.
\item
Use a for loop to run \passthrough{\lstinline!spin!} 1000 times and
store the results in a NumPy array.
\item
What is the mean of the results from the simulated experiments?
\item
What is the expected number of 3s if the die is fair?
\item
Use \passthrough{\lstinline!plot\_hist!} to plot the results. The
histogram you plot approximates the sampling distribution.
\end{itemize}
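Here is a minimal sketch of one way to set up that simulation, as a
starting point; the variable names are my own, and it reuses
\passthrough{\lstinline!spin!} and \passthrough{\lstinline!plot\_hist!}
from earlier in the chapter.

\begin{lstlisting}[language=Python,style=source]
import numpy as np

# Count how many 3s we get in 300 rolls of a fair die, reusing
# spin(n, p), which counts successes with probability p.
n_rolls = 300
observed_threes = 63
fair_prob = 1 / 6

sim_threes = np.empty(1000)
for i in range(len(sim_threes)):
    sim_threes[i] = spin(n_rolls, fair_prob)

print(np.mean(sim_threes))     # mean of the simulated counts
print(fair_prob * n_rolls)     # expected number of 3s: 50

plot_hist(sim_threes)          # approximates the sampling distribution
\end{lstlisting}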
\textbf{Exercise:} Continuing the previous exercise, compute the
probability of seeing a deviation from the expected value that is ``as
extreme'' as the observed difference.
For this context, what do you think is the best definition of ``as
extreme''?
Plot the histogram of the random deviations again, and use
\passthrough{\lstinline!fill\_hist!} to fill the region of the histogram
that corresponds to the p-value you computed.
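Continuing the sketch above, here is one way to compute a two-sided
p-value and shade the corresponding tails; whether a one-sided or
two-sided definition of ``as extreme'' is more appropriate here is part
of the exercise.

\begin{lstlisting}[language=Python,style=source]
# Two-sided: deviations at least as far from the expected count (50)
# as the observed count (63), in either direction.
expected_threes = fair_prob * n_rolls
observed_dev = observed_threes - expected_threes
sim_devs = sim_threes - expected_threes

p_value = np.mean(abs(sim_devs) >= observed_dev)
print(p_value)

patch = plot_hist(sim_devs)
fill_hist(observed_dev, sim_devs.max(), patch)    # right tail
fill_hist(sim_devs.min(), -observed_dev, patch)   # left tail
\end{lstlisting}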
\hypertarget{are-first-babies-more-likely-to-be-late}{%
\section{Are first babies more likely to be
late?}\label{are-first-babies-more-likely-to-be-late}}
The examples so far have been based on coins and dice, which are
relatively simple. In this section we'll look at an example that's based
on real-world data.
Here's the motivation for it: When my wife and I were expecting our
first baby, we heard that first babies are more likely to be late. We
also hear that first babies are more likely to be early. Neither claim
was supported by evidence.
Fortunately, I am a data scientist! Also fortunately, the CDC runs the
National Survey of Family Growth (NSFG), which ``gathers information on
family life, marriage and divorce, pregnancy, infertility, use of
contraception, and men's and women's health.''
I got the data from their web page,
https://www.cdc.gov/nchs/nsfg/index.htm, and wrote some code to get it
into a Pandas Dataframe:
\begin{lstlisting}[language=Python,style=source]
import pandas as pd
nsfg = pd.read_hdf('nsfg.hdf5')
nsfg.shape
\end{lstlisting}
\begin{lstlisting}[style=output]
(9358, 11)
\end{lstlisting}
The \passthrough{\lstinline!nsfg!} DataFrame contains 9358 rows, one for
each recorded pregnancy, and 11 columns, one for each of the variables I
selected.
Here are the first few lines.
\begin{lstlisting}[language=Python,style=source]
nsfg.head()
\end{lstlisting}
\begin{tabular}{lrrrrrrrrrrr}
\toprule
{} & caseid & outcome & birthwgt\_lb1 & birthwgt\_oz1 & prglngth & nbrnaliv & agecon & agepreg & birthord & hpagelb & wgt2013\_2015 \\
\midrule
0 & 60418 & 1 & 5.0 & 4.0 & 40 & 1.0 & 2000 & 2075.0 & 1.0 & 22.0 & 3554.964843 \\
1 & 60418 & 1 & 4.0 & 12.0 & 36 & 1.0 & 2291 & 2358.0 & 2.0 & 25.0 & 3554.964843 \\
2 & 60418 & 1 & 5.0 & 4.0 & 36 & 1.0 & 3241 & 3308.0 & 3.0 & 52.0 & 3554.964843 \\
3 & 60419 & 6 & NaN & NaN & 33 & NaN & 3650 & NaN & NaN & NaN & 2484.535358 \\
4 & 60420 & 1 & 8.0 & 13.0 & 41 & 1.0 & 2191 & 2266.0 & 1.0 & 24.0 & 2903.782914 \\
\bottomrule
\end{tabular}
The variables we need are \passthrough{\lstinline!birthord!}, which
indicates birth order, and \passthrough{\lstinline!prglngth!}, which is
pregnancy length in weeks.
I'll make two boolean Series, one for first babies and one for others.
\begin{lstlisting}[language=Python,style=source]
firsts = (nsfg.birthord == 1)
others = (nsfg.birthord > 1)
np.sum(firsts), np.sum(others)
\end{lstlisting}
\begin{lstlisting}[style=output]
(3067, 3422)
\end{lstlisting}
We can use the boolean Series to select pregnancy lengths for the two
groups and compute their means:
\begin{lstlisting}[language=Python,style=source]
prglngth = nsfg['prglngth']
mean_first = prglngth[firsts].mean()
mean_other = prglngth[others].mean()
mean_first, mean_other
\end{lstlisting}
\begin{lstlisting}[style=output]
(38.57124225627649, 38.36908240794857)
\end{lstlisting}
Here's the difference in means, in weeks.
\begin{lstlisting}[language=Python,style=source]
diff = mean_first - mean_other
diff
\end{lstlisting}
\begin{lstlisting}[style=output]
0.20215984832792344
\end{lstlisting}
Here it is converted to days:
\begin{lstlisting}[language=Python,style=source]
diff * 7
\end{lstlisting}
\begin{lstlisting}[style=output]
1.415118938295464
\end{lstlisting}
It looks like first babies are born 1.4 days later than other babies, on
average.
\hypertarget{hypothesis-testing}{%
\section{Hypothesis testing}\label{hypothesis-testing}}
The apparent difference between these groups is based on a random sample
that is much smaller than the actual population. So we can't be sure
that the difference we see in the sample reflects a real difference in
the population. There are two other possibilities we should keep in
mind:
\begin{itemize}
\item
Systematic errors: The sample might be more likely to include some
  pregnancies, and less likely to include others, in a way that causes an
apparent difference in the sample, even if there is no such difference
in the population.
\item
Sampling errors: Even if every pregnancy is equally likely to appear
in the sample, it is still possible to see a difference in the sample
that is not in the population, just because of random variation.
\end{itemize}
We can never rule out the possibility of systematic errors, but we
\emph{can} test whether an apparent effect could be explained by random
sampling.
Here's how:
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\item
First we choose a ``test statistic'' that measures the size of the
effect; the test statistic in this example is the difference in mean
pregnancy length.
\item
Next we define a model of the population under the assumption that
there is actually no difference between the groups. This assumption is
called the ``null hypothesis''.
\item
Then we use the model to compute the distribution of the test
statistic under the null hypothesis.
\end{enumerate}
We have already done step 1, but to make it easier to repeat, I'll wrap
it in a function.
\begin{lstlisting}[language=Python,style=source]
def test_stat(group1, group2):
"""Difference in means.
group1: sequence of values
group2: sequence of values
returns: float difference in means
"""
diff = np.mean(group1) - np.mean(group2)
return diff
\end{lstlisting}
\passthrough{\lstinline!test\_stat!} takes two sequences and computes
the difference in their means.
Here's how we use it to compute the actual difference in the sample.
\begin{lstlisting}[language=Python,style=source]
group1 = prglngth[firsts]
group2 = prglngth[others]
actual_diff = test_stat(group1, group2)
actual_diff
\end{lstlisting}
\begin{lstlisting}[style=output]
0.20215984832792344
\end{lstlisting}
Now we have to define the null hypothesis, which is a model of the world
where there is no difference in pregnancy length between first babies
and others.
One way to do that is to put the two groups together and then divide
them up again at random. That way the distribution of pregnancy lengths
is the same for both groups.
I'll use \passthrough{\lstinline!concatenate!} to pool the groups.
\begin{lstlisting}[language=Python,style=source]
len(group1), len(group2)
\end{lstlisting}
\begin{lstlisting}[style=output]
(3067, 3422)
\end{lstlisting}
\begin{lstlisting}[language=Python,style=source]
pool = np.concatenate([group1, group2])
pool.shape
\end{lstlisting}
\begin{lstlisting}[style=output]
(6489,)
\end{lstlisting}
I'll use \passthrough{\lstinline!shuffle!} to reorder them.
\begin{lstlisting}[language=Python,style=source]
np.random.shuffle(pool)
\end{lstlisting}
Then I'll use \passthrough{\lstinline!split!} to make two simulated
groups, the same size as the originals.
\begin{lstlisting}[language=Python,style=source]
n = len(group1)
sim_group1, sim_group2 = np.split(pool, [n])
\end{lstlisting}
\begin{lstlisting}[language=Python,style=source]
len(sim_group1), len(sim_group2)
\end{lstlisting}
\begin{lstlisting}[style=output]
(3067, 3422)
\end{lstlisting}
Now we can compute the test statistic for the simulated data.
\begin{lstlisting}[language=Python,style=source]
test_stat(sim_group1, sim_group2)
\end{lstlisting}
\begin{lstlisting}[style=output]
-0.014237551111094149
\end{lstlisting}
In the simulated data, the distribution of pregnancy lengths is the same
for both groups, so the difference is usually close to 0.
But because it is based on a random shuffle of the groups, we get a
different value each time we run it.
To see what the whole distribution looks like, we can run the simulation
many times and store the results.
\begin{lstlisting}[language=Python,style=source]
diffs = np.empty(1000)
for i in range(len(diffs)):
np.random.shuffle(pool)
sim_group1, sim_group2 = np.split(pool, [n])
diffs[i] = test_stat(sim_group1, sim_group2)
\end{lstlisting}
The result is the ``sampling distribution of the test statistic under
the null hypothesis''.
The mean of this distribution should be close to zero, because it is based
on the assumption that there is actually no difference between the
groups.
\begin{lstlisting}[language=Python,style=source]
np.mean(diffs)
\end{lstlisting}
\begin{lstlisting}[style=output]
0.0021146729470806775
\end{lstlisting}
And here's what the whole distribution looks like.
\begin{lstlisting}[language=Python,style=source]
plot_hist(diffs)
plt.xlabel('Difference in mean (weeks)')
plt.title('Distribution of test statistic under null hypothesis');
\end{lstlisting}
\begin{figure}
\centering
\includegraphics{11_inference_files/11_inference_79_0.pdf}
\caption{Distribution of the test statistic under the null hypothesis.}
\end{figure}
If there were actually no difference between the groups, we would expect
to see a difference as big as 0.15 weeks by chance, at least
occasionally. But a difference as big as 0.2 would be rare.
To quantify that surprise, we can estimate the probability that the test
statistic, under the null hypothesis, exceeds the observed difference
in the means.
The result is a ``p-value''.
\begin{lstlisting}[language=Python,style=source]
p_value = np.mean(diffs >= actual_diff)
p_value
\end{lstlisting}
\begin{lstlisting}[style=output]
0.0
\end{lstlisting}
In this example the result is 0, which is to say that in 1000
simulations of the null hypothesis, we never saw a difference as big as
0.2.
\hypertarget{interpreting-p-values}{%
\section{Interpreting p-values}\label{interpreting-p-values}}
To interpret this result, remember that we started with three possible
explanations for the observed difference between the groups:
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\item
The observed difference might be ``real''; that is, there might be an
actual difference in pregnancy length between first babies and others.
\item
There might be no real difference between the groups, and the observed
difference might be because of a systematic error in the sampling
process or the data collection process. For example, maybe reported
pregnancy lengths are less accurate for first time mothers.
\item
There might be no real difference between the groups, and the observed
difference might be due to random variation in the sampling process.
\end{enumerate}
By computing a p-value, we have established that it would be rare to see
a difference as big as 0.2 due to sampling alone. So we can conclude
that the third explanation is unlikely.
That makes it more likely that the difference is real, but we still
can't rule out the second possibility.
\textbf{Exercise:} The test statistic we chose is the difference in
means between the two groups.
But suppose we would like to know whether first babies are more
unpredictable than other babies. In that case the test statistic we
choose might be the standard deviation of pregnancy length, which is one
way to quantify unpredictability.
As an exercise:
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\item
Write a version of \passthrough{\lstinline!test\_stat!} that computes
the difference in standard deviation between the groups.
\item
Write a loop that estimates the distribution of this test statistic
under the null hypothesis.
\item
Compute a p-value.
\end{enumerate}
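As a starting point, here is a minimal sketch of one possible solution;
it reuses \passthrough{\lstinline!group1!},
\passthrough{\lstinline!group2!}, and \passthrough{\lstinline!pool!}
from above, and the names are my own.

\begin{lstlisting}[language=Python,style=source]
def test_stat_std(group1, group2):
    """Difference in standard deviations."""
    return np.std(group1) - np.std(group2)

actual_diff_std = test_stat_std(group1, group2)

diffs_std = np.empty(1000)
n = len(group1)
for i in range(len(diffs_std)):
    np.random.shuffle(pool)
    sim_group1, sim_group2 = np.split(pool, [n])
    diffs_std[i] = test_stat_std(sim_group1, sim_group2)

p_value = np.mean(diffs_std >= actual_diff_std)
p_value
\end{lstlisting}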
\hypertarget{estimation}{%
\section{Estimation}\label{estimation}}
Suppose we want to estimate the average height of men in the U.S.
We can use data from the
\href{https://www.cdc.gov/brfss/index.html}{BRFSS}:
``The Behavioral Risk Factor Surveillance System (BRFSS) is the nation's
premier system of health-related telephone surveys that collect state
data about U.S. residents regarding their health-related risk behaviors,
chronic health conditions, and use of preventive services.''
\begin{lstlisting}[language=Python,style=source]
from os.path import basename, exists
def download(url):
filename = basename(url)
if not exists(filename):
from urllib.request import urlretrieve
local, _ = urlretrieve(url, filename)
print('Downloaded ' + local)
download('https://github.com/AllenDowney/' +
'ElementsOfDataScience/raw/master/brfss.hdf5')
\end{lstlisting}
\begin{lstlisting}[language=Python,style=source]
import pandas as pd
brfss = pd.read_hdf('brfss.hdf5', 'brfss')
brfss.shape
\end{lstlisting}
\begin{lstlisting}[style=output]
(100000, 9)
\end{lstlisting}
We can use \passthrough{\lstinline!SEX!} to select male respondents.
\begin{lstlisting}[language=Python,style=source]
male = (brfss.SEX == 1)
np.mean(male)
\end{lstlisting}
\begin{lstlisting}[style=output]
0.48589
\end{lstlisting}
Then we select height data.
\begin{lstlisting}[language=Python,style=source]
heights = brfss['HTM4']
data = heights[male]
\end{lstlisting}
We can use \passthrough{\lstinline!isnan!} to check for NaN values:
\begin{lstlisting}[language=Python,style=source]
np.mean(np.isnan(data)) * 100
\end{lstlisting}
\begin{lstlisting}[style=output]
4.338430508962934
\end{lstlisting}
About 4\% of the values are missing.
Here are the mean and standard deviation, ignoring missing data.
\begin{lstlisting}[language=Python,style=source]
print('Mean male height in cm =', np.nanmean(data))
print('Std male height in cm =', np.nanstd(data))
\end{lstlisting}
\begin{lstlisting}[style=output]
Mean male height in cm = 177.53804780447925
Std male height in cm = 8.350063691943435
\end{lstlisting}
\hypertarget{quantifying-precision}{%
\section{Quantifying precision}\label{quantifying-precision}}
At this point we have an estimate of the average adult male height. We'd
like to know how accurate this estimate is, and how precise. In the
context of estimation, these words have a
\href{https://en.wikipedia.org/wiki/Accuracy_and_precision}{technical
distinction}:
\begin{quote}
Given a set of data points from repeated measurements of the same
quantity, the set can be said to be precise if the values are close to
each other, while the set can be said to be accurate if their average is
close to the true value of the quantity being measured.
\end{quote}
Usually accuracy is what we really care about, but it's hard to measure
accuracy unless you know the true value. And if you know the true value,
you don't have to estimate it.
Quantifying precision is not as useful, but it is much easier. Here's
one way to do it:
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\item
Use the data you have to make a model of the population.
\item
Use the model to simulate the data collection process.
\item
Use the simulated data to compute an estimate.
\end{enumerate}
By repeating these steps, we can quantify the variability of the
estimate due to random sampling.
To model the population, I'll use \textbf{resampling}; that is, I will
treat the observed measurements as if they were taken from the entire
population, and I will draw random samples from them.
We can use \passthrough{\lstinline!np.random.choice!} to resample the
data:
\begin{lstlisting}[language=Python,style=source]
size = len(data)
sim_data = np.random.choice(data, size, replace=True)
sim_data.shape
\end{lstlisting}
\begin{lstlisting}[style=output]
(48589,)
\end{lstlisting}
With \passthrough{\lstinline!replace=True!}, we sample with replacement,
which means that some measurements might be chosen more than once, and
some might not be chosen at all.
(If we sample \emph{without} replacement, the resampled data is always
identical to the original, so that's no good.)
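To see why, here is a quick check of that claim (a small illustration of
my own, not part of the original analysis): a resample drawn without
replacement is just a permutation of the data, so its sorted values, and
therefore any summary statistic, are unchanged.

\begin{lstlisting}[language=Python,style=source]
valid = data.dropna().values
permuted = np.random.choice(valid, len(valid), replace=False)

print(np.array_equal(np.sort(permuted), np.sort(valid)))   # True
print(np.isclose(np.mean(permuted), np.mean(valid)))       # True
\end{lstlisting}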
Now we can use \passthrough{\lstinline!nanmean!} to compute the mean of
the simulated data, ignoring missing values.
\begin{lstlisting}[language=Python,style=source]
np.nanmean(sim_data)
\end{lstlisting}
\begin{lstlisting}[style=output]
177.52799586999075
\end{lstlisting}
If we repeat this process 1000 times, we can see how much the results
vary.
\begin{lstlisting}[language=Python,style=source]
outcomes = np.empty(1000)
size = len(data)
for i in range(len(outcomes)):
sim_data = np.random.choice(data, size, replace=True)
outcomes[i] = np.nanmean(sim_data)
\end{lstlisting}
The result is the ``sampling distribution'', which shows how much the
results of the experiment would vary if we ran it many times. Here's
what it looks like:
\begin{lstlisting}[language=Python,style=source]
plot_hist(outcomes)
plt.title('Sampling distribution of the mean')
plt.xlabel('Mean adult male height, U.S.');
\end{lstlisting}
\begin{figure}
\centering
\includegraphics{11_inference_files/11_inference_105_0.pdf}
\caption{Sampling distribution of mean adult male height.}
\end{figure}
The width of this distribution shows how much the results vary from one
experiment to the next.
We can quantify this variability by computing the standard deviation of
the sampling distribution, which is called ``standard error''.
\begin{lstlisting}[language=Python,style=source]
std_err = np.std(outcomes)
std_err
\end{lstlisting}
\begin{lstlisting}[style=output]
0.038376132495234826
\end{lstlisting}
We can also summarize the sampling distribution with a ``confidence
interval'', which is a range that contains a specified fraction, like
90\%, of the values in \passthrough{\lstinline!outcomes!}.
The central 90\% confidence interval is between the 5th and 95th
percentiles of the sampling distribution.
\begin{lstlisting}[language=Python,style=source]
ci_90 = np.percentile(outcomes, [5, 95])
ci_90
\end{lstlisting}
\begin{lstlisting}[style=output]
array([177.47655879, 177.60152771])
\end{lstlisting}
The following function plots a histogram and shades the 90\% confidence
interval.
\begin{lstlisting}[language=Python,style=source]
def plot_sampling_dist(outcomes):
"""Plot sampling distribution.
outcomes: sequence of values
"""
patch = plot_hist(outcomes)
low, high = np.percentile(outcomes, [5, 95])
fill_hist(low, high, patch)
print('Mean = ', np.mean(outcomes))
print('Std error = ', np.std(outcomes))
print('90% CI = ', (low, high))
\end{lstlisting}
Here's what it looks like for the sampling distribution of mean adult
height:
\begin{lstlisting}[language=Python,style=source]
plot_sampling_dist(outcomes)
plt.xlabel('Mean adult male height, U.S. (cm)');
\end{lstlisting}
\begin{lstlisting}[style=output]
Mean = 177.5401473153372
Std error = 0.038376132495234826
90% CI = (177.47655879241216, 177.60152770795776)
\end{lstlisting}
\begin{figure}
\centering
\includegraphics{11_inference_files/11_inference_113_1.png}
\caption{Sampling distribution of mean adult male height, with the central 90\% confidence interval shaded.}
\end{figure}
For an experiment like this, we can compute the standard error
analytically.
\begin{lstlisting}[language=Python,style=source]
size = len(data)
analytic_std_err = np.nanstd(data) / np.sqrt(size)
\end{lstlisting}
The result is close to what we observed computationally.
\begin{lstlisting}[language=Python,style=source]
analytic_std_err, std_err
\end{lstlisting}
\begin{lstlisting}[style=output]
(0.03788094526418763, 0.038376132495234826)
\end{lstlisting}
This result indicates that our estimate of the mean is \emph{precise};
that is, if we ran this experiment many times, the results would fall in
a narrow range.
But this range reflects only variability due to random sampling. If
there are systematic errors in the sampling process, or in the
measurement process, the result would not be \emph{accurate}.
Computing a standard error or confidence interval can be useful, but it
only quantifies variability due to random sampling, not other sources of
error.
\textbf{Exercise:} One nice thing about using resampling is that it is
easy to compute the sampling distribution for other statistics.
For example, suppose we want to estimate the coefficient of variation
(standard deviation as a fraction of the mean) for adult male height.
Here's how we can compute it.
\begin{lstlisting}[language=Python,style=source]
cv = np.nanstd(data) / np.nanmean(data)
cv
\end{lstlisting}
\begin{lstlisting}[style=output]
0.04703253074596872
\end{lstlisting}
So the standard deviation is about 5\% of the mean.
Write a loop that uses resampling to estimate the sampling distribution
of \passthrough{\lstinline!cv!}; store the results in an array named
\passthrough{\lstinline!outcomes!}.
Then use \passthrough{\lstinline!plot\_sampling\_dist!} to plot the
sampling distribution of the coefficient of variation.
What is the standard error of the estimated coefficient of variation?
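Here is a minimal sketch of one way to approach it, reusing
\passthrough{\lstinline!data!} and
\passthrough{\lstinline!plot\_sampling\_dist!} from above.

\begin{lstlisting}[language=Python,style=source]
outcomes = np.empty(1000)
size = len(data)
for i in range(len(outcomes)):
    sim_data = np.random.choice(data, size, replace=True)
    outcomes[i] = np.nanstd(sim_data) / np.nanmean(sim_data)

# plot_sampling_dist prints the standard error along with the 90% CI
plot_sampling_dist(outcomes)
plt.xlabel('Coefficient of variation of adult male height');
\end{lstlisting}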
\hypertarget{summary}{%
\section{Summary}\label{summary}}
This chapter presents computational methods for computing p-values,
standard errors, and confidence intervals. The two processes, hypothesis
testing and estimation, are similar, but they answer different questions.
To review, the key steps in the hypothesis testing process are:
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\item
Choose a test statistic that quantifies the observed effect.
\item
Define a model of the null hypothesis and use it to generate simulated
data.
\item
Compute the distribution of the test statistic under the null
hypothesis.
\item
  Compute a p-value, which is the probability, under the null hypothesis,
  of seeing an effect as extreme as the one you observed.
\end{enumerate}
The process for computing standard errors and confidence intervals is
similar. The essential steps are:
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\item
Choose a sample statistic that quantifies the thing you want to
estimate.
\item
Use the data to make a model of the population, assuming that the
estimate is accurate.
\item
Use the model to simulate the sampling process and generate simulated
data.
\item
Compute the sampling distribution of the estimate.
\item
Use the sampling distribution to compute the standard error,
confidence interval, or both.
\end{enumerate}
Finally, remember that both processes only account for variability due
to random sampling. They don't tell us anything about systematic errors
in the sampling process, measurement error, or other sources of error.
\documentclass{llncs}
\usepackage{macros}
\usepackage{lstcoq}
\usepackage{lstocaml}
\usepackage{mathpartir}
\usepackage{amsfonts}
\usepackage{amsmath}
\usepackage{stmaryrd}
\usepackage{graphics}
\bibliographystyle{plain}
\pagestyle{plain}
\usepackage{array}
% \authorinfo{Thomas Braibant}
% {Inria}
% {[email protected]}
% \authorinfo{Adam Chlipala}
% {MIT}
% {[email protected]}
\title{Formal Verification of Hardware Synthesis}
\author{Thomas Braibant\inst{1} \and Adam Chlipala\inst{2}}
\institute{
Inria Paris-Rocquencourt %\email{[email protected]}
\and MIT CSAIL %\email{[email protected]}
} %
\newcommand{\project}{Fe-Si}
\newcommand{\denote}[1]{\llbracket #1 \rrbracket}
\newcommand{\denotety}[1]{\denote{\mathtt{#1}}_{\mathtt{ty}}}
\newcommand{\denotemem}[1]{\denote{\mathtt{#1}}_{\mathtt{mem}}}
\begin{document}
\maketitle
\begin{abstract}
We report on the implementation of a certified compiler for a
high-level hardware description language (HDL) called \emph{Fe-Si}
(FEatherweight SynthesIs).
%
Fe-Si is a simplified version of Bluespec, an HDL based on a notion
of \emph{guarded atomic actions}. Fe-Si is defined as a
dependently typed deep embedding in Coq. The target language of the
compiler corresponds to a synthesisable subset of Verilog or VHDL.
%
A key aspect of our approach is that input programs to the compiler
can be defined and proved correct inside Coq. Then, we use
extraction and a Verilog back-end (written in OCaml) to get a
certified version of a hardware design.
\end{abstract}
\section*{Introduction}
Verification of hardware designs has been thoroughly investigated, and
yet, obtaining provably correct hardware of significant complexity is
usually considered challenging and time-consuming.
%
On the one hand, a common practice in hardware verification is to take
a given design written in an hardware description language like
Verilog or VHDL and argue about this design in a formal way using a
model checker or an SMT solver.
%
On the other hand, a completely different approach is to design
hardware via a shallow embedding of circuits in a theorem
prover~\cite{hanna-veritas,UCAM-CL-TR-77,hunt89,vamp,certifying-circuits-in-type-theory}.
%
Yet, both kinds of approach suffer from the fact that most hardware
designs are expressed in low-level register transfer languages (RTL)
like Verilog or VHDL, and that the level of abstraction they provide
may be too low to allow short and meaningful proofs of high-level
properties.
\medskip
To raise this level of abstraction, industry moved to \emph{high-level
hardware synthesis} using higher-level languages, e.g.,
System-C~\cite{systemc}, Esterel~\cite{DBLP:conf/birthday/Berry00} or
Bluespec~\cite{bluespec}, in which a source program is
compiled to an RTL description.
%
High-level synthesis has two benefits.
%
First, it reduces the effort necessary to produce a hardware design.
%
Second, writing or reasoning about a high-level program is simpler
than reasoning about the (much more complicated) RTL description
generated by a compiler.
%
However, the downside of high-level synthesis is that there is no
formal guarantee that the generated circuit description behaves
exactly as prescribed by the semantics of the source
program, making verification on the source program useless in the
presence of compiler-introduced bugs.
%
\medskip In this paper, we investigate the formal verification of a
lightly optimizing compiler from a Bluespec-inspired language called
\project{} to RTL, applying (to a lesser extent) the ideas behind the
CompCert project~\cite{Leroy-backend} to hardware synthesis.
\medskip
Fe-Si can be seen as a stripped-down and idealized version of
Bluespec: hardware designs are described in terms of \emph{guarded
atomic actions} on state elements.
%
Guarded atomic actions have a flavour of \emph{transactional memory}, where
updates to state elements are not visible before the end of the
transaction (a time-step).
%
Our target language can be sensibly interpreted as \emph{clocked
sequential machines}: we generate an RTL description syntactically
described as combinational definitions and next-state assignments.
%
In our development, we define a (dependently typed) deep embedding of
the Fe-Si programming language in Coq using \emph{parametric
higher-order abstract syntax (PHOAS)}~\cite{phoas-chlipala}, and
give it a semantics using an interpreter: the semantics of a program
is a Coq function that takes as inputs the current state of the
state elements and produces their next state.
Fe-Si hits a sweet spot between deep and shallow embeddings: it makes
it possible to use Coq as a meta programming tool to describe
circuits, without the pitfalls usually associated with a deep
embedding (e.g., the handling of binders).
%
This provides an economical path toward succinct and provably correct
description of, e.g., recursive circuits.
\section{Overview of Fe-Si}
Fe-Si is a purely functional language built around a \emph{monad} that
makes it possible to define circuits. We start with a customary
example: a half adder.
\begin{mcoq}
Definition hadd (a b: Var B) : action [] (B $\otimes$ B) :=
$\quad$do carry <- ret (andb a b);
$\quad$do sum $\,$<- ret (xorb a b);
$\quad$ret (carry, sum).
\end{mcoq}
This circuit has two Boolean inputs (\coqe{Var B}) and returns a tuple
of Boolean values (\coqe{B $\otimes$ B}).
%
Here, we use Coq notations to implement some syntactic sugar: we
borrow the \texttt{do}-notation to denote the monadic bind and use
\coqe{ret} as a short-hand for return.
%
% (Our choice of concrete notations is dictated by some limitations in
% Coq's notation mechanism. For instance our explicit use of return may
% seem odd: it is due to the fact that Fe-Si has two classes of
% syntactic values, expressions and actions, and that return takes as
% argument an expression.)
Up to this point, Fe-Si can be seen as an extension of the
Lava~\cite{Bjesse98lava:hardware} language, implemented in Coq rather
than Haskell. Yet, using Coq as a metalanguage offers the possibility
to use dependent types in our circuit descriptions. For instance, one
can define an adder circuit of the following type by induction:
\begin{mcoq}
Definition adder (n: nat) (a b: Var (Int n)): action [] (Int n) := ...
\end{mcoq}
In this definition, \coqe{n} is a formal parameter that denotes the
size of the operands and the size of the result. (Note that
this definition describes an infinite family of adders that can be
proved correct all at once using inductive reasoning. Then, the formal
parameter can be instantiated to yield certified finite-size designs.)
\subsubsection{Stateful programs.}
Fe-Si also features a small set of primitives for interacting with
\emph{memory elements} that hold mutable state. In the following
snippet, we build a counter that increments its value when its input
is true.
\begin{mcoq}
Definition $\Phi$ := [Reg (Int n)].
Definition count n (tick: Var B) : action $\Phi$ (Int n) :=
$\quad$do x <- !member_0;
$\quad$do _ <- if tick then {member_0 ::= x + 1} else {ret ()};
$\quad$ret x.
\end{mcoq}
Here, $\Phi$ is an environment that defines the set of memory elements
(in a broad sense) of the circuit. In the first line, we read the
content of the register at position \coqe{member_0} in $\Phi$, and
bind this value to \coqe{x}. Then, we test the value of the input
\coqe{tick}, and when it is true, we increment the value of the
register. In any case, the output is the old value of the counter.
The above ``if-then-else'' construct is defined using two primitives
for guarded atomic actions that are reminiscent of transactional
memory monads: \coqe{assert} and \coqe{orElse}. The former aborts the
current action if its argument is false.
%
The latter takes two arguments $a$ and $b$, and first executes $a$; if
it aborts, then the effects of $a$ are discarded and $b$ is run. If
$b$ aborts too, the whole action \coqe{$a$ orElse $b$} aborts.
\subsubsection{Synchronous semantics.} Recall that Fe-Si programs are
intended to describe hardware circuits. Hence, we must stress that
they are interpreted in a synchronous setting.
%
From a logical point of view the execution of a program (an atomic
action) is clocked, and at each tick of its clock, the computation of
its effects (i.e., updates to memory elements) is instantaneous: these
effects are applied all at once between ticks.
%
In particular this means that it is not possible to observe, e.g.,
partial updates to the memory elements, nor transient values in
memory.
%
(In Bluespec terminology, this is ``reads-before-writes''.)
\subsubsection{From programs to circuits.} At this point, the reader
may wonder how it is possible to generate circuits in a palatable
format out of Fe-Si programs. Indeed, using Coq as a meta-language to
embed Fe-Si yields two kinds of issues. First, Coq lacks any kind of
I/O; and second, a Fe-Si program may have been built using arbitrary
Coq code, including, e.g., higher-order functions or fixpoints.
Note that every Coq function terminates. Therefore, a closed Fe-Si
program of type \coqe{action} evaluates to a term that is
syntactically built using the inductive constructors of the type
\coqe{action} (i.e., all intermediate definitions in Coq have been expanded).
%
Then we use Coq's extraction, which generates OCaml code from Coq
programs.
%
Starting from a closed Fe-Si program \coqe{foo}, we put the following
definition in a Coq file:
\begin{mcoq}
Definition bar := fesic foo.
\end{mcoq}
The extracted OCaml term that corresponds to \coqe{bar} evaluates (in
OCaml) to a closed RTL circuit. Then, we can use an (unverified)
back-end that pretty-prints this RTL code as regular Verilog code.
%
(This devious use of Coq's extraction mechanism works around the lack
of an I/O mechanism in Coq.)
\section{From Fe-Si to RTL}
In this section, we shall present our source (Fe-Si) and target (RTL)
languages, along with their semantics. For the sake of space, we leave
the full description of this compilation process out of the scope of
this paper.
\subsection{The memory model}
Fe-Si programs are meant to describe sequential circuits, whose
``memory footprints'' must be known statically. We take a declarative
approach: each state-holding element that is used in a program must be
declared.
%
We currently have three types of memory elements: inputs, registers,
and register files. A register holds one value of a given type, while a
register file of size $n$ stores $2^n$ values of a given type.
%
An input is a memory element that can only be read by the circuit,
and whose value is driven by the external world.
%
We show the inductive definitions of types and memory elements in
Fig.~\ref{fig:type}.
%
We have four constructors for the type \coqe{ty} of types: \coqe{Unit}
(the unit type), \coqe{B} (Booleans), \coqe{Int} (integers of a given
size), and \coqe{Tuple} (tuples of types). The inductive definition of
memory elements (\coqe{mem}) should be self-explaining.
We endow these inductive definitions with a denotational semantics: we
implement Coq functions that map such reified types to the obvious Coq
types they denote.
\begin{figure}
\centering
\begin{threelistings}
\begin{coq}
Inductive ty : Type :=
| Unit : ty
| B : ty
| Int : nat -> ty
| Tuple : list ty -> ty.
\end{coq}&
\begin{coq}
Inductive mem : Type :=
| Input: ty -> mem
| Reg : ty -> mem
| Regfile : nat -> ty -> mem.
$ $
\end{coq}
&
\begin{coq}
Fixpoint $\denotety{.}$ : ty -> Type := ...
Fixpoint $\denotemem{.}$ : mem -> Type := ...
Fixpoint $\denote{.}$ : list mem -> Type := ...
$ $
\end{coq}
\end{threelistings}
\caption{Types and memory elements}
\label{fig:type}
\end{figure}
\subsection{Fe-Si}
The definition of Fe-Si programs (\coqe{action} in the following)
takes the PHOAS approach~\cite{phoas-chlipala}.
%
That is, we define an inductive type family parameterized by an
arbitrary type \coqe{V} of variables, where binders bind variables
instead of arbitrary terms (as would be the case using
HOAS~\cite{DBLP:conf/pldi/PfenningE88}), and those variables are used
explicitly via a dedicated term constructor.
%
The definition of Fe-Si syntax is split into two syntactic classes:
expressions and actions.
%
Expressions are side-effect free and are built from variables,
constants, and operations.
%
Actions are made of control-flow structures (assertions and
alternatives), binders, and memory operations.
In this work, we follow an intrinsic
approach~\cite{DBLP:journals/jar/BentonHKM12}: we mix the definition
of the abstract syntax and the typing rules from the start. That is,
the type system of the meta-language (Coq) enforces that all Fe-Si
programs are well-typed by construction.
%
Besides the obvious type-oblivious definitions (e.g., it is not
possible to add a Boolean and an integer), this means that the
definition of operations on state-holding elements requires some care.
%
Here, we use dependently typed de Bruijn indices.
\begin{mcoq}
Inductive member : list mem -> mem -> Type :=
| member_0 : forall E t, member (t::E) t
| member_S : forall E t x, member E t -> member (x::E) t.
\end{mcoq}
Using the above definition, a term of type \coqe{member $\Phi$ M} denotes
the fact that the memory element \coqe{M} appears at a given position
in the environment of memory elements $\Phi$.
%
We are now ready to present the (elided) Coq definitions of the
inductives for expressions and actions in Fig.~\ref{fig:fesi}.
%
(For the sake of brevity, we omit the constructors for accesses to
register files, in the syntax and, later, in the semantics. We refer
the reader to the supplementary material~\cite{fesi} for more details.)
%
Our final definition \coqe{Action} of actions is a polymorphic
function from a choice of variables to an action (we refer the reader
to \cite{phoas-chlipala} for a more in-depth explanation of this
encoding strategy).
\begin{figure}[t]
\centering
\begin{coq}
Section t.
Variable V: ty -> Type. Variable $\Phi$: list mem.
Inductive expr: ty -> Type :=
| Evar : forall t (v : V t), expr t
| Eandb : expr B -> expr B -> expr B | ... (* operations on Booleans *)
| Eadd : forall n, expr (Int n) -> expr (Int n) -> expr (Int n) | ...(* operations on words *)
| Efst : forall l t, expr (Tuple (t::l)) -> expr t | ... (* operations on tuples *)
Inductive action: ty -> Type:=
| Return: forall t, expr t -> action t
| Bind: forall t u, action t -> (V t -> action u) -> action u
(* control-flow *)
| OrElse: forall t, action t -> action t -> action t
| Assert: expr B -> action Unit
(* memory operations on registers *)
| RegRead : forall t, member $\Phi$ (Reg t) -> action t
| RegWrite: forall t, member $\Phi$ (Reg t) -> expr t -> action Unit
(* memory operations on register files, and inputs *)
| ...
End t.
Definition Action $\Phi$ t := forall V, action V $\Phi$ t.
\end{coq}
\caption{The syntax of expressions and actions}
\label{fig:fesi}
\end{figure}
\subsubsection{Semantics.}
We endow Fe-Si programs with a simple synchronous semantics: starting
from an initial state, the execution of a Fe-Si program corresponds
to a sequence of atomic updates to the memory elements.
%
Each step goes as follows: reading the state, computing an update to
the state, committing this update.
%
%
\begin{figure*}
\centering
\begin{mathpar}
\inferrule{\Gamma \vdash e \leadsto v} {\Gamma, \Delta \vdash
\mathtt{Return}~e \to \mathtt{Some} (v,\Delta)}
\\
% bind
\inferrule{ \Gamma, \Delta_1 \vdash a \to \mathtt{None} } {\Gamma,
\Delta_1 \vdash \mathtt{Bind}~a~f \to \mathtt{None}} \and
\inferrule{ \Gamma, \Delta_1 \vdash a \to \mathtt{Some}~(v,
\Delta_2) \and \Gamma, \Delta_2 \vdash f~v \to r} {\Gamma,
\Delta_1 \vdash \mathtt{Bind}~a~f \to r}
\\
% assert
\inferrule{\Gamma \vdash e \leadsto \mathtt{true}} {\Gamma, \Delta
\vdash \mathtt{Assert}~e \to \mathtt{Some} (\mathtt{()},\Delta)}
\and \inferrule{\Gamma \vdash e \leadsto \mathtt{false}} {\Gamma,
\Delta \vdash \mathtt{Assert}~e \to \mathtt{None}}
\\
% orElse
\inferrule{ \Gamma, \Delta \vdash a \to \mathtt{Some}~(v,\Delta')}
{\Gamma, \Delta \vdash a~\mathtt{orElse}~b \to
\mathtt{Some}~(v,\Delta')} \and
\inferrule{\Gamma, \Delta \vdash a \to \mathtt{None} \and \Gamma,
\Delta \vdash b \to r}
{\Gamma, \Delta \vdash a~\mathtt{orElse}~b \to r}
\\
% register
\inferrule{\Gamma(r) = v} {\Gamma, \Delta \vdash
\mathtt{RegRead}~r \to \mathtt{Some} (v,\Delta)} \and
\inferrule{\Gamma \vdash e \leadsto v} {\Gamma, \Delta \vdash
\mathtt{RegWrite}~r~e \to \mathtt{Some}
(\mathtt{()},\Delta\oplus(r,v))}
\end{mathpar}
\caption{Dynamic semantics of Fe-Si programs}\label{fig:fesi-sem}
\end{figure*}
The reduction rules of Fe-Si programs are defined in
Fig.~\ref{fig:fesi-sem}. The judgement $\Gamma, \Delta \vdash a \to r$
reads ``in the state $\Gamma$ and with the partial update $\Delta$,
evaluating $a$ produces the result $r$'', where $r$ is either
\coqe{None} (meaning that the action aborted), or %
\coqe{Some (v, $\Delta'$)} (meaning that the action returned the value
\coqe{v} and the partial update $\Delta'$).
%
Note that the PHOAS approach makes it possible to manipulate closed
terms: we do not have rules for $\beta$-reduction, because it is
implemented by the host language.
%
That is, $\Gamma$ only stores the mutable state, and not the variable
values.
%
There are two peculiarities here: first, following the definition of
$\oplus$, if two values are written to a memory element, only the
first one (in program order) is committed; second, reading a register
yields the value that was held at the beginning of the time step.
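To make the first peculiarity concrete, one plausible reading of the
update operation $\oplus$ (a sketch of the behaviour described above,
not the actual Coq definition, which is elided here) is
\[
(\Delta \oplus (r, v))(r') =
\begin{cases}
  \Delta(r') & \text{if } \Delta(r') \text{ is defined,}\\
  v & \text{if } r' = r \text{ and } \Delta(r) \text{ is undefined,}\\
  \text{undefined} & \text{otherwise,}
\end{cases}
\]
so that an earlier write (in program order) shadows any later write to
the same element.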
% (For the interested reader, the reduction rules of Fe-Si can be
% described in terms of layered monads: we have a Reader monad of the
% old state and Writer monad of the state update to implement the
% synchronous aspect of state update; and we stack the Option monad on
% top of the state-change monads to implement the transactional aspect
% of the semantics.)
Finally, we define a wrapper function that computes the next state of
the memory elements, using the aforementioned evaluation relation
(starting with an empty partial update).
\begin{mcoq}
Definition Next {t} {$\Phi$} (st: $\denote{\Phi}$) (A : Action $\Phi$ t) : option ($\denotety{t}$ * $\denote{\Phi}$) := ...
\end{mcoq}
\subsection{RTL}
Our target language sits at the register-transfer level. At this
level, a synchronous circuit can be faithfully described as a set of
state-holding elements, and a next-state function, implemented using
combinational logic~\cite{DBLP:journals/cj/Gordon02}.
%
Therefore, the definition of RTL programs (\coqe{block} in the
following) is quite simple: a program is simply a well-formed sequence
of bindings of expressions (combinational operations, or reads from
state-holding elements), with a list of effects (i.e., writes to
state-holding elements) at the end.
We show the (elided) definition of expressions and sequences of
binders in Fig.~\ref{fig:rtl}.
%
The definition of expressions is similar to the one we used for Fe-Si,
except that we have constructors for reads from memory elements, and
that we moved to ``three-address code''.
%
(That is, operands are variables, rather than arbitrary expressions.)
%
A telescope (type \coqe{scope A}) is a well-formed sequence of binders
with an element of type \coqe{A} at the end (\coqe{A} is instantiated
later with a list of effects). Intuitively, this definition enforces
that the first binding of a telescope can only read from memory
elements; the second binding may use the first value, or read from
memory elements; and so on and so forth.
A \coqe{block} is a telescope, with three elements at the end: a
guard, a return value, and a (dependently typed) list of effects.
%
The value of the guard (a Boolean) is equal to true when the return
value is valid and the state updates must be committed; and false
otherwise.
%
The return value denotes the outputs of the circuits.
%
The data type \coqe{effects} encodes, for each memory element of the
list $\Phi$, either an effect (a write of the right type), or \coqe{None}
(meaning that this memory element is never written to). (For the sake
of brevity, we omit the particular definition of dependently typed
heterogeneous lists \coqe{DList.T} that we use here.)
\begin{figure}[t]
\centering
\begin{coq}
Section t.
Variable V: ty -> Type. Variable $\Phi$: list mem.
Inductive expr: ty -> Type :=
| Evar : forall t (v : V t), expr t
(* read from memory elements *)
| Einput : forall t, member $\Phi$ (Input t) -> expr t
| Eread_r : forall t, member $\Phi$ (Reg t) -> expr t
| Eread_rf : forall n t, member $\Phi$ (Regfile n t) -> V (Int n) -> expr t
(* Other operations on Booleans, words, tuples, etc. *)
| Emux : forall t, V B -> V t -> V t -> expr t
| Eandb : V B -> V B -> V B | ...
| Eadd : forall n, V (Int n) -> V (Int n) -> expr (Int n) | ...
| Efst : forall l t, V (Tuple (t::l)) -> expr t | ...
Inductive scope (A : Type): Type :=
| Send : A -> scope A
| Sbind : forall (t: ty), expr t -> (V t -> scope A) -> scope A.
Inductive write : mem -> Type :=
| WR : forall t, V t -> V B -> write (Reg t)
| WRF : forall n t, V t -> V (Int n) -> V B -> write (Regfile n t).
Definition effects := DList.T (option $\circ$ write) $\Phi$.
Definition block t := scope (V B * V t * effects).
End t.
Definition Block $\Phi$ t := forall V, block V $\Phi$ t.
\end{coq}
\caption{RTL programs with three-address code expressions}
\label{fig:rtl}
\end{figure}
\subsubsection{Semantics.} We now turn to define the semantics of our RTL
language.
%
First, we endow closed expressions with a denotation function (in the
same way as we did at the source level, except that it is not a
recursive definition).
%
Note that we instantiate the variable parameter of \coqe{expr} with
the function $\denotety{.}$, effectively tagging variables with their
denotations.
\begin{mcoq}
Variable $\Gamma$: $\denote{\Phi}$.
Definition eval_expr (t : ty) (e : expr $\denotety{.}$ t) : $\denotety{.}$:= ...
\end{mcoq}
\noindent The denotation of telescopes is a simple recursive function that
evaluates bindings in order and applies an arbitrary function on the
final (closed) object.
\begin{mcoq}
Fixpoint eval_scope {A B} (F : A -> B) (T : scope $\denotety{.}$ A) : B := ...
\end{mcoq}
%
The final piece that we need is the denotation that corresponds to the
\coqe{write} type. This function takes as argument a single effect,
the initial state of this memory location, and either returns a new
state for this memory location, or returns \coqe{None}, meaning that
location is left in its previous state.
\begin{mcoq}
Definition eval_effect (m : mem) : option (write $\denotety{.}$ m) -> $\denotemem{m}$ -> option $\denotemem{m}$ := ...
\end{mcoq}
%
Using all these pieces, it is quite easy to define the final
next-state function.
\begin{mcoq}
Definition Next {t} {$\Phi$} ($\Gamma$: $\denote{\Phi}$) (B : Block $\Phi$ t) : option ($\denotety{t}$ * $\denote{\Phi}$) := ...
\end{mcoq}
\subsection{Compiling Fe-Si to RTL}
Our syntactic translation from Fe-Si to RTL is driven by the fact that
our RTL language does not allow clashing assignments: syntactically,
each register and register file is updated by at most one \coqe{write}
expression.
\subsubsection{From control flow to data flow.} To satisfy this
constraint, we have to transform the control flow (the \coqe{Assert}
and \coqe{OrElse}) of Fe-Si programs into data flow.
%
We can do that in hardware, because circuits are inherently parallel:
for instance, the circuit that computes the result of the conditional
expression \mbox{\coqe{e ? a : b}} is a circuit that computes the
value of \coqe{a} and the value of \coqe{b} in parallel and then uses
the value of \coqe{e} to select the proper value for the whole
expression.
\subsubsection{Administrative normal form.} Our first compilation pass
transforms Fe-Si programs into an intermediate language that
implements A-normal form. That is, we assign a name to every
intermediate computation.
%
In order to do so, we also have to resolve the control flow. To be
more specific, given an expression like
\begin{mcoq}
do x <- (A OrElse B); ...
\end{mcoq}
we want to know statically to what value \coqe{x} needs to be bound
and when this value is \emph{valid}.
%
In this particular case, we remark that if \coqe{A} yields a value
$v_A$ which is valid, then \coqe{x} needs to be bound to $v_A$; if
\coqe{A} yields a value that is invalid, then \coqe{x} needs to be
bound to the value returned by \coqe{B}. In any case, the value bound
in \coqe{x} is valid whenever the value returned by \coqe{A} or the
value returned by \coqe{B} is valid.
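Concretely, this resolution can be pictured as a pair of muxes; the
following Python sketch (ours) assumes that each action has already been
elaborated into a pair of a validity bit and a value.
\begin{verbatim}
# Hypothetical resolution of (A OrElse B): both branches are computed in
# parallel, and A's validity bit drives the mux that selects the value.
def or_else(a, b):
    valid_a, v_a = a
    valid_b, v_b = b
    value = v_a if valid_a else v_b    # mux: v_A when A is valid, else v_B
    valid = valid_a or valid_b         # valid whenever either branch is
    return valid, value
\end{verbatim}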
More generally, our compilation function takes as argument an
arbitrary function, and returns a telescope that binds three values:
(1) a \emph{guard}, which denotes the validity of the following
components of the tuple; %
(2) a \emph{value}, which is bound by the telescope to denote the value
that was returned by the action; %
(3) a list of \emph{nested effects}, which are a lax version of the
effects that exist at the \coqe{RTL} level.
The rationale behind these nested effects is to represent trees of
conditional blocks, with writes to state-holding elements at the
leaves. (Using this data type, several paths in such a tree may lead
to a write to a given memory location; in this case, we use a notion
of program order to discriminate between clashing assignments.)
\subsubsection{Linearizing the effects.} Our second compilation pass
flattens the nested effects that were introduced in the first pass.
%
The idea of this translation is to associate two values to each
register: a \emph{data} value (the value that ought to be written) and
a \emph{write-enable} value. The data value is committed (i.e., stored)
to the register if the write-enable Boolean is true.
%
Similarly, we associate three values to each register-file: a data value, an
address, and a write-enable. The data is stored to the field of the
register file selected by the address if the write-enable is true.
The heart of this translation is a \coqe{merge} function that takes
two writes of the same type, and returns a telescope that
encapsulates a single \coqe{write}:
\begin{mcoq}
Definition merge s (a b : write s): scope (option (write s)) := ...
\end{mcoq}
% Despite requiring a bit of dependent types hackery\footnote{The
% reader may wonder why we need an option here; the answer is that we
% could do without it, at the price of a more complicated definition
% for \coqe{merge} that uses the fact that the type
% %
% \mbox{\coqe{write (Input t)}} is not inhabited.}, the definition of
% \mbox{\coqe{merge}} is the expected one.
%
For instance, in the register case, given $(v_a,e_a)$ (resp. $(v_b,
e_b)$) the data value and the write-enable that correspond to
\coqe{a}, the write-enable that corresponds to the merge of \coqe{a}
and \coqe{b} is $e_a || e_b$, and the associated data value is
\mbox{$e_a~?~v_a : v_b$}.
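The register case of \coqe{merge} can thus be sketched as follows in
Python (our illustration; in the Coq development the result is a telescope
that binds the freshly built expressions):
\begin{verbatim}
# Merge two register writes, each given as (data value, write-enable).
# The first write takes precedence when its write-enable holds.
def merge_reg(a, b):
    v_a, e_a = a
    v_b, e_b = b
    value = v_a if e_a else v_b     # e_a ? v_a : v_b
    enable = e_a or e_b             # e_a || e_b
    return value, enable
\end{verbatim}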
\subsubsection{Moving to RTL.} The third pass of our compiler translates
the previous intermediate language to RTL, which amounts to a simple
transformation into three-address code. This transformation simply
introduces new variables for all the intermediate expressions that
appear in the computations.
\subsection{Lightweight optimizations}
We will now describe two optimizations that we perform on programs
expressed in the RTL language.
%
The first one is a syntactic version of common sub-expression
elimination, intended to reduce the number of bindings and introduce
more sharing.
%
The second is a semantic common sub-expression elimination that aims
to reduce the size of the Boolean formulas that were generated in the
previous translation passes.
\subsubsection{Syntactic common-subexpression elimination.}
We implement CSE with a simple recursive traversal of RTL
programs. (Here we follow the overall approach used by
Chlipala~\cite{DBLP:conf/popl/Chlipala10}.)
%
Unlike our previous transformations, which were just ``pushing
variables around'' for each possible choice of variable representation
\coqe{V}, here we need to tag variables with their symbolic values,
which approximate the actual values held by the variables.
%
%
Then, CSE goes as follows. We fold through a telescope and maintain a
mapping from symbolic values to variables. For each binder of the
telescope, we compute the symbolic representation of the expression
that is bound.
%
If this symbolic value is already in the map, we avoid the creation of
an extraneous binder. Otherwise, we do create a new binder, and extend
our association list accordingly.
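The following Python sketch (ours, with telescopes deliberately simplified
to a list of expressions whose arguments are indices of earlier binders)
illustrates this fold; the real pass works on the dependently typed RTL
representation.
\begin{verbatim}
# Simplified syntactic CSE. The symbolic value of a binder is its
# expression with the arguments replaced by their (de-duplicated) indices.
def cse(bindings):
    symbolic = {}   # symbolic value -> index of the binder computing it
    renaming = []   # old binder index -> index after CSE
    out = []
    for op, *args in bindings:
        sym = (op, *(renaming[a] for a in args))
        if sym in symbolic:
            renaming.append(symbolic[sym])    # reuse the existing binder
        else:
            symbolic[sym] = len(out)
            renaming.append(len(out))
            out.append(sym)
    return out

# The duplicated 'add' is shared:
print(cse([('x',), ('y',), ('add', 0, 1), ('add', 0, 1), ('mul', 2, 3)]))
# [('x',), ('y',), ('add', 0, 1), ('mul', 2, 2)]
\end{verbatim}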
\subsubsection{Using BDDs to reduce Boolean expressions.}
Our compilation process introduces a lot of extra Boolean
variables. We use BDDs to implement semantic common-subexpression
elimination. We implemented a BDD library in Coq, and we use it to
annotate each Boolean expression of a program with an approximation of
its runtime value, i.e., a pointer to a node in a BDD.
%
Our use of BDDs boils down to hash-consing: it enforces that Boolean
expressions that are deemed equivalent are shared.
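The paper does not show the library itself; the Python sketch below is the
standard unique-table construction that such a pass relies on: nodes are
identified by their variable and children, so building the node for a
given Boolean function twice returns the same identifier, which is exactly
the sharing we exploit.
\begin{verbatim}
# Minimal hash-consed (reduced, ordered) BDD nodes.
FALSE, TRUE = 0, 1
unique = {}     # (var, low child, high child) -> node id
nodes = {}      # node id -> (var, low child, high child)

def mk(var, lo, hi):
    if lo == hi:                       # reduction: the test is useless
        return lo
    key = (var, lo, hi)
    if key not in unique:
        unique[key] = len(nodes) + 2   # ids 0 and 1 are the constants
        nodes[unique[key]] = key
    return unique[key]

# Building the node for the variable x twice yields the same identifier,
# so the two occurrences can be shared.
assert mk('x', FALSE, TRUE) == mk('x', FALSE, TRUE)
\end{verbatim}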
% The purpose of this pass is to simplify the extraneous boolean
% operations that were introduced by our compilation passes. In order to
% simplify only the Boolean computations that we introduced, we could
% use two different kinds of Booleans (the ones that were present at the
% source level and the others); and use our simplification pass only on
% the latter.
\subsection{Putting it all together}
In the end, we prove that our whole compiler is semantics preserving.
\begin{coq}
Variable ($\Phi$: list mem) (t : ty).
Definition fesic (A : Fesi.Action $\Phi$ t) : RTL.Block $\Phi$ t :=
$\quad$let x := IR.Compile $\Phi$ t A in let x := RTL.Compile $\Phi$ t x in
$\quad$let x := CSE.Compile $\Phi$ t x in BDD.Compile $\Phi$ t x.
Theorem fesic_correct : forall A ($\Gamma$ : $\denote{\Phi}$), Front.Next $\Gamma$ A = RTL.Next $\Gamma$ (fesic A).\end{coq}
A corollary of this theorem is that, given a certified Fe-Si design,
our compiler produces correct-by-construction RTL code. Therefore, in
the following, we will focus on the verification of (families of)
Fe-Si designs.
\section{Design and verification of a sorter core}
We now turn to the description of a first hardware circuit implemented
and proved correct in Fe-Si.
%
A \emph{sorting network}~\cite{DBLP:books/mg/CormenLRS01} is a
parallel sorting algorithm that sorts a sequence of values using only
compare-and-swap operations, in a data-independent way. This makes it
suitable for a hardware implementation.
Bitonic sorters for sequences of length $2^n$ can be generated using
short and simple algorithmic descriptions. Yet, formally proving their
correctness is a challenge that was only partially solved in two
different lines of previous work.
%
First, sorter core generators were studied from a hardware design
perspective in Lava~\cite{DBLP:conf/charme/ClaessenSS01}, but the formal
proofs are limited to circuits of a fixed size -- bounded by the
performance of the automated verification tools.
%
Second, machine-checked formal proofs of bitonic sort were carried out,
e.g., in Agda~\cite{DBLP:conf/types/BoveC04}, but without a
connection to an actual hardware implementation.
%
Our main contribution here is to implement such generators and to
propose a formal proof of their correctness.
More precisely, we implemented a version of bitonic sort as a regular
Coq program and proved that it sorts its inputs. This proof follows
closely the one described by Bove and
Coquand~\cite{DBLP:conf/types/BoveC04} -- in Agda -- and amounts to
roughly 1000 lines of Coq, including a proof of the so-called
\mbox{0-1~principle}\footnote{That is, a (parametric) sorting network
is valid if it sorts all sequences of 0s and 1s.}.
Then, we implemented a version of bitonic sort as a Fe-Si program,
which mimicked the structure of the previous one. We present
side-by-side the Coq implementation of \coqe{reverse} in
Fig.~\ref{fig:reverse}.
%
The version on the left-hand side can be seen as a specification: it
takes as argument a sequence of $2^n$ inputs (represented as a
complete binary tree of depth $n$) and reverses the order of this
sequence.
%
The code on the right-hand side implements part of the
connection-pattern of the sorter. More precisely, it takes as input a
sequence of input variables and builds a circuit that outputs this
sequence in reverse order.
Next, we turn to the function that is at the heart of the bitonic
sorting network.
%
A bitonic sequence is a sequence $(x_i)_{0 \le i < n}$ whose
monotonicity changes at most once, i.e.,
$$ x_0 \le \cdots \le x_k \ge \cdots \ge x_{n-1}, \quad \text{with } 0 \le k < n, $$
or a circular shift of such a sequence.
%
Given a bitonic input sequence of length $2^n$, the left-hand side
\coqe{min_max_swap} returns two bitonic sequences of length $2^{n-1}$,
such that every element of the first sequence is smaller than or equal
to every element of the second sequence.
%
The right-hand side version of this function builds the corresponding
comparator network: it takes as arguments a sequence of input
variables and returns a circuit.
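To make the recursion concrete, here is a plain list-based Python sketch
of bitonic sorting for sequences of length $2^n$ (our own illustration;
the Coq development of Fig.~\ref{fig:reverse} works on the tree
representation, and its merge and sort functions are elided there).
\begin{verbatim}
# Compare-and-swap the two halves element-wise.
def min_max_swap(l, r):
    pairs = [(min(a, b), max(a, b)) for a, b in zip(l, r)]
    return [p[0] for p in pairs], [p[1] for p in pairs]

# Merge a bitonic sequence of length 2^n into a sorted one.
def bitonic_merge(xs):
    if len(xs) == 1:
        return xs
    half = len(xs) // 2
    lo, hi = min_max_swap(xs[:half], xs[half:])
    return bitonic_merge(lo) + bitonic_merge(hi)

# Sort both halves, reverse one so the concatenation is bitonic, merge.
def bitonic_sort(xs):
    if len(xs) == 1:
        return xs
    half = len(xs) // 2
    left, right = bitonic_sort(xs[:half]), bitonic_sort(xs[half:])
    return bitonic_merge(left + list(reversed(right)))

print(bitonic_sort([3, 7, 4, 8, 6, 2, 1, 5]))   # [1, 2, ..., 8]
\end{verbatim}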
We go on with the same ideas to finish the Fe-Si implementation of
bitonic sort. The rest of the code is unsurprising, except that it
requires implementing a dedicated bind operation of type
\begin{mcoq}
forall (U: ty) n, Var (domain n) -> (T n -> action [] Var U) -> action [] Var U.
\end{mcoq}
that makes it possible to recover the tree structure out of the
result type of a circuit (\coqe{domain n}).
\begin{figure}[t]
\centering
\begin{twolistings}
\begin{coq}
(* Lists of length $2^n$ represented as trees *)
Inductive tree (A: Type): nat -> Type :=
| L : forall x : A, tree A 0
| N : forall n (l r : tree A n), tree A (S n).
$ $
Definition leaf {A n} (t: tree A 0) : A := ...
Definition left {A n} (t: tree A (S n)) : tree A n := ...
Definition right {A n} (t: tree A (S n)) : tree A n := ...
Fixpoint reverse {A} n (t : tree A n) :=
match t with
| L x => L x
| N n l r =>
let r := (reverse n r) in
let l := (reverse n l) in
N n r l
end.
$ $
Variable cmp: A -> A -> A * A.
Fixpoint min_max_swap {A} n :
forall (l r : tree A n), tree A n * tree A n :=
match n with
| 0 => fun l r =>
let (x,y) := cmp (leaf l) (leaf r) in (L x, L y)
| S p => fun l r =>
let (a,b) := min_max_swap p (left l) (left r) in
let (c,d) := min_max_swap p (right l) (right r) in
(N p a c, N p b d)
end.
...
Fixpoint sort n : tree A n -> tree A n := ...
\end{coq}
& $\quad$
\begin{coq}
Variable A : ty.
Fixpoint domain n := match n with
| 0 => A
| S n => (domain n) $\otimes$ (domain n)
end.
Notation T n := tree (expr Var A) n.
Notation C n := action nil Var (domain n).
Fixpoint reverse n (t : T n) : C n :=
match t with
| L x => ret x
| N n l r =>
do r <- reverse n r;
do l <- reverse n l;
ret [tuple r, l]
end.
Notation mk_N x y := [tuple x,y].
Variable cmp : Var A -> Var A
$\qquad\qquad\qquad$ -> action nil Var (A $\otimes$ A).
Fixpoint min_max_swap n :
forall (l r : T n), C (S n) :=
match n with
| 0 => fun l r =>
cmp (leaf l) (leaf r)
| S p => fun l r =>
do a,b <- min_max_swap p (left l) (left r);
do c,d <- min_max_swap p (right l) (right r);
ret ([tuple mk_N a c, mk_N b d])
end.
...
Fixpoint sort n : T n -> C n := ...
\end{coq}
\end{twolistings}
\caption{Comparing the specification and the Fe-Si implementation}
\label{fig:reverse}
\end{figure}
We are now ready to state (and prove) the correctness of our sorter
core. We chose to fix a context where the data that we sort are
integers of a given size, but we could generalize this proof to other
data types, e.g., to sort tuples in lexicographic order.
We side-step the presentation of some of our Coq encoding, to present the final
theorem in a stylized fashion.
\begin{theorem}
Let $I$ be a sequence of length $2^n$ of integers of size $m$. The
circuit always produces an output sequence that is a sorted permutation of $I$.
\end{theorem}
(Note that this theorem states the correctness of the Fe-Si
implementation against a specification of sorted sequences that is
independent of the implementation of the sorting algorithm in the
left-hand side of Fig.~\ref{fig:reverse}; the latter only serves as a
convenient intermediate step in the proof.)
% \begin{tabular}{|l|}
% \hline
% \begin{minipage}{1.0\linewidth}
% Alternatively, I wonder if the following definition could appear
% in the paper: Eval is the Fe-si evaluation function. Circuit.sort is
% the circuit described in the right-hand side of
% Fig.~\ref{fig:reverse}; Spec.sort is the algorithm that appears in
% the left-hand side of Fig.~\ref{fig:reverse}. Spec.tsorted is the
% specification of what it means to be ``sorted'' for sequences
% represented as trees. The relation $\equiv$ relates sequences
% represented as trees and sequences represented as tuples (type
% domain n) in the circuit world...
% \end{minipage}\\
% \begin{minipage}{1.0\linewidth}
% \begin{lstlisting}[language=Coq]
% Notation A := (Int m).
% Theorem circuit_sort_correct n (I : tree (Var A) n) :
% match Eval (Circuit.sort n I) with
% | None => False (* i.e., the circuit always outputs a value *)
% | Some (_,out) => out $\equiv$ (Spec.sort n I) /\ Spec.tsorted n (Spec.sort n I)
% end.
% \end{lstlisting}
% \end{minipage}\\
% \hline
% \end{tabular}
\subsubsection{Testing the design.}
Finally, we indulge ourselves and test a design that was formally
proven, using a stock Verilog simulator~\cite{iverilog}. We set the
word size and the number of inputs of the sorter, and we generate the
corresponding Verilog code. Unsurprisingly, the sorter core sorts its
input sequence in every one of our test runs.
\section{Verifying a stack machine}
The circuit that was described in the previous section is a simple
combinational sorter: we could have gone one step further in this
verification effort and pipelined our design by registering the output
of each compare-and-swap operator. However, we chose here to describe
a more interesting design: a hardware implementation of a simple stack
machine for the IMP language, i.e., a tiny subset of the Java virtual
machine.
Again, we proceed in two steps: first, we define a specification of
the behavior of our stack machine; second, we build a Fe-Si
implementation and prove that it behaves as prescribed.
%
The instruction set of our machine is given in Fig.~\ref{fig:stack},
where we let $x$ range over identifiers (represented as natural
numbers) and $n,\delta$ range over values (natural numbers).
%
The state of the machine is composed of the code (a list of
instructions), a program counter (an integer), a variable stack (a list
of values), and a store (a mapping from variables to values). The
semantics of the machine is given by a one-step transition relation in Fig.~\ref{fig:stack}.
%
Note that this specification uses natural numbers and lists in a
pervasive manner: this cannot be faithfully encoded using finite-size
machine words and register files.
%
For simplicity, we resolve this tension by adding some dynamic
checks (that do not appear explicitly in Fig.~\ref{fig:stack}) to the
transition relation to rule out such ill-defined behaviors. (Note
that this is an alternative to using finite-size machine words in
the model; the latter would catch assembly programs with bugs related to
overflows.)
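As a concrete reading of the rules of Fig.~\ref{fig:stack}, the following
Python sketch (ours) implements one step of the specification for a few
instructions; returning \texttt{None} encodes both ``no reduction'' and
the dynamic checks mentioned above.
\begin{verbatim}
# One step of the stack machine: C is the code (a list of instructions),
# the configuration is (pc, stack, store); None means "no reduction".
def step(C, pc, stack, store):
    if not (0 <= pc < len(C)):
        return None
    instr = C[pc]
    if instr[0] == 'const':
        return pc + 1, [instr[1]] + stack, store
    if instr[0] == 'var':
        return pc + 1, [store[instr[1]]] + stack, store
    if instr[0] == 'setvar':
        if not stack:                      # dynamic check: stack underflow
            return None
        return pc + 1, stack[1:], {**store, instr[1]: stack[0]}
    if instr[0] == 'add':
        if len(stack) < 2:
            return None
        n2, n1 = stack[0], stack[1]
        return pc + 1, [n1 + n2] + stack[2:], store
    if instr[0] == 'bcond':
        if len(stack) < 2:
            return None
        c, delta = instr[1], instr[2]
        n2, n1 = stack[0], stack[1]
        return (pc + 1 + delta if c(n1, n2) else pc + 1), stack[2:], store
    return None        # halt, or an instruction not covered by this sketch
\end{verbatim}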
\begin{figure}
\centering
\begin{twolistings}
\begin{tabular}{rcll}
i & ::= & \texttt{const $n$ }\\
& $|$ & \texttt{var $x$ }\\
& $|$ & \texttt{setvar $x$ }\\
& $|$ & \texttt{add }\\
& $|$ & \texttt{sub }\\
& $|$ & \texttt{bfwd $\delta$ }\\
& $|$ & \texttt{bbwd $\delta$ }\\
& $|$ & \texttt{bcond $c$ $\delta$ }\\
\\
& $|$ & \texttt{halt }\\
\end{tabular}
& \qquad
\begin{tabular}{ll}
$C \vdash pc,\sigma,s \to pc+1, n :: \sigma,s$ & \text{if $C(pc)$ = \texttt{const $n$}} \\
$C \vdash pc,\sigma,s \to pc+1, s(x) :: \sigma,s$ & \text{if $C(pc)$ = \texttt{var $x$}} \\
$C \vdash pc,v::\sigma,s \to pc+1, \sigma,s[x \leftarrow v]$ & \text{if $C(pc)$ = \texttt{setvar $x$}} \\
$C \vdash pc,n_2::n_1::\sigma,s \to pc+1, (n_1+n_2)::\sigma,s$ & \text{if $C(pc)$ = \texttt{add}} \\
$C \vdash pc,n_2::n_1::\sigma,s \to pc+1, (n_1-n_2)::\sigma,s$ & \text{if $C(pc)$ = \texttt{sub}} \\
$C \vdash pc,\sigma,s \to pc+1+\delta, \sigma,s$ & \text{if $C(pc)$ = \texttt{bfwd $\delta$}} \\
$C \vdash pc,\sigma,s \to pc+1-\delta, \sigma,s$ & \text{if $C(pc)$ = \texttt{bbwd $\delta$}} \\
$C \vdash pc,n_2::n_1::\sigma,s \to pc+1+\delta, \sigma,s$ & \text{if $C(pc)$ = \texttt{bcond c $\delta$} and $c~n_1~n_2$} \\
$C \vdash pc,n_2::n_1::\sigma,s \to pc+1, \sigma,s$ & \text{if $C(pc)$
= \texttt{bcond c $\delta$} and $\neg (c~n_1~n_2)$} \\
\texttt{no reduction}
\end{tabular}
\end{twolistings}
\caption{Instruction set and transition relation of our stack machine}
\label{fig:stack}
\end{figure}
The actual Fe-Si implementation is straightforward. The definition of
the internal state is depicted below: the stack is implemented using a
register file and a stack pointer; the store is a simple register
file; the code is implemented as another register file that is
addressed by the program counter.
\begin{twolistings}
\begin{coq}
Variable n : nat.
Notation OPCODE := (Tint 4).
Notation VAL := (Tint n).
Definition INSTR := OPCODE $\otimes$ VAL.
$ $
\end{coq}
&
\begin{coq}
Definition $\Phi$ : state := [
Tregfile n INSTR; (* code *)
Treg VAL; (* program counter *)
Tregfile n VAL; (* stack *)
Treg VAL; (* stack pointer *)
Tregfile n VAL (* store *)].
\end{coq}
\end{twolistings}
The implementation of the machine is unsurprising: we access
the code memory at the address given by the program counter; we
case-match over the value of the opcode and update the various
elements of the machine state accordingly.
%
For the sake of space, we only present the code for the \texttt{setvar
$x$} instruction below.
% elided the #i notation in front of 1
\begin{twolistings}
\begin{coq}
Definition pop :=
do sp <- ! SP;
do x <- read STACK [: sp - 1];
do _ <- SP ::= sp - 1;
ret x.
\end{coq}
&
\begin{coq}
Definition Isetvar pc i :=
do v <- pop;
do _ <- write REGS [: snd i <- v];
PC ::= pc + 1.
$ $
\end{coq}
\end{twolistings}
\subsubsection{Correctness.} We are now ready to prove that our
hardware design is a sound implementation of its specification.
%
First, we define a logical relation that relates the two
encodings of machine state (in the specification and in the
implementation), written $\equiv$.
%
Then, we prove a simulation property between related states.
\begin{theorem}
Let $s_1$ be a machine state as implemented in the specification and
$m_1$ the machine state as implemented in the circuit, such that
$s_1 \equiv m_1$.
%
If $s_1$ makes a transition to $s_2$, then $m_1$ makes a transition
to $m_2$ such that $s_2 \equiv m_2$.
\end{theorem}
Note that we do not prove completeness here: it is indeed the case
that our implementation exhibits behaviors that cannot be mapped to
behaviors of the specification.
%
In this example, the specification should be regarded as an
abstraction of the actual behaviors of the implementation, which could
be used to reason about the soundness of programs, either written by
hand or produced by a certified compiler.
%
\subsubsection{Testing the design.}
Again, we compiled our Fe-Si design to an actual Verilog
implementation. We load binary blobs that correspond to test programs
into the code memory, and run the design while monitoring the contents
of given memory locations. This gives rise to a highly stylized way of
computing, e.g., the Fibonacci sequence.
\section{Comparison with related work}\label{sec:rw}
Fe-Si marries hardware design, functional programming and inductive
theorem proving.
%
We refer the reader to Sheeran~\cite{DBLP:journals/jucs/Sheeran05} for
a review of the use of functional programming languages in hardware
design, and only discuss the most closely related work.
\medskip
Lava~\cite{Bjesse98lava:hardware} embeds parametric circuit generators
in Haskell.
%
Omitting the underlying implementation language, Lava can be described
as a subset of Fe-Si, with two key differences.
%
First, Lava features a number of layout primitives, which makes it
possible to describe more precisely what should be the hardware
layout, yielding more efficient FPGA implementations. We argue that
these operators are irrelevant from the point of view of verification
and could be added to Fe-Si if needed.
%
Second, while Lava features ``non-standard'' interpretations of
circuits that make it possible to prove the correctness of fixed-size
tangible representations of circuits, our embedding of Fe-Si in Coq
goes further: it makes it possible to prove the correctness of
parametric circuit generators.
\medskip
Bluespec SystemVerilog~\cite{bsv-by-example} (BSV) is an industrial
strength hardware description language based on non-deterministic
guarded atomic actions. BSV features a module system, support for
polymorphic functions and modules, support for scheduling directives,
and support for static elaboration of programs (e.g., loops to express
repetitive code).
%
After static elaboration, a program is a set of rewrite rules in a
Term Rewriting System that are non-deterministically executed one at a
time.
%
To implement a Bluespec program in hardware, the Bluespec compiler
needs to generate a deterministic schedule in which one or more rules
fire in each clock cycle.
%
Non-determinism makes it possible to use Bluespec both as an
implementation language and as a specification language.
%
Fe-Si can be described as a deterministic idealized version of
Bluespec.
%
We argue that deterministic semantics are easier to reason with, and
that we can use the full power of Coq as a specification language to
compensate for the lack of non-determinism in the Fe-Si language.
%
Moreover, more complex scheduling can be implemented using a few
program combinators~\cite{DBLP:conf/memocode/DaveAP07}: we look
forward to implementing these in Fe-Si.
%
Finally, note that using Coq as a meta language makes it possible to
express (and verify) static elaboration of Fe-Si programs, similarly
to what BSV provides for the core of Bluespec.
%
We still have to determine to what extent some parts of BSV module and
type systems could be either added in Fe-Si or encoded in
Coq-as-a-meta-language.
\medskip
The synchronous programming language Quartz~\cite{quartz} is part of
the Averest project that aims at building tools for the development
and verification of reactive systems.
%
Quartz is a variant of Esterel that can be compiled to
``guarded commands''~\cite{DBLP:conf/acsd/Schneider01}. The algorithms
underlying this compiler have been verified in the HOL theorem prover,
which is similar to our approach of verified high-level synthesis.
%
However, in the Averest tool-chain, the verification (automated or
interactive using HOL) takes place at the level of the guarded
commands; yet, despite the naming similarity, guarded commands are
closer to our RTL effects than to our guarded atomic actions.
%
The main difference between our approaches is the place at which
verification occurs: in Fe-Si, verification occurs at the level of
circuit generators, written in the source language and where
high-level constructs are still present; in Averest, it
occurs at the intermediate representation level, in which
high-level constructs have been transformed.
\medskip
Richards and Lester~\cite{DBLP:journals/isse/RichardsL11} developed a
shallow embedding of a subset of Bluespec in PVS. While they do not
address circuit generators or the generation of RTL code, they proved
the correctness of a three-input fair arbiter and a two-process
implementation of Peterson's algorithm, which complement our case
studies (we have not attempted to translate these examples into Fe-Si).
Slind et al.~\cite{DBLP:journals/fac/SlindOIG07} built a compiler that
creates correct-by-construction hardware implementations of arithmetic
and cryptographic functions that are implemented in a synthesisable
subset of HOL. Parametric circuits are not considered.
Centaur Technology and the University of Texas have developed a formal
verification framework~\cite{DBLP:conf/memocode/SlobodovaDSH11} that
makes it possible to verify RTL and transistor level code.
%
They implement industrial level tools tied together in the ACL2
theorem prover and focus on hardware validation (starting from
existing code). By contrast, we focus on high-level hardware
synthesis, with an emphasis on the verification of parametric designs.
\section{Conclusion}
Our compiler is available on-line along with our examples as
supplementary material~\cite{fesi}.
%
The technical contributions of this paper are:
%
a certified compiler from Fe-Si, a simple hardware description
language, to RTL code;
%
and machine-checked proofs of correctness for some infinite families
of hardware designs expressed in Fe-Si (parameterized by sizes), which
are compiled to correct hardware designs using the above compiler.
This work is intended to be a proof of concept: much remains to be
done to scale our examples to more realistic designs and to make our
compiler more powerful (e.g., improving on our current
optimizations). Yet, we argue that it provides an economical path to
certification of parameterized hardware designs.
\subsubsection{Acknowledgements.} We thank MIT's CSG group for
invaluable discussions and comments.
%
Part of this research was done while the first author was visiting MIT
from University of Grenoble. This material is based on research
sponsored by DARPA under agreement number FA8750-12-2-0110.
% The U.S. Government is authorized to reproduce and distribute reprints
% for Governmental purposes notwithstanding any copyright notation
% thereon. The views and conclusions contained herein are those of the
% authors and should not be interpreted as necessarily representing the
% official policies or endorsements, either expressed or implied, of
% DARPA or the U.S. Government.
\bibliography{synthesis}
\end{document}
| {
"alphanum_fraction": 0.7325775586,
"avg_line_length": 40.8483146067,
"ext": "tex",
"hexsha": "1ffe3c1b97fcf1ac055a55fd2db249079d86f2be",
"lang": "TeX",
"max_forks_count": 5,
"max_forks_repo_forks_event_max_datetime": "2020-10-21T22:19:58.000Z",
"max_forks_repo_forks_event_min_datetime": "2015-07-24T20:16:16.000Z",
"max_forks_repo_head_hexsha": "922982aaddb8a7a16101ff304c45d24a6265dc2e",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "teodorov/Synthesis",
"max_forks_repo_path": "papers/cav-13/main.tex",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "922982aaddb8a7a16101ff304c45d24a6265dc2e",
"max_issues_repo_issues_event_max_datetime": "2015-08-01T08:13:41.000Z",
"max_issues_repo_issues_event_min_datetime": "2015-08-01T08:13:41.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "teodorov/Synthesis",
"max_issues_repo_path": "papers/cav-13/main.tex",
"max_line_length": 128,
"max_stars_count": 14,
"max_stars_repo_head_hexsha": "922982aaddb8a7a16101ff304c45d24a6265dc2e",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "braibant/Synthesis",
"max_stars_repo_path": "papers/cav-13/main.tex",
"max_stars_repo_stars_event_max_datetime": "2020-09-13T22:34:36.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-01-23T02:34:49.000Z",
"num_tokens": 13780,
"size": 50897
} |
\documentclass[12pt]{article}
\input{physics1}
\begin{document}
\section*{NYU Physics I---pulleys and blocks}
The instructor will draw two machines on the board. The first machine
consists of three blocks, of masses 7, 4, and $3\,\kg$, and two
pulleys (and three strings). The second machine consists of two
blocks, of masses 4 and $3\,\kg$, and one pulley (and two strings).
Choose a partner and work in pairs. Each member of each pair should
write down---on paper---their answers to each part of this worksheet.
You don't have to agree with your partner, but you must discuss and
understand one another.
\paragraph{\theproblem}\refstepcounter{problem}%
In the three-block machine, do you expect the $7\,\kg$ block to
accelerate upwards, or downwards, or not accelerate? You might have a
quick answer to this and want to move on, but talk it out. Consider
extreme scenarios (that is, consider changing the masses of the 3 and
$4\,\kg$ blocks). Are you \emph{sure} about your prediction?
\paragraph{\theproblem}\refstepcounter{problem}%
For the two-block machine, draw free-body diagrams for each of the two
blocks, and for the pulley. Make sure you can justify what you have
drawn. Make sure you are happy with your free-body diagram for the
pulley.
\paragraph{\theproblem}\refstepcounter{problem}%
We are going to assume that the strings in this problem are
inextensible. Why are we going to assume that? What does it do for us?
\paragraph{\theproblem}\refstepcounter{problem}%
We are going to assume that the strings and pulleys are massless. What
does that do for us? How does that help us?
\paragraph{\theproblem}\refstepcounter{problem}%
We are going to assume that the pulleys are frictionless. How does
\emph{that} help us?
\paragraph{\theproblem}\refstepcounter{problem}%
What is the relationship between the accelerations of the two blocks?
Set up a coordinate system and describe this relationship with an
equation.
\paragraph{\theproblem}\refstepcounter{problem}%
Put it all together and solve for the accelerations for the two
blocks, and the tensions in the two strings.
\paragraph{\theproblem}\refstepcounter{problem}%
What is the tension in the top string, and how does it compare to the
total mass times the acceleration due to gravity? Does this change
your thinking about the first question on this worksheet?
\end{document}
| {
"alphanum_fraction": 0.779286927,
"avg_line_length": 41.3333333333,
"ext": "tex",
"hexsha": "6c4ea00c8238ce15f25f0d614214f6e5d9dfe3ca",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "6723ce2a5088f17b13d3cd6b64c24f67b70e3bda",
"max_forks_repo_licenses": [
"CC-BY-4.0"
],
"max_forks_repo_name": "davidwhogg/Physics1",
"max_forks_repo_path": "tex/worksheet_pulleys.tex",
"max_issues_count": 29,
"max_issues_repo_head_hexsha": "6723ce2a5088f17b13d3cd6b64c24f67b70e3bda",
"max_issues_repo_issues_event_max_datetime": "2019-01-29T22:47:25.000Z",
"max_issues_repo_issues_event_min_datetime": "2016-10-07T19:48:57.000Z",
"max_issues_repo_licenses": [
"CC-BY-4.0"
],
"max_issues_repo_name": "davidwhogg/Physics1",
"max_issues_repo_path": "tex/worksheet_pulleys.tex",
"max_line_length": 70,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "6723ce2a5088f17b13d3cd6b64c24f67b70e3bda",
"max_stars_repo_licenses": [
"CC-BY-4.0"
],
"max_stars_repo_name": "davidwhogg/Physics1",
"max_stars_repo_path": "tex/worksheet_pulleys.tex",
"max_stars_repo_stars_event_max_datetime": "2017-11-13T03:48:56.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-11-13T03:48:56.000Z",
"num_tokens": 606,
"size": 2356
} |
\section{User Guide}
This section outlines the steps needed to set up a spherical pendulum in Python using Basilisk; a consolidated code sketch follows the list.
\begin{enumerate}
\item Import the sphericalPendulum class: \newline \textit{from Basilisk.simulation import sphericalPendulum}
\item Create an instantiation of a spherical pendulum particle: \newline \textit{particle1 = sphericalPendulum.SphericalPendulum()}
\item Define all physical parameters for a spherical pendulum particle. For example: \newline
\textit{particle1.r\_PB\_B = [[0.1], [0], [-0.1]]}
Do this for all of the parameters for a spherical pendulum seen in the public variables in the .h file.
\item Define the initial conditions of the states:\newline
\textit{particle1.phiInit = 0.05 \quad particle1.phiDotInit = 0.0}
\item Define a unique name for each state:\newline
\textit{particle1.nameOfPhiState = "sphericalPendulumPhi" \quad particle1.nameOfPhiDotState = "sphericalPendulumPhiDot"}
\item Finally, add the particle to your spacecraftPlus:\newline
\textit{scObject.addStateEffector(particle1)}. See spacecraftPlus documentation on how to set up a spacecraftPlus object.
\end{enumerate}
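The steps above are collected in the following sketch (it assumes a
spacecraftPlus object named \textit{scObject} has already been created as
described in the spacecraftPlus documentation):
\begin{verbatim}
from Basilisk.simulation import sphericalPendulum

# Step 2: instantiate a spherical pendulum particle
particle1 = sphericalPendulum.SphericalPendulum()

# Step 3: physical parameters (only one shown; set all public variables
# of the .h file as needed)
particle1.r_PB_B = [[0.1], [0], [-0.1]]

# Step 4: initial conditions of the states
particle1.phiInit = 0.05
particle1.phiDotInit = 0.0

# Step 5: unique names for each state
particle1.nameOfPhiState = "sphericalPendulumPhi"
particle1.nameOfPhiDotState = "sphericalPendulumPhiDot"

# Step 6: attach the particle to the existing spacecraftPlus object
scObject.addStateEffector(particle1)
\end{verbatim}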
| {
"alphanum_fraction": 0.7910839161,
"avg_line_length": 63.5555555556,
"ext": "tex",
"hexsha": "02dcb53dd2a7bb454d336c48af3831f3c20fc2df",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a8b1e37c31c1287549d6fd4d71fcaa35b6fc3f14",
"max_forks_repo_licenses": [
"0BSD"
],
"max_forks_repo_name": "ian-cooke/basilisk_mag",
"max_forks_repo_path": "src/simulation/dynamics/sphericalPendulum/_Documentation/secUserGuide.tex",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "a8b1e37c31c1287549d6fd4d71fcaa35b6fc3f14",
"max_issues_repo_issues_event_max_datetime": "2019-03-13T20:52:22.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-03-13T20:52:22.000Z",
"max_issues_repo_licenses": [
"0BSD"
],
"max_issues_repo_name": "ian-cooke/basilisk_mag",
"max_issues_repo_path": "src/simulation/dynamics/sphericalPendulum/_Documentation/secUserGuide.tex",
"max_line_length": 132,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a8b1e37c31c1287549d6fd4d71fcaa35b6fc3f14",
"max_stars_repo_licenses": [
"0BSD"
],
"max_stars_repo_name": "ian-cooke/basilisk_mag",
"max_stars_repo_path": "src/simulation/dynamics/sphericalPendulum/_Documentation/secUserGuide.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 304,
"size": 1144
} |
\section{Divine Salvation}\label{wonder:divineSalvation}
\textbf{Cost:} 1,000 CP\\
\textbf{Requirements:} Disciple of a God\\
\textbf{Active, Wonder, Memory, Source(2,000 Gold)}\\
When a creature that you can see would be killed, you can react and pray to your deity to save this creature's life.
As a result, the creature instead drops to 1 Health and becomes unconscious for one hour.
\\ | {
"alphanum_fraction": 0.763496144,
"avg_line_length": 55.5714285714,
"ext": "tex",
"hexsha": "7562e141058833a44bed3886f64bb436adcddaff",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "73781f7cd7035b927a35199af56f9da2ad2c2e95",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "NTrixner/RaggedLandsPenAndPaper",
"max_forks_repo_path": "perks/divine/wonders/divinesalvation.tex",
"max_issues_count": 155,
"max_issues_repo_head_hexsha": "73781f7cd7035b927a35199af56f9da2ad2c2e95",
"max_issues_repo_issues_event_max_datetime": "2022-03-03T13:49:05.000Z",
"max_issues_repo_issues_event_min_datetime": "2018-03-18T13:19:57.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "NTrixner/RaggedLandsPenAndPaper",
"max_issues_repo_path": "perks/divine/wonders/divinesalvation.tex",
"max_line_length": 116,
"max_stars_count": 6,
"max_stars_repo_head_hexsha": "73781f7cd7035b927a35199af56f9da2ad2c2e95",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "NTrixner/RaggedLandsPenAndPaper",
"max_stars_repo_path": "perks/divine/wonders/divinesalvation.tex",
"max_stars_repo_stars_event_max_datetime": "2022-02-03T09:32:08.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-03-13T09:33:31.000Z",
"num_tokens": 110,
"size": 389
} |
%============================%
% %
% DOC SMECY COMPILER %
% %
%============================%
%=====HEADER=====%
\documentclass[a4paper,11pt]{article}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage[english]{babel}
\usepackage{hyperref}
%\usepackage{algorithmic}
%\usepackage{algorithm}
%\usepackage[left=90px,hmarginratio=1:1,top=25mm]{geometry}
%\usepackage{amsthm}
%\usepackage{amsmath}
%\usepackage{amssymb}
%\usepackage{mathrsfs}
\usepackage{graphicx}
%\renewcommand{\algorithmiccomment}[1]{//#1}
\newcommand{\compiler}{SMECC}
\newcommand{\scompiler}{SMECC }
%=====TITLE=====%
\title{\scompiler -- A SME-C compiler using ROSE}
\author{Vincent \textsc{Lanore}}
%=====DOCUMENT=====%
\begin{document}
\maketitle
\section{Presentation}
\scompiler (for SME-C Compiler) is a C99/C++ compiler able to process SMECY pragmas to map function calls to hardware accelerators. \scompiler is written using ROSE \cite{usermanual,tuto}, a tool to write source-to-source translators. First, the input code is parsed by the ROSE front-end (which depends on the input language) into a SageIII AST (ROSE's AST). Then, SMECY pragmas are processed and translated into calls to the SMECY API. Finally, the ROSE back-end is called to produce C code with calls to the SMECY API, which can be compiled using a regular compiler.
\section{Features}
\scompiler currently supports the following features :
\begin{itemize}
\item translation of \verb+#pragma smecy map+ directives applied to function calls of the form \verb+function(parameters);+, \verb+varName = function(parameters);+ or \verb+type varName = function(parameters);+;
\item support for the following \verb+arg+ clauses: type (in, out...), size and range;
\item verification of the contiguity of the vector arguments in memory;
\item computing ranges to get the actual dimension of any argument, printing a warning when arguments with dimension $>1$ are used as vectors;
\item automatically finding the size of arrays if not specified in pragma.
\end{itemize}
\section{How to use}
\paragraph{Environment} Before using the compiler, a few environment variables should be set.
\begin{itemize}
\item add the \scompiler directory to the \verb+$PATH+:
\begin{verbatim}
export PATH=smecc_directory/:$PATH
\end{verbatim}
\item set \verb+SMECY_LIB+ to the directory containing the
SMECY library:
\begin{verbatim}
export SMECY_LIB=smecy_lib_directory/
\end{verbatim}
\end{itemize}
\paragraph{Usage}
\scompiler works mostly like a regular C/C++ compiler. Most usual C/C++ compiler flags will work, with a few exceptions and additions (see below). By default, it will \emph{not} compile SMECY pragmas (see below).
\paragraph{Specific flags}
\scompiler supports some specific flags. Here are a few examples; for a more complete list, type \verb+smecc --help+.
\begin{itemize}
\item \verb+-smecy+ triggers SMECY pragma
translation/compilation; if pragmas contain many expressions,
\scompiler may produce a lot of output: redirecting it with \verb+>/dev/null+ is
recommended;
\item \verb+-smecy-accel+ asks for the generation of the
accelerator parts, mainly by outlining the \texttt{map}-ped
function;
\item \verb+--smecy_lib=smecy_lib_directory/+ can be used to specify the path to the SMECY library; if specified, it will be used instead of the environment variable \verb+SMECY_LIB+;
\item \verb+-std=c99+ should be used when compiling C99;
\item \verb+-c+ will only translate the input file instead of compiling it; with input file \verb+fileName.C+, \scompiler will generate a \verb+rose_fileName.C+ file with calls to the SMECY API instead of SMECY pragmas;
\item \verb+-fopenmp+ triggers OpenMP pragma compilation using the back-end compiler.
\end{itemize}
\paragraph{Example} To compile a C99 input file with SMECY and OpenMP pragmas without useless output, type:
\begin{verbatim}
smecc -std=c99 -fopenmp -smecy input.c
\end{verbatim}
\section{Known bugs and limitations}
\paragraph{Features not yet implemented:}
\begin{itemize}
\item FORTRAN support;
\item only toy implementation of the SMECY API.
\end{itemize}
\paragraph{AstRewriteMechanism bugs:}
\begin{itemize}
\item crash if the C++ input file has certain extensions (like \verb+.cpp+); changing the extension to \verb+.C+ seems to solve the problem;
\item the parser called for the strings is always in C++ mode (not C), commenting out a few lines in a ROSE header prevents front-end errors;
\item the parsing of expressions is extremely slow (several seconds to parse ten expressions) and generates 1 file per expression to parse.
\end{itemize}
\paragraph{Compatibility with ROSE OpenMP lowering}
\begin{itemize}
\item ROSE OpenMP built-in support conflicts with smecy lowering and requires special handling;
\item OpenMP files lowered using XOMP library require special linking, see in \verb+rose_install_dir/src/midend/programTransformation/ompLowering/+ for the library files.
\end{itemize}
\paragraph{Other bugs}
\begin{itemize}
\item if \verb+-smecy+ is not set, multi-line pragmas will lose their \verb+\+ and fail to compile.
\end{itemize}
\bibliography{biblio}
\bibliographystyle{plain}
\end{document}
% Emacs religion:
%%% Local Variables:
%%% mode: latex
%%% ispell-local-dictionary: "american"
%%% TeX-PDF-mode: t
%%% TeX-master: t
%%% End:
% Be fair, vi religion too :-)
% vim: spell spelllang=en
| {
"alphanum_fraction": 0.7217125382,
"avg_line_length": 44.119047619,
"ext": "tex",
"hexsha": "4119ac0e7deac66e7ef3a12e672b59507c316db4",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "ddb9361aba2887151ff001d96be6f2ef03ca1af2",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "keryell/smecc",
"max_forks_repo_path": "doc/user_guide/doc.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "ddb9361aba2887151ff001d96be6f2ef03ca1af2",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "keryell/smecc",
"max_issues_repo_path": "doc/user_guide/doc.tex",
"max_line_length": 558,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "ddb9361aba2887151ff001d96be6f2ef03ca1af2",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "keryell/smecc",
"max_stars_repo_path": "doc/user_guide/doc.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1493,
"size": 5559
} |
\documentclass[aspectratio=169]{beamer}
\usepackage[utf8]{inputenc} % required for umlauts
\usepackage[english]{babel} % language
%\usepackage[sfdefault]{roboto} % enable sans serif font roboto
%\usepackage{libertine} % enable this on Windows to allow for microtype
\usepackage[T1]{fontenc} % required for output of umlauts in PDF
\usepackage{mathtools} % required for formulas
\usepackage{caption} % Customize caption aesthetics
\usepackage{tcolorbox} % fancy colored boxes
\usepackage{xcolor} % Highlighting
\usepackage{soul}
\usepackage{graphicx} % required to insert images
\usepackage{subcaption} % enable sub-figure
\usepackage[space]{grffile} % insert images baring a filename which contains spaces
\usepackage{float} % allow to forcefully set the location of an object
\usepackage[tracking=true]{microtype} % required to change character spacing
\usepackage[style=numeric,backend=biber]{biblatex}
\usepackage{hyperref} % insert clickable references
\usepackage{datetime} % flexible date specification
\newcommand{\leadingzero}[1]{\ifnum#1<10 0\the#1\else\the#1\fi}
\newcommand{\todayddmmyyyy}{\leadingzero{\day}.\leadingzero{\month}.\the\year}
\newcommand{\mathcolorbox}[2]{\colorbox{#1}{$\displaystyle #2$}}
\usepackage{geometry}
\usepackage{scrextend} % allow arbitrary indentation
\usepackage{color}
\setbeamercolor{title}{fg=orange}
\setbeamertemplate{title}{
\color{orange}
\textbf{\inserttitle}
}
\setbeamercolor{tableofcontents}{fg=orange}
\setbeamercolor{section in toc}{fg=black}
\setbeamercolor{subsection in toc}{fg=black}
\setbeamertemplate{frametitle}{
%\vspace{0.5em}
\color{orange}
\begin{center}
\textbf{\insertframetitle} \\
{\small \insertframesubtitle}
\end{center}
}
\setbeamertemplate{footline}[text line]{
\parbox{\linewidth}{
\color{gray}
\vspace*{-1em}
PSRC 2018
\hfill
Gordian (\href{mailto:[email protected]}{[email protected]})
\hfill
\insertpagenumber
}
}
\setbeamertemplate{navigation symbols}{}
\setbeamertemplate{itemize item}{\color{black}$\bullet$}
\setbeamertemplate{itemize subitem}{\color{black}$\circ$}
\setbeamercolor{block title}{fg=black}
\captionsetup{font=scriptsize,labelfont={bf,scriptsize}}
\title{Seventh Weekly Update on `Optimization~of~Particle~Identification'}
\subtitle{Neyman-Pearson by detector, pt and cosTheta; Abundance comparisons; Neural Network for different optimizers and various parameters}
\author[Edenhofer]{\href{mailto:[email protected]}{Gordian Edenhofer}}
\institute[LMU]{
Working Group of Prof.~Dr.~Kuhr \\
Faculty of Physics \\
Excellence Cluster Universe
}
\date[BA Thesis 2018]{\today}
\subject{Particle Physics}
\begin{document}
\section{Git log}
\begin{frame}
\frametitle{\insertsection}
\begin{itemize}
\item Neyman-Pearson with new CDC model
\item Neural network
\begin{itemize}
\item{By optimizer}
\item{By number of principal components}
\end{itemize}
\item Writing the thesis
\end{itemize}
\end{frame}
\section{Neyman-Pearson}
\subsection{Anomalies}
\begin{frame}
\frametitle{\insertsection}
\framesubtitle{\insertsubsection}
\begin{figure}
\centering
\subcaptionbox{Unset}{
\includegraphics[width=0.45\textwidth,height=\textheight,keepaspectratio]{{{../res/unset/pidProbability Approach: Relative p Abundance in Likelihood Ratio Bins for CDC detector}}}
}
\subcaptionbox{Set}{
\includegraphics[width=0.45\textwidth,height=\textheight,keepaspectratio]{{{../res/set/pidProbability Approach: Relative p Abundance in Likelihood Ratio Bins for CDC detector}}}
}
\caption{Relative $p$ Abundance in Likelihood Ratio Bins for the `CDC' detector for different detector calibrations.}
\end{figure}
\end{frame}
\section{Neural network}
\subsection{By optimizer}
\begin{frame}
\frametitle{\insertsection}
\framesubtitle{\insertsubsection}
\begin{figure}
\centering
\subcaptionbox{RMSprop}{
\includegraphics[width=0.3\textwidth,height=\textheight,keepaspectratio]{{{../res/charged 01/Neural Network Model: Accuracy pca ncomponents70 fair nLayers8 Optimizerrmsprop LearningRateNone nEpochs20 BatchSize256}}}
}
\subcaptionbox{Adadelta}{
\includegraphics[width=0.3\textwidth,height=\textheight,keepaspectratio]{{{../res/charged 01/Neural Network Model: Accuracy pca ncomponents70 fair nLayers8 Optimizeradadelta LearningRateNone nEpochs15 BatchSize256}}}
}
\subcaptionbox{Adamax}{
\includegraphics[width=0.3\textwidth,height=\textheight,keepaspectratio]{{{../res/charged 01/Neural Network Model: Accuracy pca ncomponents70 fair nLayers8 Optimizeradamax LearningRateNone nEpochs15 BatchSize256}}}
}
\caption{Accuracy by optimizer for a PCA feature selection and using fair particle sampling.}
\end{figure}
\end{frame}
\subsection{By number of principal components for Adadelta}
\begin{frame}
\frametitle{\insertsection}
\framesubtitle{\insertsubsection}
\begin{figure}
\centering
\subcaptionbox{50}{
\includegraphics[width=0.45\textwidth,height=\textheight,keepaspectratio]{{{../res/charged 01/Neural Network Model: Accuracy pca ncomponents50 fair nLayers8 Optimizeradadelta LearningRateNone nEpochs15 BatchSize256}}}
}
\subcaptionbox{70}{
\includegraphics[width=0.45\textwidth,height=\textheight,keepaspectratio]{{{../res/charged 01/Neural Network Model: Accuracy pca ncomponents70 fair nLayers8 Optimizeradadelta LearningRateNone nEpochs15 BatchSize256}}}
}
\caption{Accuracy of the Adadelta optimizer by number of principal components and using fair particle sampling.}
\end{figure}
\end{frame}
\subsection{Final Identification results}
\begin{frame}
\frametitle{\insertsection}
\framesubtitle{\insertsubsection}
\begin{figure}
\centering
\includegraphics[width=\textwidth,height=0.65\textheight,keepaspectratio]{{{../res/charged 01/Diff Heatmap: Heatmap of epsilonPID Matrix for an exclusive Cut by pt & cos(Theta), via NN}}}
\caption{Heatmap of the $\epsilon_{PID}$ matrix for an exclusive Cut via multivariate Bayes and via a neural network.}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{\insertsection}
\framesubtitle{\insertsubsection}
\begin{figure}
\centering
\includegraphics[width=\textwidth,height=0.65\textheight,keepaspectratio]{{{../res/charged 01/Diff Abundances: Particle Abundances in the K+-Data via PID, via NN}}}
\caption{Assumed particle abundances via multivariate Bayes and via a neural network.}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{\insertsection}
\framesubtitle{\insertsubsection}
\begin{figure}
\centering
\subcaptionbox{Kaon}{
\includegraphics[width=0.47\textwidth,height=\textheight,keepaspectratio]{{{../res/charged 01/Diff Statistics: K Identification (without Ratios) TPR over PPV by pt & cos(Theta), via NN}}}
}
\subcaptionbox{Pion}{
\includegraphics[width=0.47\textwidth,height=\textheight,keepaspectratio]{{{../res/charged 01/Diff Statistics: pi Identification (without Ratios) TPR over PPV by pt & cos(Theta), via NN}}}
}
\caption{TPR over PPV for various methods of identifying particles.}
\end{figure}
\end{frame}
\section{Appendix}
\subsection{Anomalies in bins for generic mixed decay}
\begin{frame}
\frametitle{\insertsection}
\framesubtitle{\insertsubsection}
\begin{figure}
\centering
\subcaptionbox{Charged}{
\includegraphics[width=0.45\textwidth,height=\textheight,keepaspectratio]{{{../res/charged 01/pidProbability Approach: Relative p Abundance in Likelihood Ratio Bins for CDC detector for equal size pt bins}}}
}
\subcaptionbox{Mixed}{
\includegraphics[width=0.45\textwidth,height=\textheight,keepaspectratio]{{{../res/mixed 01/pidProbability Approach: Relative p Abundance in Likelihood Ratio Bins for CDC detector for equal size pt bins}}}
}
\caption{Relative $p$ Abundance in Likelihood Ratio Bins for the `ALL' detector using \textit{equal~height} $p_t$ bins.}
\end{figure}
\end{frame}
\subsection{Anomalies by detector}
\begin{frame}
\frametitle{\insertsection}
\framesubtitle{\insertsubsection}
\begin{figure}
\centering
\subcaptionbox{SVD}{
\includegraphics[width=\textwidth,height=0.22\textheight,keepaspectratio]{{{../res/set/pidProbability Approach: Relative p Abundance in Likelihood Ratio Bins for SVD detector}}}
}
\subcaptionbox{CDC}{
\includegraphics[width=\textwidth,height=0.22\textheight,keepaspectratio]{{{../res/set/pidProbability Approach: Relative p Abundance in Likelihood Ratio Bins for CDC detector}}}
}
\subcaptionbox{TOP}{
\includegraphics[width=\textwidth,height=0.22\textheight,keepaspectratio]{{{../res/set/pidProbability Approach: Relative p Abundance in Likelihood Ratio Bins for TOP detector}}}
}
\subcaptionbox{ARICH}{
\includegraphics[width=\textwidth,height=0.22\textheight,keepaspectratio]{{{../res/set/pidProbability Approach: Relative p Abundance in Likelihood Ratio Bins for ARICH detector}}}
}
\subcaptionbox{ECL}{
\includegraphics[width=\textwidth,height=0.22\textheight,keepaspectratio]{{{../res/set/pidProbability Approach: Relative p Abundance in Likelihood Ratio Bins for ECL detector}}}
}
\subcaptionbox{KLM}{
\includegraphics[width=\textwidth,height=0.22\textheight,keepaspectratio]{{{../res/set/pidProbability Approach: Relative p Abundance in Likelihood Ratio Bins for KLM detector}}}
}
\caption{Relative $p$ abundance in likelihood ratio bins for various detectors.}
\end{figure}
\end{frame}
\subsection{By using `All' approach }
\begin{frame}
\frametitle{\insertsection}
\framesubtitle{\insertsubsection}
\begin{figure}
\centering
\includegraphics[width=0.55\textwidth,height=\textheight,keepaspectratio]{{{../res/charged 01/Neural Network Model: Accuracy all fair nLayers7 Optimizerrmsprop LearningRateNone nEpochs15 BatchSize256}}}
\caption{Accuracy of the RMSprop optimizer using all features and fair particle sampling (with 7 layers).}
\end{figure}
\end{frame}
\subsection{By number of principal components for Adamax}
\begin{frame}
\frametitle{\insertsection}
\framesubtitle{\insertsubsection}
\begin{figure}
\centering
\subcaptionbox{50}{
\includegraphics[width=0.45\textwidth,height=\textheight,keepaspectratio]{{{../res/charged 01/Neural Network Model: Accuracy pca ncomponents50 fair nLayers8 Optimizeradamax LearningRateNone nEpochs15 BatchSize256}}}
}
\subcaptionbox{70}{
\includegraphics[width=0.45\textwidth,height=\textheight,keepaspectratio]{{{../res/charged 01/Neural Network Model: Accuracy pca ncomponents70 fair nLayers8 Optimizeradamax LearningRateNone nEpochs15 BatchSize256}}}
}
\caption{Accuracy of the Adamax optimizer by number of principal components and using fair particle sampling.}
\end{figure}
\end{frame}
\end{document}
| {
"alphanum_fraction": 0.7677227539,
"avg_line_length": 40.0111524164,
"ext": "tex",
"hexsha": "c965ef3cbf67ba5094d3bc8da1dae7642ecef769",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "9922a1fd3e5fbc39f701aa18cb4d2df37ead9693",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Edenhofer/PID-boost",
"max_forks_repo_path": "doc/updates/08-Weekly Update.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "9922a1fd3e5fbc39f701aa18cb4d2df37ead9693",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Edenhofer/PID-boost",
"max_issues_repo_path": "doc/updates/08-Weekly Update.tex",
"max_line_length": 220,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "9922a1fd3e5fbc39f701aa18cb4d2df37ead9693",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Edenhofer/PID-boost",
"max_stars_repo_path": "doc/updates/08-Weekly Update.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 3296,
"size": 10763
} |
%
% The OpenCV cheatsheet structure:
%
% opencv data structures
% point, rect
% matrix
%
% creating matrices
% from scratch
% from previously allocated data: plain arrays, vectors
% converting to/from old-style structures
%
% element access, iteration through matrix elements
%
% copying & shuffling matrix data
% copying & converting the whole matrices
% extracting matrix parts & copying them
% split, merge & mixchannels
% flip, transpose, repeat
%
% matrix & image operations:
% arithmetics & logic
% matrix multiplication, inversion, determinant, trace, SVD
% statistical functions
%
% basic image processing:
% image filtering with predefined & custom filters
% example: finding local maxima
% geometrical transformations, resize, warpaffine, perspective & remap.
% color space transformations
% histograms & back projections
% contours
%
% i/o:
% displaying images
% saving/loading to/from file (XML/YAML & image file formats)
% reading videos & camera feed, writing videos
%
% operations on point sets:
% findcontours, bounding box, convex hull, min area rect,
% transformations, to/from homogeneous coordinates
% matching point sets: homography, fundamental matrix, rigid transforms
%
% 3d:
% camera calibration, pose estimation.
% uncalibrated case
% stereo: rectification, running stereo correspondence, obtaining the depth.
%
% feature detection:
% features2d toolbox
%
% object detection:
% using a classifier running on a sliding window: cascadeclassifier + hog.
% using salient point features: features2d -> matching
%
% statistical data processing:
% clustering (k-means),
% classification + regression (SVM, boosting, k-nearest),
% compressing data (PCA)
%
\documentclass[10pt,landscape]{article}
\usepackage[usenames,dvips,pdftex]{color}
\usepackage{multicol}
\usepackage{calc}
\usepackage{ifthen}
\usepackage[pdftex]{color,graphicx}
\usepackage[landscape]{geometry}
\usepackage{hyperref}
\hypersetup{colorlinks=true, filecolor=black, linkcolor=black, urlcolor=blue, citecolor=black}
\graphicspath{{./images/}}
% This sets page margins to .5 inch if using letter paper, and to 1cm
% if using A4 paper. (This probably isn't strictly necessary.)
% If using another size paper, use default 1cm margins.
\ifthenelse{\lengthtest { \paperwidth = 11in}}
{ \geometry{top=.5in,left=.5in,right=.5in,bottom=.5in} }
{\ifthenelse{ \lengthtest{ \paperwidth = 297mm}}
{\geometry{top=1cm,left=1cm,right=1cm,bottom=1cm} }
{\geometry{top=1cm,left=1cm,right=1cm,bottom=1cm} }
}
% Turn off header and footer
% \pagestyle{empty}
% Redefine section commands to use less space
\makeatletter
\renewcommand{\section}{\@startsection{section}{1}{0mm}%
{-1ex plus -.5ex minus -.2ex}%
{0.5ex plus .2ex}%x
{\normalfont\large\bfseries}}
\renewcommand{\subsection}{\@startsection{subsection}{2}{0mm}%
{-1explus -.5ex minus -.2ex}%
{0.5ex plus .2ex}%
{\normalfont\normalsize\bfseries}}
\renewcommand{\subsubsection}{\@startsection{subsubsection}{3}{0mm}%
{-1ex plus -.5ex minus -.2ex}%
{1ex plus .2ex}%
{\normalfont\small\bfseries}}
\makeatother
% Define BibTeX command
\def\BibTeX{{\rm B\kern-.05em{\sc i\kern-.025em b}\kern-.08em
T\kern-.1667em\lower.7ex\hbox{E}\kern-.125emX}}
% Don't print section numbers
\setcounter{secnumdepth}{0}
%\setlength{\parindent}{0pt}
%\setlength{\parskip}{0pt plus 0.5ex}
\newcommand{\ccode}[1]{
\begin{alltt}
#1
\end{alltt}
}
% -----------------------------------------------------------------------
\begin{document}
\raggedright
\footnotesize
\begin{multicols}{3}
% multicol parameters
% These lengths are set only within the two main columns
%\setlength{\columnseprule}{0.25pt}
\setlength{\premulticols}{1pt}
\setlength{\postmulticols}{1pt}
\setlength{\multicolsep}{1pt}
\setlength{\columnsep}{2pt}
\begin{center}
\Large{\textbf{OpenCV 2.3 Cheat Sheet (C++)}} \\
\end{center}
\newlength{\MyLen}
\settowidth{\MyLen}{\texttt{letterpaper}/\texttt{a4paper} \ }
%\section{Filesystem Concepts}
%\begin{tabular}{@{}p{\the\MyLen}%
% @{}p{\linewidth-\the\MyLen}@{}}
%\texttt{\href{http://www.ros.org/wiki/Packages}{package}} & The lowest level of ROS software organization. \\
%\texttt{\href{http://www.ros.org/wiki/Manifest}{manifest}} & Description of a ROS package. \\
%\texttt{\href{http://www.ros.org/wiki/Stack}{stack}} & Collections of ROS packages that form a higher-level library. \\
%\texttt{\href{http://www.ros.org/wiki/Stack Manifest}{stack manifest}} & Description of a ROS stack.
%\end{tabular}
\emph{The OpenCV C++ reference manual is here: \url{http://opencv.willowgarage.com/documentation/cpp/}. Use \textbf{Quick Search} to find descriptions of the particular functions and classes}
\section{Key OpenCV Classes}
\begin{tabular}{@{}p{\the\MyLen}%
@{}p{\linewidth-\the\MyLen}@{}}
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#point}{Point\_}} & Template 2D point class \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#point3}{Point3\_}} & Template 3D point class \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#size}{Size\_}} & Template size (width, height) class \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#vec}{Vec}} & Template short vector class \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#vec}{Matx}} & Template small matrix class \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#scalar}{Scalar}} & 4-element vector \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#rect}{Rect}} & Rectangle \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#range}{Range}} & Integer value range \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#mat}{Mat}} & 2D or multi-dimensional dense array (can be used to store matrices, images, histograms, feature descriptors, voxel volumes etc.)\\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#sparsemat}{SparseMat}} & Multi-dimensional sparse array \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#ptr}{Ptr}} & Template smart pointer class
\end{tabular}
\section{Matrix Basics}
\begin{tabbing}
\textbf{Cr}\=\textbf{ea}\=\textbf{te}\={} \textbf{a matrix} \\
\> \texttt{Mat image(240, 320, CV\_8UC3);} \\
\textbf{[Re]allocate a pre-declared matrix}\\
\> \texttt{image.\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#Mat::create}{create}(480, 640, CV\_8UC3);}\\
\textbf{Create a matrix initialized with a constant}\\
\> \texttt{Mat A33(3, 3, CV\_32F, Scalar(5));} \\
\> \texttt{Mat B33(3, 3, CV\_32F); B33 = Scalar(5);} \\
\> \texttt{Mat C33 = Mat::ones(3, 3, CV\_32F)*5.;} \\
\> \texttt{Mat D33 = Mat::zeros(3, 3, CV\_32F) + 5.;} \\
\textbf{Create a matrix initialized with specified values}\\
\> \texttt{double a = CV\_PI/3;} \\
\> \texttt{Mat A22 = (Mat\_<float>(2, 2) <<} \\
\> \> \texttt{cos(a), -sin(a), sin(a), cos(a));} \\
\> \texttt{float B22data[] = \{cos(a), -sin(a), sin(a), cos(a)\};} \\
\> \texttt{Mat B22 = Mat(2, 2, CV\_32F, B22data).clone();}\\
\textbf{Initialize a random matrix}\\
\> \texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-randu}{randu}(image, Scalar(0), Scalar(256)); }\textit{// uniform dist}\\
\> \texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-randn}{randn}(image, Scalar(128), Scalar(10)); }\textit{// Gaussian dist}\\
\textbf{Convert matrix to/from other structures}\\
\>\textbf{(without copying the data)}\\
\> \texttt{Mat image\_alias = image;}\\
\> \texttt{float* Idata=new float[480*640*3];}\\
\> \texttt{Mat I(480, 640, CV\_32FC3, Idata);}\\
\> \texttt{vector<Point> iptvec(10);}\\
\> \texttt{Mat iP(iptvec); }\textit{// iP -- 10x1 CV\_32SC2 matrix}\\
\> \texttt{IplImage* oldC0 = cvCreateImage(cvSize(320,240),16,1);}\\
\> \texttt{Mat newC = cvarrToMat(oldC0);}\\
\> \texttt{IplImage oldC1 = newC; CvMat oldC2 = newC;}\\
\textbf{... (with copying the data)}\\
\> \texttt{Mat newC2 = cvarrToMat(oldC0).clone();}\\
\> \texttt{vector<Point2f> ptvec = Mat\_<Point2f>(iP);}\\
\>\\
\textbf{Access matrix elements}\\
\> \texttt{A33.at<float>(i,j) = A33.at<float>(j,i)+1;}\\
\> \texttt{Mat dyImage(image.size(), image.type());}\\
\> \texttt{for(int y = 1; y < image.rows-1; y++) \{}\\
\> \> \texttt{Vec3b* prevRow = image.ptr<Vec3b>(y-1);}\\
\> \> \texttt{Vec3b* nextRow = image.ptr<Vec3b>(y+1);}\\
\> \> \texttt{for(int x = 0; x < image.cols; x++)}\\
\> \> \> \texttt{for(int c = 0; c < 3; c++)}\\
\> \> \> \texttt{ dyImage.at<Vec3b>(y,x)[c] =}\\
\> \> \> \texttt{ saturate\_cast<uchar>(}\\
\> \> \> \texttt{ nextRow[x][c] - prevRow[x][c]);}\\
\> \texttt{\} }\\
\> \texttt{Mat\_<Vec3b>::iterator it = image.begin<Vec3b>(),}\\
\> \> \texttt{itEnd = image.end<Vec3b>();}\\
\> \texttt{for(; it != itEnd; ++it)}\\
\> \> \texttt{(*it)[1] \textasciicircum{}= 255;}\\
\end{tabbing}
\section{Matrix Manipulations: Copying, Shuffling, Part Access}
\begin{tabular}{@{}p{\the\MyLen}%
@{}p{\linewidth-\the\MyLen}@{}}
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#Mat::copyTo}{src.copyTo(dst)}} & Copy matrix to another one \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#Mat::convertTo}{src.convertTo(dst,type,scale,shift)}} & \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ Scale and convert to another datatype \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#Mat::clone}{m.clone()}} & Make deep copy of a matrix \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#Mat::reshape}{m.reshape(nch,nrows)}} & Change matrix dimensions and/or number of channels without copying data \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#Mat::row}{m.row(i)}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#Mat::col}{m.col(i)}} & Take a matrix row/column \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#Mat::rowRange}{m.rowRange(Range(i1,i2))}}
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#Mat::colRange}{m.colRange(Range(j1,j2))}} & \ \ \ \ \ \ \ Take a matrix row/column span \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#Mat::diag}{m.diag(i)}} & Take a matrix diagonal \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#index-1245}{m(Range(i1,i2),Range(j1,j2)), m(roi)}} & \ \ \ \ \ \ \ \ \ \ \ \ \ Take a submatrix \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html\#Mat::repeat}{m.repeat(ny,nx)}} & Make a bigger matrix from a smaller one \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-flip}{flip(src,dst,dir)}} & Reverse the order of matrix rows and/or columns \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-split}{split(...)}} & Split multi-channel matrix into separate channels \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-merge}{merge(...)}} & Make a multi-channel matrix out of the separate channels \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-mixchannels}{mixChannels(...)}} & Generalized form of split() and merge() \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-randshuffle}{randShuffle(...)}} & Randomly shuffle matrix elements \\
\end{tabular}
\begin{tabbing}
Exa\=mple 1. Smooth image ROI in-place\\
\>\texttt{Mat imgroi = image(Rect(10, 20, 100, 100));}\\
\>\texttt{GaussianBlur(imgroi, imgroi, Size(5, 5), 1.2, 1.2);}\\
Example 2. Somewhere in a linear algebra algorithm \\
\>\texttt{m.row(i) += m.row(j)*alpha;}\\
Example 3. Copy image ROI to another image with conversion\\
\>\texttt{Rect r(1, 1, 10, 20);}\\
\>\texttt{Mat dstroi = dst(Rect(0,10,r.width,r.height));}\\
\>\texttt{src(r).convertTo(dstroi, dstroi.type(), 1, 0);}\\
\end{tabbing}
\section{Simple Matrix Operations}
OpenCV implements the most common arithmetic, logical and
other matrix operations, such as
\begin{itemize}
\item
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-add}{add()}}, \texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-subtract}{subtract()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-multiply}{multiply()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-divide}{divide()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-absdiff}{absdiff()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#bitwise-and}{bitwise\_and()}}, \texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#bitwise-or}{bitwise\_or()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#bitwise-xor}{bitwise\_xor()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-max}{max()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-min}{min()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-compare}{compare()}}
-- correspondingly, addition, subtraction, element-wise multiplication ... comparison of two matrices or a matrix and a scalar.
\begin{tabbing}
Exa\=mple. \href{http://en.wikipedia.org/wiki/Alpha_compositing}{Alpha compositing} function:\\
\texttt{void alphaCompose(const Mat\& rgba1,}\\
\> \texttt{const Mat\& rgba2, Mat\& rgba\_dest)}\\
\texttt{\{ }\\
\> \texttt{Mat a1(rgba1.size(), rgba1.type()), ra1;}\\
\> \texttt{Mat a2(rgba2.size(), rgba2.type());}\\
\> \texttt{int mixch[]=\{3, 0, 3, 1, 3, 2, 3, 3\};}\\
\> \texttt{mixChannels(\&rgba1, 1, \&a1, 1, mixch, 4);}\\
\> \texttt{mixChannels(\&rgba2, 1, \&a2, 1, mixch, 4);}\\
\> \texttt{subtract(Scalar::all(255), a1, ra1);}\\
\> \texttt{bitwise\_or(a1, Scalar(0,0,0,255), a1);}\\
\> \texttt{bitwise\_or(a2, Scalar(0,0,0,255), a2);}\\
\> \texttt{multiply(a2, ra1, a2, 1./255);}\\
\> \texttt{multiply(a1, rgba1, a1, 1./255);}\\
\> \texttt{multiply(a2, rgba2, a2, 1./255);}\\
\> \texttt{add(a1, a2, rgba\_dest);}\\
\texttt{\}}
\end{tabbing}
\item
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-sum}{sum()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-mean}{mean()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-mean-stddev}{meanStdDev()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-norm}{norm()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-countnonzero}{countNonZero()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-minmaxloc}{minMaxLoc()}},
-- various statistics of matrix elements.
\item
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-exp}{exp()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-log}{log()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-pow}{pow()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-sqrt}{sqrt()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-carttopolar}{cartToPolar()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-polarToCart}{polarToCart()}}
-- the classical math functions.
\item
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-scaleadd}{scaleAdd()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-transpose}{transpose()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-gemm}{gemm()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-invert}{invert()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-solve}{solve()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-determinant}{determinant()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-trace}{trace()}}
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-eigen}{eigen()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-SVD}{SVD}},
-- the algebraic functions + SVD class.
\item
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-dft}{dft()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-idft}{idft()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-dct}{dct()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/core_operations_on_arrays.html\#cv-idct}{idct()}},
-- discrete Fourier and cosine transformations
\end{itemize}
For some operations a more convenient \href{http://opencv.willowgarage.com/documentation/cpp/core_basic_structures.html#matrix-expressions}{algebraic notation} can be used, for example:
\begin{tabbing}
\texttt{Mat}\={} \texttt{delta = (J.t()*J + lambda*}\\
\>\texttt{Mat::eye(J.cols, J.cols, J.type()))}\\
\>\texttt{.inv(CV\_SVD)*(J.t()*err);}
\end{tabbing}
implements the core of the Levenberg-Marquardt optimization algorithm.
\section{Image Processing}
\subsection{Filtering}
\begin{tabular}{@{}p{\the\MyLen}%
@{}p{\linewidth-\the\MyLen}@{}}
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_image_filtering.html\#cv-filter2d}{filter2D()}} & Non-separable linear filter \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_image_filtering.html\#cv-sepfilter2d}{sepFilter2D()}} & Separable linear filter \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_image_filtering.html\#cv-blur}{boxFilter()}}, \texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_image_filtering.html\#cv-gaussianblur}{GaussianBlur()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_image_filtering.html\#cv-medianblur}{medianBlur()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_image_filtering.html\#cv-bilateralfilter}{bilateralFilter()}}
& Smooth the image with one of the linear or non-linear filters \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_image_filtering.html\#cv-sobel}{Sobel()}}, \texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_image_filtering.html\#cv-scharr}{Scharr()}}
& Compute the spatial image derivatives \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_image_filtering.html\#cv-laplacian}{Laplacian()}} & Compute the Laplacian: $\Delta I = \frac{\partial ^ 2 I}{\partial x^2} + \frac{\partial ^ 2 I}{\partial y^2}$ \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_image_filtering.html\#cv-erode}{erode()}}, \texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_image_filtering.html\#cv-dilate}{dilate()}} & Morphological operations \\
\end{tabular}
\begin{tabbing}
Exa\=mple. Filter image in-place with a 3x3 high-pass kernel\\
\> (preserve negative responses by shifting the result by 128):\\
\texttt{filter2D(image, image, image.depth(), (Mat\_<float>(3,3)<<}\\
\> \texttt{-1, -1, -1, -1, 9, -1, -1, -1, -1), Point(1,1), 128);}\\
\end{tabbing}
\subsection{Geometrical Transformations}
\begin{tabular}{@{}p{\the\MyLen}%
@{}p{\linewidth-\the\MyLen}@{}}
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_geometric_image_transformations.html\#cv-resize}{resize()}} & Resize image \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_geometric_image_transformations.html\#cv-getrectsubpix}{getRectSubPix()}} & Extract an image patch \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_geometric_image_transformations.html\#cv-warpaffine}{warpAffine()}} & Warp image affinely\\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_geometric_image_transformations.html\#cv-warpperspective}{warpPerspective()}} & Warp image perspectively\\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_geometric_image_transformations.html\#cv-remap}{remap()}} & Generic image warping\\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_geometric_image_transformations.html\#cv-convertmaps}{convertMaps()}} & Optimize maps for a faster remap() execution\\
\end{tabular}
\begin{tabbing}
Example. Decimate image by factor of $\sqrt{2}$:\\
\texttt{Mat dst; resize(src, dst, Size(), 1./sqrt(2), 1./sqrt(2));}
\end{tabbing}
\subsection{Various Image Transformations}
\begin{tabular}{@{}p{\the\MyLen}%
@{}p{\linewidth-\the\MyLen}@{}}
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_miscellaneous_image_transformations.html\#cvtColor}{cvtColor()}} & Convert image from one color space to another \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_miscellaneous_image_transformations.html\#threshold}{threshold()}}, \texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_miscellaneous_image_transformations.html\#adaptivethreshold}{adaptiveThreshold()}} & Convert grayscale image to binary image using a fixed or a variable threshold \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_miscellaneous_image_transformations.html\#floodfill}{floodFill()}} & Find a connected component using region growing algorithm\\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_miscellaneous_image_transformations.html\#floodfill}{integral()}} & Compute integral image \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_miscellaneous_image_transformations.html\#distancetransform}{distanceTransform()}}
& Build distance map or discrete Voronoi diagram for a binary image. \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_miscellaneous_image_transformations.html\#floodfill}{watershed()}},
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_miscellaneous_image_transformations.html\#grabcut}{grabCut()}}
& Marker-based image segmentation algorithms.
See the samples \texttt{\href{https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/watershed.cpp}{watershed.cpp}} and \texttt{\href{https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/grabcut.cpp}{grabcut.cpp}}.
\end{tabular}
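A minimal sketch of a typical use (here \texttt{image} is assumed to be a BGR image):
\begin{tabbing}
\texttt{Mat gray, bw;}\\
\texttt{cvtColor(image, gray, CV\_BGR2GRAY);}\\
\texttt{threshold(gray, bw, 0, 255, THRESH\_BINARY | THRESH\_OTSU);}
\end{tabbing}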
\subsection{Histograms}
\begin{tabular}{@{}p{\the\MyLen}%
@{}p{\linewidth-\the\MyLen}@{}}
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_histograms.html\#calchist}{calcHist()}} & Compute image(s) histogram \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_histograms.html\#calcbackproject}{calcBackProject()}} & Back-project the histogram \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_histograms.html\#equalizehist}{equalizeHist()}} & Normalize image brightness and contrast\\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/imgproc_histograms.html\#comparehist}{compareHist()}} & Compare two histograms\\
\end{tabular}
\begin{tabbing}
Example. Compute Hue-Saturation histogram of an image:\\
\texttt{Mat hsv, H;}\\
\texttt{cvtColor(image, hsv, CV\_BGR2HSV);}\\
\texttt{int planes[]=\{0, 1\}, hsize[] = \{32, 32\};}\\
\texttt{calcHist(\&hsv, 1, planes, Mat(), H, 2, hsize, 0);}\\
\end{tabbing}
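Two histograms computed in this way can be compared with \texttt{compareHist()}, for example (\texttt{H1} and \texttt{H2} are assumed to be computed as above):
\begin{tabbing}
\texttt{double similarity = compareHist(H1, H2, CV\_COMP\_CORREL);}
\end{tabbing}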
\subsection{Contours}
See \texttt{\href{https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/contours.cpp}{contours.cpp}} and \texttt{\href{https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/squares.cpp}{squares.cpp}}
samples to learn what contours are and how to use them.
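A minimal sketch of the typical call sequence (assuming \texttt{bw} is a binary 8-bit image; see the samples above for complete usage):
\begin{tabbing}
\texttt{vector<vector<Point> > contours;}\\
\textit{// findContours() modifies its input image}\\
\texttt{findContours(bw, contours, CV\_RETR\_EXTERNAL, CV\_CHAIN\_APPROX\_SIMPLE);}\\
\texttt{drawContours(image, contours, -1, Scalar(0, 255, 0)); }\textit{// draw all contours in green}
\end{tabbing}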
\section{Data I/O}
\href{http://opencv.willowgarage.com/documentation/cpp/core_xml_yaml_persistence.html\#filestorage}{XML/YAML storages} are collections (possibly nested) of scalar values, structures and heterogeneous lists.
\begin{tabbing}
\textbf{Wr}\=\textbf{iting data to YAML (or XML)}\\
\texttt{// Type of the file is determined from the extension}\\
\texttt{FileStorage fs("test.yml", FileStorage::WRITE);}\\
\texttt{fs << "i" << 5 << "r" << 3.1 << "str" << "ABCDEFGH";}\\
\texttt{fs << "mtx" << Mat::eye(3,3,CV\_32F);}\\
\texttt{fs << "mylist" << "[" << CV\_PI << "1+1" <<}\\
\>\texttt{"\{:" << "month" << 12 << "day" << 31 << "year"}\\
\>\texttt{<< 1969 << "\}" << "]";}\\
\texttt{fs << "mystruct" << "\{" << "x" << 1 << "y" << 2 <<}\\
\>\texttt{"width" << 100 << "height" << 200 << "lbp" << "[:";}\\
\texttt{const uchar arr[] = \{0, 1, 1, 0, 1, 1, 0, 1\};}\\
\texttt{fs.writeRaw("u", arr, (int)(sizeof(arr)/sizeof(arr[0])));}\\
\texttt{fs << "]" << "\}";}
\end{tabbing}
\emph{Scalars (integers, floating-point numbers, text strings), matrices, STL vectors of scalars and some other types can be written to the file storages using the \texttt{<<} operator}
\begin{tabbing}
\textbf{Re}\=\textbf{ading the data back}\\
\texttt{// Type of the file is determined from the content}\\
\texttt{FileStorage fs("test.yml", FileStorage::READ);}\\
\texttt{int i1 = (int)fs["i"]; double r1 = (double)fs["r"];}\\
\texttt{string str1 = (string)fs["str"];}\\
\texttt{Mat M; fs["mtx"] >> M;}\\
\texttt{FileNode tl = fs["mylist"];}\\
\texttt{CV\_Assert(tl.type() == FileNode::SEQ \&\& tl.size() == 3);}\\
\texttt{double tl0 = (double)tl[0]; string tl1 = (string)tl[1];}\\
\texttt{int m = (int)tl[2]["month"], d = (int)tl[2]["day"];}\\
\texttt{int year = (int)tl[2]["year"];}\\
\texttt{FileNode tm = fs["mystruct"];}\\
\texttt{Rect r; r.x = (int)tm["x"], r.y = (int)tm["y"];}\\
\texttt{r.width = (int)tm["width"], r.height = (int)tm["height"];}\\
\texttt{int lbp\_val = 0;}\\
\texttt{FileNodeIterator it = tm["lbp"].begin();}\\
\texttt{for(int k = 0; k < 8; k++, ++it)}\\
\>\texttt{lbp\_val |= ((int)*it) << k;}\\
\end{tabbing}
\emph{Scalars are read using the corresponding FileNode's cast operators. Matrices and some other types are read using the \texttt{>>} operator. Lists can be read using FileNodeIterators.}
\begin{tabbing}
\textbf{Wr}\=\textbf{iting and reading raster images}\\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/highgui_reading_and_writing_images_and_video.html\#cv-imwrite}{imwrite}("myimage.jpg", image);}\\
\texttt{Mat image\_color\_copy = \href{http://opencv.willowgarage.com/documentation/cpp/highgui_reading_and_writing_images_and_video.html\#cv-imread}{imread}("myimage.jpg", 1);}\\
\texttt{Mat image\_grayscale\_copy = \href{http://opencv.willowgarage.com/documentation/cpp/highgui_reading_and_writing_images_and_video.html\#cv-imread}{imread}("myimage.jpg", 0);}\\
\end{tabbing}
\emph{The functions can read/write images in the following formats: \textbf{BMP (.bmp), JPEG (.jpg, .jpeg), TIFF (.tif, .tiff), PNG (.png), PBM/PGM/PPM (.p?m), Sun Raster (.sr), JPEG 2000 (.jp2)}. Every format supports 8-bit, 1- or 3-channel images. Some formats (PNG, JPEG 2000) support 16 bits per channel.}
\begin{tabbing}
\textbf{Re}\=\textbf{ading video from a file or from a camera}\\
\texttt{VideoCapture cap;}\\
\texttt{if(argc > 1) cap.open(string(argv[1])); else cap.open(0);}\\
\texttt{Mat frame; namedWindow("video", 1);}\\
\texttt{for(;;) \{}\\
\>\texttt{cap >> frame; if(!frame.data) break;}\\
\>\texttt{imshow("video", frame); if(waitKey(30) >= 0) break;}\\
\texttt{\} }
\end{tabbing}
\section{Simple GUI (highgui module)}
\begin{tabular}{@{}p{\the\MyLen}%
@{}p{\linewidth-\the\MyLen}@{}}
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/highgui_user_interface.html\#cv-namedwindow}{namedWindow(winname,flags)}} & \ \ \ \ \ \ \ \ \ \ Create named highgui window \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/highgui_user_interface.html\#cv-destroywindow}{destroyWindow(winname)}} & \ \ \ Destroy the specified window \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/highgui_user_interface.html\#cv-imshow}{imshow(winname, mtx)}} & Show image in the window \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/highgui_user_interface.html\#cv-waitKey}{waitKey(delay)}} & Wait for a key press during the specified time interval (or forever). Process events while waiting. \emph{Do not forget to call this function several times a second in your code.} \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/highgui_user_interface.html\#cv-createTrackbar}{createTrackbar(...)}} & Add trackbar (slider) to the specified window \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/highgui_user_interface.html\#cv-setmousecallback}{setMouseCallback(...)}} & \ \ Set the callback on mouse clicks and movements in the specified window \\
\end{tabular}
See \texttt{\href{https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/camshiftdemo.cpp}{camshiftdemo.cpp}} and other \href{https://code.ros.org/svn/opencv/trunk/opencv/samples/}{OpenCV samples} on how to use the GUI functions.
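A minimal sketch of displaying a single image (assuming \texttt{image} has already been loaded):
\begin{tabbing}
\texttt{namedWindow("image", 1);}\\
\texttt{imshow("image", image);}\\
\texttt{waitKey(0); }\textit{// wait indefinitely for a key press}
\end{tabbing}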
\section{Camera Calibration, Pose Estimation and Depth Estimation}
\begin{tabular}{@{}p{\the\MyLen}%
@{}p{\linewidth-\the\MyLen}@{}}
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/calib3d_camera_calibration_and_3d_reconstruction.html\#cv-calibratecamera}{calibrateCamera()}} & Calibrate camera from several views of a calibration pattern. \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/calib3d_camera_calibration_and_3d_reconstruction.html\#cv-findchessboardcorners}{findChessboardCorners()}} & \ \ \ \ \ \ Find feature points on the checkerboard calibration pattern. \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/calib3d_camera_calibration_and_3d_reconstruction.html\#cv-solvepnp}{solvePnP()}} & Find the object pose from the known projections of its feature points. \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/calib3d_camera_calibration_and_3d_reconstruction.html\#cv-stereocalibrate}{stereoCalibrate()}} & Calibrate stereo camera. \\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/calib3d_camera_calibration_and_3d_reconstruction.html\#cv-stereorectify}{stereoRectify()}} & Compute the rectification transforms for a calibrated stereo camera.\\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/calib3d_camera_calibration_and_3d_reconstruction.html\#cv-initundistortrectifymap}{initUndistortRectifyMap()}} & \ \ \ \ \ \ Compute rectification map (for \texttt{remap()}) for each stereo camera head.\\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/calib3d_camera_calibration_and_3d_reconstruction.html\#cv-StereoBM}{StereoBM}}, \texttt{\href{http://opencv.willowgarage.com/documentation/cpp/calib3d_camera_calibration_and_3d_reconstruction.html\#cv-StereoSGBM}{StereoSGBM}} & The stereo correspondence engines to be run on rectified stereo pairs.\\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/calib3d_camera_calibration_and_3d_reconstruction.html\#cv-reprojectimageto3d}{reprojectImageTo3D()}} & Convert disparity map to 3D point cloud.\\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/calib3d_camera_calibration_and_3d_reconstruction.html\#cv-findhomography}{findHomography()}} & Find best-fit perspective transformation between two 2D point sets. \\
\end{tabular}
To calibrate a camera, you can use \texttt{\href{https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/calibration.cpp}{calibration.cpp}} or
\texttt{\href{https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/stereo\_calib.cpp}{stereo\_calib.cpp}} samples.
To get the disparity maps and the point clouds, use
\texttt{\href{https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/stereo\_match.cpp}{stereo\_match.cpp}} sample.
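A minimal sketch of pose estimation with \texttt{solvePnP()}; here \texttt{objPts}, \texttt{imgPts}, \texttt{camMat} and \texttt{distCoeffs} are placeholders for prepared 3D--2D point correspondences and calibration results:
\begin{tabbing}
\texttt{Mat rvec, tvec;}\\
\texttt{solvePnP(objPts, imgPts, camMat, distCoeffs, rvec, tvec);}
\end{tabbing}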
\section{Object Detection}
\begin{tabular}{@{}p{\the\MyLen}%
@{}p{\linewidth-\the\MyLen}@{}}
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/objdetect__object_detection.html\#matchTemplate}{matchTemplate}} & Compute proximity map for given template.\\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/objdetect__object_detection.html\#CascadeClassifier}{CascadeClassifier}} & Viola's Cascade of Boosted classifiers using Haar or LBP features. Suited for detecting faces, facial features and some other objects without diverse textures. See \texttt{\href{https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/facedetect.cpp}{facedetect.cpp}}\\
\texttt{\href{http://opencv.willowgarage.com/documentation/cpp/objdetect__object_detection.html\#HOGDescriptor}{HOGDescriptor}} & N. Dalal's object detector using Histogram-of-Oriented-Gradients (HOG) features. Suited for detecting people, cars and other objects with well-defined silhouettes. See \texttt{\href{https://code.ros.org/svn/opencv/trunk/opencv/samples/cpp/peopledetect.cpp}{peopledetect.cpp}}\\
\end{tabular}
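A minimal face-detection sketch (the cascade file name is a placeholder; see \texttt{facedetect.cpp} for the full pipeline):
\begin{tabbing}
\texttt{CascadeClassifier cascade("haarcascade\_frontalface\_alt.xml");}\\
\texttt{Mat gray; vector<Rect> faces;}\\
\texttt{cvtColor(image, gray, CV\_BGR2GRAY); equalizeHist(gray, gray);}\\
\texttt{cascade.detectMultiScale(gray, faces, 1.1, 3);}
\end{tabbing}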
%
% feature detection:
% features2d toolbox
%
% object detection:
% using a classifier running on a sliding window: cascadeclassifier + hog.
% using salient point features: features2d -> matching
%
% statistical data processing:
% clustering (k-means),
% classification + regression (SVM, boosting, k-nearest),
% compressing data (PCA)
\end{multicols}
\end{document}
| {
"alphanum_fraction": 0.7189394369,
"avg_line_length": 57.1229773463,
"ext": "tex",
"hexsha": "125058d9e761c4e7e802d32336e584c5cdfb052c",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-07-14T13:23:14.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-07-14T13:23:14.000Z",
"max_forks_repo_head_hexsha": "ec0a671bc6df3c5f0fe3a94d07b6748a14a8ba91",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "fughz/frayer",
"max_forks_repo_path": "3rdparty/OpenCV-2.3.0/doc/opencv_cheatsheet.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "ec0a671bc6df3c5f0fe3a94d07b6748a14a8ba91",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "fughz/frayer",
"max_issues_repo_path": "3rdparty/OpenCV-2.3.0/doc/opencv_cheatsheet.tex",
"max_line_length": 406,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "ec0a671bc6df3c5f0fe3a94d07b6748a14a8ba91",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "waura/frayer",
"max_stars_repo_path": "3rdparty/OpenCV-2.3.0/doc/opencv_cheatsheet.tex",
"max_stars_repo_stars_event_max_datetime": "2019-02-28T07:40:01.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-02-28T07:40:01.000Z",
"num_tokens": 10407,
"size": 35302
} |
\documentclass[12pt]{article}
\usepackage[]{algorithm2e}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage[hyphens]{url}
\usepackage{listings}
\usepackage{xcolor}
\definecolor{listinggray}{gray}{0.9}
\definecolor{lbcolor}{rgb}{0.9,0.9,0.9}
\lstset{
backgroundcolor=\color{lbcolor},
tabsize=4,
language=C++,
captionpos=b,
tabsize=3,
frame=lines,
numbers=left,
numberstyle=\tiny,
numbersep=5pt,
breaklines=true,
showstringspaces=false,
basicstyle=\footnotesize,
% identifierstyle=\color{magenta},
keywordstyle=\color[rgb]{0,0,1},
commentstyle=\color{Darkgreen},
stringstyle=\color{red}
}
\begin{document}
\title{Project 03 CS-790}
\author{Brandon Bluemner}
\date{2017}
\maketitle
% ================================================
% Abstract
% ================================================
\begin{abstract}
Algorithmic approach to the Matrix Shifting Puzzle
\end{abstract}
% =================================================
% Overview
% =================================================
\section{Overview}
\subsection{Matrix shifting Puzzle}
Let $M$ be an $n \times n$ array of the numbers $1$ through $n^2$. Each move
$\mathit{rotate}(\mathit{row}, p)$ or $\mathit{rotate}(\mathit{col}, p)$ costs $p$, with $p \in [0 \ldots n-1]$.
A rotation can be performed either to the left or to the right, which has an
impact on the outcome of the algorithm. The objective is to find the
minimum-cost sequence of moves from the start state to the goal state.
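For illustration, a possible implementation of a single left row rotation is sketched below; this is only a sketch, and the function name and direction convention are assumptions rather than the project's actual code.
\begin{lstlisting}
#include <algorithm>
#include <vector>

// Rotate row r of the n x n matrix one position to the left.
// Under the cost model above, this move costs r.
void rotate_row_left(std::vector<std::vector<int>> &m, int r)
{
    std::rotate(m[r].begin(), m[r].begin() + 1, m[r].end());
}
\end{lstlisting}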
\section{Implementation}
% =================================================
% Heuristic function
% =================================================
\subsection{Heuristic function}
The code implements a relative-distance heuristic that approximates the
distance to the goal. For every tile, the distance between its current grid
position $(x_{current}, y_{current})$ and its goal position $(x_{goal}, y_{goal})$
is computed, and the sum of the absolute values of the coordinate differences
(the Manhattan distance) is used as the heuristic value (see Figure~\ref{fig:h_func}).
\begin{figure}
\begin{lstlisting}
int cost = 0;
int temp[2]={0,0};
for(int i=0; i < current.size(); i++ ){
for(int j=0; j< current.at(i).size(); j++ ){
get_goal_position(current[i][j], goal, temp);
int dx = temp[0]-i;
int dy = temp[1]-j;
cost += std::abs(dx) + std::abs(dy);
}
}
return cost;
\end{lstlisting}
\caption{Heuristic function}
\label{fig:h_func}
\end{figure}
Another attempt was made on the same code base with the following
change to line 8 of Figure~\ref{fig:h_func}, as shown in Figure~\ref{fig:h_func_1}.
\begin{figure}
\begin{lstlisting}
cost += ((int) (std::sqrt( dx*dx + dy*dy )));
\end{lstlisting}
\caption{Heuristic function Change}
\label{fig:h_func_1}
\end{figure}
This yielded a worse result and a significant performance impact, which is not
surprising if you consider that the absolute value can be computed with a bit ``hack'' (see Figure \ref{fig:bit_twiddle}) that
eliminates computational steps compared to taking the square root.
\begin{figure}
\begin{lstlisting}
int my_abs(int x)
{
int s = x >> 31;
return (x ^ s) - s;
}
\end{lstlisting}
\caption{Example abs implementation}
\label{fig:bit_twiddle}
\end{figure}
% =================================================
% Hashing function
% =================================================
\subsection{Hashing}
A problem that arises with any implementation of a board-state
game is how to represent a state with the minimum amount of data.
One solution is hashing, i.e.~using a number or string to represent a state.
This implementation uses a custom hashing method based on Zobrist
hashing~\cite{Zobrist}, which is used in chess-like board games. The hash code is
used to prevent recalculation of the heuristic function and to avoid getting into
a cycle in the algorithm.
\begin{figure}
\begin{lstlisting}
long long int get_hash_value(std::vector<std::vector<int>> &matrix){
long long int result =0;
for(int i=0; i< matrix.size();i++){
for(int j=0; j<matrix[i].size(); j++){
auto row = (long long int) ( (i+1) * std::pow(10, (matrix.size()-1 ) *2));
auto col = (long long int) ( (j+1) * std::pow(10,matrix.size()-1));
long long int temp = row + col+ matrix[i][j];
std::hash<long long int> hasher;
auto _hash = hasher(temp) ;
result ^= _hash + 0x9e3779b9 + (result<<6) + (result>>2);
}
}
return result;
}
\end{lstlisting}
\caption{Hashing Code segment}
\label{fig:Hashing}
\end{figure}
% =================================================
% Search Algorithm function
% =================================================
\subsection{Algorithm}
The algorithm used in this project is a modified version of $A^*$.
The major change is the use of hashing to improve the running time:
instead of comparing full board states when checking whether a node is
already in the frontier or in the explored set, only the hash values are
compared. The small hash values tend to stay cached in the CPU caches and
RAM, which gives a moderate speed gain.
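A condensed sketch of this search loop is given below; it is illustrative only, and \texttt{Node}, \texttt{Matrix}, \texttt{ByFScore}, \texttt{expand()} and \texttt{heuristic()} are placeholders for the project's own types and helpers.
\begin{lstlisting}
#include <queue>
#include <unordered_set>
#include <vector>

// Modified A*: explored states are tracked by their hash values only.
int search(Node start, long long int goal_hash, const Matrix &goal)
{
    std::priority_queue<Node, std::vector<Node>, ByFScore> frontier;
    std::unordered_set<long long int> explored;
    frontier.push(start);
    while (!frontier.empty()) {
        Node current = frontier.top();
        frontier.pop();
        if (current.hash == goal_hash)
            return current.cost;                 // goal reached
        if (!explored.insert(current.hash).second)
            continue;                            // state already expanded
        for (Node &next : expand(current)) {     // all row/column rotations
            next.f = next.cost + heuristic(next.matrix, goal);
            if (explored.count(next.hash) == 0)
                frontier.push(next);
        }
    }
    return -1;                                   // no solution found
}
\end{lstlisting}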
% =================================================
% Result
% =================================================
\section{Results}
``Path size'' denotes the number of state transitions along the returned path;
``Cost'' is the total cost of those transitions, not including the heuristic function weight.
\\
\subsection{2x2 matrix}
Starting position:
\begin{tabular}{ l }
04 03 \\
02 01
\end{tabular}
$\Rightarrow$
Goal:
\begin{tabular}{ l }
01 02 \\
03 04
\end{tabular}
\\
The results below are from a $\times 5$ run on the PC (see \ref{PC}) with a \texttt{CLOCKS\_PER\_SEC} of 1000000
\\
\begin{tabular}{ | l | l | l | l | l | l | l | }
\hline
Run & Path Size & Cost & Running Time Alg & Cpu Start & Cpu End & Total cpu time\\\hline
1 & 4 & 4 & 0.00037s & 15625 & 15625 & 0s\\\hline
2 & 4 & 4 & 0.00037s & 15625 & 15625 & 0s\\\hline
3 & 4 & 4 & 0.0003685s & 15625 & 15625 & 0s\\\hline
4 & 4 & 4 & 0.0002143s & 15625 & 15625 & 0s\\\hline
5 & 4 & 4 & 0.0002142s & 15625 & 15625 & 0s\\\hline
\end{tabular}
\\
The results below are from a $\times 5$ run on the Server (see \ref{Server}) with a \texttt{CLOCKS\_PER\_SEC} of 1000000
\\
\begin{tabular}{ | l | l | l | l | l | l | l | }
\hline
Run & Path Size & Cost & Running Time Alg & Cpu Start & Cpu End & Total cpu time\\\hline
1 & 4 & 4 & 0.000589516s & 12088 & 12672 & 0.000584s\\\hline
2 & 4 & 4 & 0.000521357s & 10037 & 10553 & 0.000516s\\\hline
3 & 4 & 4 & 0.000623622s & 10185 & 10807 & 0.000622s\\\hline
4 & 4 & 4 & 0.000544579s & 9955 & 10497 & 0.000542s\\\hline
5 & 4 & 4 & 0.000579305s & 9709 & 10286 & 0.000577s\\\hline
\end{tabular}
\subsection{3x3 matrix}
Starting position:
\begin{tabular}{ l }
09 08 07 \\
06 05 04 \\
03 02 01
\end{tabular}
$\Rightarrow$
Goal:
\begin{tabular}{ l }
01 02 03 \\
04 05 06 \\
07 08 09
\end{tabular}
\\
The results below are from a $\times 5$ run on the PC (see \ref{PC}) with a \texttt{CLOCKS\_PER\_SEC} of 1000000
\\
\begin{tabular}{ | l | l | l | l | l | l | l | }
\hline
Run & Path Size & Cost & Running Time Alg & Cpu Start & Cpu End & Total cpu time\\\hline
1 & 34 & 34 & 0.0771355s & 31250 & 109375 & 0.078125s\\\hline
2 & 34 & 34 & 0.0402095s & 15625 & 46875 & 0.03125s\\\hline
3 & 34 & 34 & 0.0326797s & 0 & 31250 & 0.03125s\\\hline
4 & 34 & 34 & 0.0325114s & 0 & 31250 & 0.03125s\\\hline
5 & 34 & 34 & 0.0324343s & 0 & 31250 & 0.03125s\\\hline
\end{tabular}
\\
The results below are from a $\times 5$ run on the Server (see \ref{Server}) with a \texttt{CLOCKS\_PER\_SEC} of 1000000
\\
\begin{tabular}{ | l | l | l | l | l | l | l | }
\hline
Run & Path Size & Cost & Running Time Alg & Cpu Start & Cpu End & Total cpu time\\\hline
1 & 34 & 34 & 0.08514s & 12444 & 97607 & 0.085163s\\\hline
2 & 34 & 34 & 0.0843959s & 11669 & 96036 & 0.084367s\\\hline
3 & 34 & 34 & 0.0852655s & 11209 & 96469 & 0.08526s\\\hline
4 & 34 & 34 & 0.0837059s & 11251 & 94951 & 0.0837s\\\hline
5 & 34 & 34 & 0.0835822s & 10498 & 94075 & 0.083577s\\\hline
\end{tabular}
\subsection{4x4 matrix}
Starting position:
\begin{tabular}{ l }
16 15 14 13\\
12 11 10 09\\
08 07 06 05\\
04 03 02 01\end{tabular}
$\Rightarrow$
Goal:
\begin{tabular}{ l }
01 02 03 04\\
05 06 07 08\\
09 10 11 12\\
13 14 15 16\end{tabular}
\\
The results below are from a $\times 5$ run on the PC (see \ref{PC}) with a \texttt{CLOCKS\_PER\_SEC} of 1000000
\\
\begin{tabular}{ | l | l | l | l | l | l | l | }
\hline
Run & Path Size & Cost & Running Time Alg & Cpu Start & Cpu End & Total cpu time\\\hline
1 & 153 & 178 & 19.3723s & 15625 & 19375000 & 19.3594s\\\hline
2 & 153 & 178 & 19.2784s & 0 & 19281250 & 19.2812s\\\hline
3 & 153 & 178 & 19.3407s & 15625 & 19281250 & 19.2656s\\\hline
4 & 153 & 178 & 19.2833s & 0 & 19281250 & 19.2812s\\\hline
5 & 153 & 178 & 19.2716s & 0 & 19281250 & 19.2812s\\\hline
\end{tabular}
\\
The results below are from a $\times 5$ run on the Server (see \ref{Server}) with a \texttt{CLOCKS\_PER\_SEC} of 1000000
\\
\begin{tabular}{ | l | l | l | l | l | l | l | }
\hline
Run & Path Size & Cost & Running Time Alg & Cpu Start & Cpu End & Total cpu time\\\hline
1 & 153 & 178 & 50.1818s & 12055 & 50126065 & 50.114s\\\hline
2 & 153 & 178 & 50.2406s & 12573 & 50170881 & 50.1583s\\\hline
3 & 153 & 178 & 50.1994s & 11521 & 50125078 & 50.1136s\\\hline
4 & 153 & 178 & 50.3866s & 12512 & 50319531 & 50.307s\\\hline
5 & 153 & 178 & 50.0151s & 11048 & 49937159 & 49.9261s\\\hline
\end{tabular}
\\
\\
The runs have the same path size and cost.
From this data, the algorithm is consistent in the path size it produces.
Therefore the data provides a tentative solution; however, for larger
matrices it becomes harder to prove that the cost is minimal.
\subsection{Other applications}
The shifting row function
was also consider in some attempts to find new algorithms for encryption \cite{rotation_enc}.
As general Purpose graphic processing unit (GPGPU) can take advantage of basic rotation, this would allow for new forms of encryption.
gpu.cpp contains a partial code segment on generating the first depth needed for a search Heuristic done on the gpu using Opencl. given a little more time
and with the assistance of ``\underline{Massively Parallel A* Search on a GPU}''\cite{a_start_gpu} the solvable solution size would increase to around $n=128$.
$n=129$ over loads my GPGPU, This is most likely due to the gpu on PC (see \ref{PC}) being used to drive 3 1920 x1080 monitors.
\subsection{Output}
Each run generates a file, stored in the data folder, with information
about that run.
The output file gives the hashes of the start and goal states,\\
then the algorithm runs.\\
Next it shows the visited (explored) count and the number of
iterations it went through.\\
The path is printed next, showing the transitions.\\
Finally, the run-time statistics are printed.
\begin{lstlisting}
Start Hash:706246336224068
Goal Hash:706246335411732
============= Algorithm ===========
Goal Found!!
Visited count:5
Iterations5
1 2
3 4
Cost:1
706246335411732->706246335412049
1 2
4 3
Cost:1
706246335412049->706246336219922
4 2
1 3
Cost:1
706246336219922->706246336224256
4 3
1 2
Cost:1
706246336224256->706246336224068
4 3
2 1
Path size: 4 Cost: 4
Running time Algorithm: 0.000589516s
cpu start: 12088 cpu end:12672 CLOCKS_PER_SEC:1000000
cpu time: 0.000584s
\end{lstlisting}
% =================================================
% Sources of error
% =================================================
\section{Sources of error}
\subsection{Compiler}
Introducing hashing introduced some ``error'' into the application.
This is due to how hashing is implemented by the compiler (or the Windows debugger):
running the code on Windows seems to yield a less accurate result, where accuracy
is gauged by the minimum cost found.
\\
\\
Below is an output from a run on the PC (see \ref{PC}).
\\
\textbf{Note:} the running time for Windows includes
loading the debugging library for a PDB file (as Windows runs the debugger).
\\
Visual C++ \textsuperscript{TM}
\begin{lstlisting}
Path size: 156 Cost: 180
Running time Algorithm: 42.9065s
cpu start: 54 cpu end:42961 CLOCKS_PER_SEC:1000
cpu time: 42.907s
\end{lstlisting}
GNU g++
\begin{lstlisting}
Path size: 153 Cost: 178
Running time Algorithm: 20.0533s
cpu start: 15625 cpu end:19828125 CLOCKS_PER_SEC:1000000
cpu time: 19.8125s
\end{lstlisting}
\subsection{Data structures}
This implementation only allows for n to be less then 5, if 5 is chosen
the std map library will throw a segmentation fault for exceeding size.
If given more time this issue could have been resolved by creating custom
data structures to handle larger hash values and more combinations.
\subsection{Cpu boost}
During some of the runs, the CPU on the PC (see \ref{PC}) was affected by
the CPU boosting to a higher clock speed. This error is most prevalent during the $3\times3$
runs.
% ================================
% System
% ================================
\section{System}
\subsection{IDE}\label{IDE}
This code was programmed in Visual Studio Code~\cite{vscode}, an MIT-licensed source-code editor
and debugger.
\subsection{PC} \label{PC}
Cpu: Intel(R) Core(TM) i5-6600K CPU @3.50 GHz (stock) boost to 4.10GHz
\\
RAM: 16GB DDR4
\\
Operating System: Windows 10
\subsection{Laptop} \label{Laptop}
Cpu: Intel(R) Celeron(TM) N3150 Quad-core 1.60 GHz (stock) boost to 1.8GHz
\\
RAM: 8GB DDR3L
\\
Operating System: Windows 10
\subsection{Server} \label{Server}
Cpu: Intel(R) Xeon(R) CPU E5345 @ 2.33 GHz (4 cores allocated to the VM, with hyper-threading)
\\
RAM: 8GB DDR2 - ECC (Error correcting Memory)
\\
Operating System: Ubuntu Server (running on a hypervisor)
\bibliographystyle{unsrt}
\bibliography{bib}
\end{document} | {
"alphanum_fraction": 0.6632093198,
"avg_line_length": 34.0281329923,
"ext": "tex",
"hexsha": "d6f0694fb39bd565ac6ee80896157fc7eb3fdbf6",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "3809f778854d61576649a4d822141c5d00547ae8",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "bluemner/heuristics_project",
"max_forks_repo_path": "report/report.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "3809f778854d61576649a4d822141c5d00547ae8",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "bluemner/heuristics_project",
"max_issues_repo_path": "report/report.tex",
"max_line_length": 159,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "3809f778854d61576649a4d822141c5d00547ae8",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "bluemner/heuristics_project",
"max_stars_repo_path": "report/report.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 4315,
"size": 13305
} |
% !TEX root = ../zeth-protocol-specification.tex
\section{\zeth~statement}\label{zeth-protocol:statement}
As explained in~\cite{zethpaper}, the $\mix$ function of $\mixer$ verifies the validity of $\zkp$ on the given primary inputs in order to determine whether the state transition is valid. As such, $\mixer$ verifies whether for $\zkp$, and primary input $\priminputs$, there exists an auxiliary input $\auxinputs$, such that the tuple $\smalltuple{\priminputs, \auxinputs}$ satisfies the $\npol$-relation $\RELCIRC$, consisting of the following constraints:
\begin{itemize}
\item For each $i \in [\jsin]$:
\begin{enumerate}
\item $\auxinputs.\jsins{i}.\znote.\apk = \prfaddr{\auxinputs.\jsins{i}.\ask}{0}$
\item $\auxinputs.\jsins{i}.\cm{} = \comm.\commit{\auxinputs.\jsins{i}.\znote.\apk,\allowbreak \auxinputs.\jsins{i}.\znote.\rrho,\allowbreak \auxinputs.\jsins{i}.\znote.\notev}{\auxinputs.\jsins{i}.\znote.\noter{}}$
\item $\auxinputs.\jsins{i}.\nf{} = \prfnf{\auxinputs.\jsins{i}.\ask}{\auxinputs.\jsins{i}.\znote.\rrho}$
\item $\auxinputs.\htags{i} = \prfpk{\auxinputs.\jsins{i}.\ask}{i, \auxinputs.\hsig}$ (non-malleability, see~\cref{appendix:trnm})
\item $(\auxinputs.\jsins{i}.\znote.\notev) \cdot (1 - e) = 0$ is satisfied for the boolean value $e$ set such that if $\auxinputs.\jsins{i}.\znote.\notev > 0$ then $e = 1$.
\item The Merkle root $\mkroot'$ obtained after checking the Merkle authentication path $\auxinputs.\jsins{i}.\mkpath$ of commitment $\auxinputs.\jsins{i}.\cm{}$, with $\mkhash$, equals $\priminputs.\mkroot$ if $e = 1$.
\item $\priminputs.\nfs{i}$ \\ $= \indexedset{\pack{\slice{\auxinputs.\jsins{i}.\nf{}}{k \cdot \fieldBitCap}{(k+1) \cdot \fieldBitCap}}{\FFx{\rCURVE}}}{k \in [\floor{\prfNfOutLen/\fieldBitCap}]}$ (see~\cref{instantiation:statement:pack} for definition of $\pack{}{}$)
\item $\priminputs.\htags{i}$ \\ $= \indexedset{\pack{\slice{\auxinputs.\htags{i}}{k \cdot \fieldBitCap}{(k+1) \cdot \fieldBitCap}}{\FFx{\rCURVE}}}{k \in [\floor{\prfPkOutLen/\fieldBitCap}]}$ (see~\cref{instantiation:statement:pack} for definition of $\pack{}{}$)
\end{enumerate}
\item For each $j \in [\jsout]$:
\begin{enumerate}
\item $\auxinputs.\znotes{j}.\rrho = \prfrho{\auxinputs.\pphi}{j, \auxinputs.\hsig}$ (non-malleability, see~\cref{appendix:trnm})
\item $\priminputs.\cms{j} =\comm.\commit{\auxinputs.\znotes{j}.\apk,\allowbreak \auxinputs.\znotes{j}.\rrho,\allowbreak \auxinputs.\znotes{j}.\notev}{\auxinputs.\znotes{j}.\noter{}}$
\end{enumerate}
\item $\priminputs.\hsig = \indexedset{\pack{\slice{\auxinputs.\hsig}{k \cdot \fieldBitCap}{(k+1) \cdot \fieldBitCap}}{\FFx{\rCURVE}}}{k \in [\floor{\crhhsigOutLen/\fieldBitCap}]}$ (see~\cref{instantiation:statement:pack} for definition of $\pack{}{}$)
\item $\priminputs.\resbits = \packResBits{\indexedset{\auxinputs.\jsins{i}.\nf{}}{i \in [\jsin]}, \auxinputs.\vin, \auxinputs.\vout, \auxinputs.\hsig, \indexedset{\auxinputs.\htags{i}}{i \in [\jsin]}}$ (see~\cref{instantiation:statement:pack} for definition of $\packResBits{}$)
\item Check that the ``\gls{joinsplit} is balanced'', i.e.~check that the \gls{joinsplit-eq} holds:\footnote{where $\pack{x}{\FFx{\rCURVE}}$ outputs the numerical value of $x$ in $\FFx{\rCURVE}$. We rely on the fact that $\zvalueLen < \fieldBitCap$ to perform this sum.}
\begin{align*}
&\pack{\auxinputs.\vin}{\FFx{\rCURVE}} + \sum_{i \in [\jsin]} \pack{\auxinputs.\jsins{i}.\znote.\notev}{\FFx{\rCURVE}} \\
& = \sum_{j \in [\jsout]} \pack{\auxinputs.\znotes{j}.\notev}{\FFx{\rCURVE}} + \pack{\auxinputs.\vout}{\FFx{\rCURVE}}
\end{align*}
\end{itemize}
| {
"alphanum_fraction": 0.6610486891,
"avg_line_length": 116.8125,
"ext": "tex",
"hexsha": "505cdda1c357bad1d092bed5bc503d1f909b5bfe",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-07-26T04:51:29.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-07-26T04:51:29.000Z",
"max_forks_repo_head_hexsha": "ba29c67587395f5c7b26b52ee7ab9cba12f1cc6b",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "clearmatics/zeth-specifications",
"max_forks_repo_path": "chapters/chap02-sec02.tex",
"max_issues_count": 13,
"max_issues_repo_head_hexsha": "ba29c67587395f5c7b26b52ee7ab9cba12f1cc6b",
"max_issues_repo_issues_event_max_datetime": "2021-04-16T10:57:05.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-10-27T10:41:50.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "clearmatics/zeth-specifications",
"max_issues_repo_path": "chapters/chap02-sec02.tex",
"max_line_length": 455,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "ba29c67587395f5c7b26b52ee7ab9cba12f1cc6b",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "clearmatics/zeth-specifications",
"max_stars_repo_path": "chapters/chap02-sec02.tex",
"max_stars_repo_stars_event_max_datetime": "2021-04-29T18:22:00.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-04-29T18:22:00.000Z",
"num_tokens": 1298,
"size": 3738
} |
\chapter{DCCP Agents}
\label{sec:dccpAgents}
\section{DCCP Agents}
\label{sec:dccpagent}
This section describes the operation of the DCCP agents in \ns.
In the current \ns implementation there are two major congestion
control mechanisms for DCCP agents: CCID2 and CCID3.
A DCCP agent is a symmetric two-way agent in the sense that it represents
both a sender and a receiver.
DCCP for \ns is still under development.
The files described in this section are too numerous to enumerate here;
basically, they cover most files matching the regular expression
\nsf{dccp*.\{cc, h\}}.
Applications can access DCCP agents via the \fcn[]{sendmsg} function in C++,
or via the \code{send} or \code{sendmsg} methods in OTcl, as described in
section \ref{sec:systemcalls}.
The following is a simple example of how a DCCP CCID2 agent may be used in a program.
In the example, the CBR traffic generator is started at time 1.0, at which time
the generator begins to periodically call the DCCP agent \fcn[]{sendmsg}
function.
\begin{program}
set ns [new Simulator]
set sender [$ns node]
set receiver [$ns node]
$ns duplex-link $sender $receiver 5Mb 2ms DropTail
set dccp0 [new Agent/DCCP/TCPlike]
$dccp0 set window_ 7000
set dccpsink0 [new Agent/DCCP/TCPlike]
$ns attach-agent $sender $dccp0
$ns attach-agent $receiver $dccpsink0
set cbr0 [new Application/Traffic/CBR]
$cbr0 attach-agent $dccp0
$cbr0 set packetSize_ 160
$cbr0 set rate_ 80Kb
$ns connect $dccp0 $dccpsink0
$ns at 1.0 "$cbr0 start"
\end{program}
The following example uses DCCP CCID3.
\begin{program}
set ns [new Simulator]
set sender [$ns node]
set receiver [$ns node]
$ns duplex-link $sender $receiver 5Mb 2ms DropTail
set dccp0 [new Agent/DCCP/TFRC]
set dccpsink0 [new Agent/DCCP/TFRC]
$ns attach-agent $sender $dccp0
$ns attach-agent $receiver $dccpsink0
set cbr0 [new Application/Traffic/CBR]
$cbr0 attach-agent $dccp0
$cbr0 set packetSize_ 160
$cbr0 set rate_ 80Kb
$ns connect $dccp0 $dccpsink0
$ns at 1.0 "$cbr0 start"
\end{program}
\section{Commands at a glance}
\label{sec:dccpcommand}
The following commands are used to set up DCCP agents in simulation scripts:
\begin{flushleft}
\code{set dccp0 [new Agent/DCCP/TCPlike]}\\
This creates an instance of the DCCP CCID2 agent.
\code{set dccp0 [new Agent/DCCP/TFRC]}\\
This creates an instance of the DCCP CCID3 agent.
\code{$ns_ attach-agent <node> <agent>}\\
This is a common command used to attach any <agent> to a given <node>.
\code{$traffic-gen attach-agent <agent>}\\
This a class Application/Traffic/<traffictype> method which connects the
traffic generator to the given <agent>. For example, if we want to setup
a CBR traffic flow for the dccp agent, dccp0, we given the following commands\\
\begin{program}
set cbr1 [new Application/Traffic/CBR]
$cbr1 attach-agent $dccp0
\end{program}
For a more complex example of setting up a DCCP agent in a simulation, see
the example code in the tcl/ex folder.
\end{flushleft}
\endinput | {
"alphanum_fraction": 0.7035286704,
"avg_line_length": 33.0625,
"ext": "tex",
"hexsha": "57106e6c1d3c0a2f7d1590db4d2daf6a4f5c09c6",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-09-29T16:06:57.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-09-29T16:06:57.000Z",
"max_forks_repo_head_hexsha": "f037b796ff10300ffe0422580be5855c37d0b140",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "nitishk017/ns2project",
"max_forks_repo_path": "ns-allinone-2.35/ns-2.35/doc/dccp.tex",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "f037b796ff10300ffe0422580be5855c37d0b140",
"max_issues_repo_issues_event_max_datetime": "2019-01-22T21:41:38.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-01-20T17:35:23.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "nitishk017/ns2project",
"max_issues_repo_path": "ns-allinone-2.35/ns-2.35/doc/dccp.tex",
"max_line_length": 87,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "f037b796ff10300ffe0422580be5855c37d0b140",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "nitishk017/ns2project",
"max_stars_repo_path": "ns-allinone-2.35/ns-2.35/doc/dccp.tex",
"max_stars_repo_stars_event_max_datetime": "2020-05-29T13:04:42.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-05-29T13:04:42.000Z",
"num_tokens": 929,
"size": 3174
} |
\documentclass{article}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
% Required to call the command \operatorname{name} and \DeclareMathOperator{command}{definition}
\usepackage{amsmath}
% Required to render the clickable url
\usepackage[hyphens]{url}
\usepackage{hyperref}
% Declare a new math function
\DeclareMathOperator{\abc}{abc}
\begin{document}
\section*{Existing functions}
\LaTeX{} comes with a lot of built-in mathematical functions, such as $\cos x$, $\ln x$ or $\max x$.
A complete list of symbols can be found in the \emph{The Comprehensive LATEX Symbol List} hosted in the \emph{Best Practices} part of this repository (\url{https://github.com/ZenLulz/LatexCompendium/blob/master/best-practices/the-comprehensive-latex-symbol-list.pdf}).
\section*{Custom functions}
\subsection*{The command \emph{operatorname}}
The following formula uses a custom function called \emph{abc}.
\[\operatorname{abc} x\]
\subsection*{The command \emph{DeclareMathOperator}}
This command makes it possible to define math functions or operators in the preamble so that they can be reused anywhere in the document.
\[\abc x\]
\subsection*{The command \emph{mathrm}}
Many people use the command \emph{mathrm} instead; however, this leads to spacing issues, as illustrated below.
\[\mathrm{abc}x\]
\end{document}
\hypertarget{heterogeneous-shared-memory-systems}{%
\section{Heterogeneous Shared Memory
Systems}\label{heterogeneous-shared-memory-systems}}
\hypertarget{motivation-performance-increase}{%
\subsection{Motivation: Performance
Increase}\label{motivation-performance-increase}}
\begin{itemize}
\tightlist
\item
  In the beginning, performance could be increased simply by raising the
  CPU clock frequency. This is hardly possible anymore, because clock
  frequencies are already very high.
\item
  By shrinking the CMOS circuitry, engineers were able to put more cores
  on a CPU and thereby increase performance.
\item
  The next big performance increase comes from heterogeneity: different
  workload characteristics are handled by different processor
  architectures.
\end{itemize}
\hypertarget{workload-classes}{%
\subsubsection{Workload Classes}\label{workload-classes}}
To distribute the different workload characteristics to different
processor architectures, we can define different workload classes.
\begin{itemize}
\tightlist
\item
Different workload behaviors
\begin{itemize}
\tightlist
\item
Control intensive (e.g.~searching, sorting)
\item
Data intensive (e.g.~image processing)
\item
  Compute intensive (e.g.~numerical methods)
\end{itemize}
\item
Different workload classes need different hardware architecture
\begin{itemize}
\tightlist
\item
  e.g.~for control intensive applications: superscalar CPUs
\item
e.g.~for data intensive applications: vector or SIMD architectures
\end{itemize}
\end{itemize}
\hypertarget{heterogeneous-systems}{%
\subsection{Heterogeneous Systems}\label{heterogeneous-systems}}
A system architecture (maintained by the HSA Foundation) that allows
accelerators, e.g.~GPUs, to operate at the same processing level as the
system's CPU.
The goals are
\begin{itemize}
\tightlist
\item
different combinations of CPU and GPU processor cores operate as a
unified processing engine
\item
higher performance and lower power consumption
\end{itemize}
\clearpage
\hypertarget{device-architectures}{%
\subsubsection{Device Architectures}\label{device-architectures}}
\begin{itemize}
\tightlist
\item
SIMD and Vector Processing
\begin{itemize}
\tightlist
\item
Single instruction multiple data
\item
One instruction is applied to multiple datasets at the same time
\end{itemize}
\item
Hardware Multithreading
\begin{itemize}
\tightlist
\item
multiple independent instruction streams (threads) are executed
concurrently
\item
Simultaneous Multithreading (SMT): instructions from multiple
threads are interleaved on the execution resources
\end{itemize}
\item
Multi-Core Architectures
\begin{itemize}
\tightlist
\item
in the simplest case, each of the cores executes largely
independently, sharing data through the memory system, usually
through a cache coherency protocol
\item
multi-core systems (both CPUs and GPUs) can come in very different
variants
\end{itemize}
\item
Systems-on-Chip and the APU
\begin{itemize}
\tightlist
\item
complicated systems-on-chip (SoC) combine varied components into a
compact and cost-effective design
\item
benefits: lower manufacturing costs, smaller form factor, less power
consumption
\end{itemize}
\end{itemize}
\hypertarget{gpu-architectures}{%
\subsubsection{GPU Architectures}\label{gpu-architectures}}
GPUs are designed to process graphics workloads consisting of complex
vertex, geometry, and pixel processing task graphs.
\begin{itemize}
\tightlist
\item
  A GPU consists of several compute units, each containing many
  processing elements
\item
  Each compute unit executes many threads (work-items)
\item
  Each thread has its own private memory
\item
  All threads on a compute unit can access shared local memory on that
  compute unit
\end{itemize}
\begin{figure}[H]
\centering
\includegraphics[width=0.7\textwidth]{figures/gpu_architecture.png}
\caption{GPU Architecture}
\end{figure}
\hypertarget{programming-heterogeneous-systems}{%
\subsection{Programming Heterogeneous
Systems}\label{programming-heterogeneous-systems}}
\begin{itemize}
\tightlist
\item
C++ Accelerated Massive Parallelism (AMP)
\begin{itemize}
\tightlist
\item
open specification from Microsoft for implementing data parallelism
directly in C++
\end{itemize}
\item
Compute Unified Device Architecture (CUDA)
\begin{itemize}
\tightlist
\item
parallel computing platform and programming model created by NVIDIA
and implemented by the GPUs of NVIDIA
\end{itemize}
\item
OpenACC
\begin{itemize}
\tightlist
\item
programming standard for parallel computing developed by Cray, CAPS,
NVIDIA, and PGI
\end{itemize}
\item
Open Computing Language (OpenCL)
\begin{itemize}
\tightlist
\item
C99 based language and framework for programming heterogeneous
platforms
\end{itemize}
\end{itemize}
\clearpage
\hypertarget{amp-overview}{%
\subsubsection{AMP Overview}\label{amp-overview}}
AMP code that cannot be run on GPUs falls back onto one or more CPUs
instead and uses SSE instructions.
\begin{lstlisting}[language=C++]
#include <amp.h>
#include <iostream>
using namespace concurrency;
const int s = 5;
void vecadd() {
int aH[] = {1, 2, 3, 4, 5};
int bH[] = {6, 7, 8, 9, 10};
int rH[s];
//1 indicates the dimension of the vector. One could use up to 3 dimensions for a vector.
array_view<const int, 1> a(s, aH);
array_view<const int, 1> b(s, bH);
array_view<int, 1> r(s, rH);
r.discard_data(); //r should not be sent to the GPU. r is only meant to be the result vector.
parallel_for_each(
r.extent,
[=](index<1> idx) restrict(amp)
{
r[idx] = a[idx] + b[idx];
});
// wait until the GPU has finished
//and the result has been copied
// back to rH
r.synchronize();
for (int i = 0; i < s; i++) {
std::cout << rH[i] << "\n";
}
}
\end{lstlisting}
\hypertarget{cuda-overview}{%
\subsubsection{CUDA Overview}\label{cuda-overview}}
CUDA gives program developers direct access to the virtual instruction
set and memory of the parallel computational elements in NVIDIA CUDA
GPUs. C/C++ programmers use `CUDA C/C++', compiled with ``nvcc''.
\begin{lstlisting}[language=C++]
// This code does not show how the data is transferred to the GPU; it only
// shows the kernel that runs directly on the GPU. Each thread computes its
// unique ID and uses it to index the proper elements in device memory.
__global__ void vecadd(int *a, int *b, int *r) {
// get the workitem's unique ID
const int idx = blockDim.x*blockIdx.x + threadIdx.x;
// add two vector elements
r[idx] = a[idx] + b[idx];
}
\end{lstlisting}
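As the comment above notes, the kernel needs host code to allocate device
memory, transfer the data, and launch the grid. The following is a minimal
host-side sketch (not from the original notes) for the vecadd kernel above,
using the standard CUDA runtime API and compiled with nvcc. Error checking is
omitted, and n is assumed to be a multiple of the block size, since the kernel
has no bounds check.
\begin{lstlisting}[language=C++]
#include <cuda_runtime.h>

void vecadd_host(const int *a, const int *b, int *r, int n) {
    int *da, *db, *dr;
    size_t bytes = n * sizeof(int);

    // allocate device memory and copy the input vectors to the GPU
    cudaMalloc((void**)&da, bytes);
    cudaMalloc((void**)&db, bytes);
    cudaMalloc((void**)&dr, bytes);
    cudaMemcpy(da, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(db, b, bytes, cudaMemcpyHostToDevice);

    // launch enough blocks of 256 threads to cover all n elements
    int threads = 256;
    int blocks = (n + threads - 1) / threads;
    vecadd<<<blocks, threads>>>(da, db, dr);

    // copy the result back and release the device memory
    cudaMemcpy(r, dr, bytes, cudaMemcpyDeviceToHost);
    cudaFree(da); cudaFree(db); cudaFree(dr);
}
\end{lstlisting}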
\clearpage
\hypertarget{openacc-overview}{%
\subsubsection{OpenACC Overview}\label{openacc-overview}}
OpenACC is designed to simplify parallel programming of heterogeneous
systems. As in OpenMP, the programmer can annotate C, C++ and Fortran
source code with pragma compiler directives to identify the regions that
should be accelerated.
\begin{lstlisting}[language=C++]
void vecadd(int *restrict r, int *a, int *b, int n) {
#pragma acc kernels loop copyin(a[0:n],b[0:n]) copyout(r[0:n])
for(int i = 0; i < n; ++i) r[i] = a[i] + b[i];
}
\end{lstlisting}
\hypertarget{introduction-to-opencl}{%
\subsection{Introduction to OpenCL}\label{introduction-to-opencl}}
Goal
\begin{itemize}
\tightlist
\item
Use all computational resources in system (CPU \& GPU)
\item
Efficient parallel programming model, based on C99
\end{itemize}
Kernel
\begin{itemize}
\tightlist
\item
Basic unit of executable code - similar to a C function
\item
  A kernel is a function which is executed on an OpenCL device (GPU or CPU)
\end{itemize}
Program
\begin{itemize}
\tightlist
\item
Collection of kernels and other functions
\item
Analogous to a dynamic library
\end{itemize}
\begin{lstlisting}[language=OpenCL]
kernel void
dp_mul(global const float *a,
global const float *b,
global float *result)
{
int id = get_global_id(0);
result[id] = a[id] * b[id];
}
// execute dp_mul over "n" work-items
\end{lstlisting}
\begin{figure}[H]
\centering
\includegraphics[width=0.5\textwidth]{figures/openClMemoryScheme.png}
\caption{OpenCL Memory Model}
\end{figure}
\begin{itemize}
\tightlist
\item
Private Memory
\begin{itemize}
\tightlist
\item
Per Work-item
\end{itemize}
\item
Local Memory
\begin{itemize}
\tightlist
\item
Shared within a workgroup
\end{itemize}
\item
  Global / Constant Memory
\begin{itemize}
\tightlist
\item
Not synchronized
\end{itemize}
\item
Host Memory
\begin{itemize}
\tightlist
\item
On the CPU
\end{itemize}
\end{itemize}
\hypertarget{compilation}{%
\subsubsection{Compilation}\label{compilation}}
OpenCL uses a dynamic compilation model: device programs are compiled at runtime.
\begin{itemize}
\tightlist
\item
  Step 1: The code is compiled to an Intermediate Representation (IR),
  which is usually an assembly language for a virtual machine.
\item
  Step 2: The IR is compiled to machine code for execution. This step
  is much faster.
\end{itemize}
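In host code, both steps are typically triggered by creating a program object
from source and building it for the chosen devices. The snippet below is a
sketch, not from the original notes; it assumes that context, device, err and
the kernel source string source (a const char *) already exist, and omits
error handling.
\begin{lstlisting}[language=C++]
// create a program object from the OpenCL C source string
cl_program program =
    clCreateProgramWithSource(context, 1, &source, NULL, &err);

// compile and link for the given device; this covers both steps
// (source to IR, then IR to device machine code)
err = clBuildProgram(program, 1, &device, NULL, NULL, NULL);

// extract a kernel object by its function name
cl_kernel kernel = clCreateKernel(program, "dp_mul", &err);
\end{lstlisting}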
\clearpage
\hypertarget{opencl-objects}{%
\subsubsection{OpenCL Objects}\label{opencl-objects}}
\begin{itemize}
\tightlist
\item
Setup
\begin{itemize}
\tightlist
\item
Devices - GPU, CPU, Cell/B.E.
\item
Contexts - Collection of devices
\item
Queues - Submit work to the device (one task to CPU, one to GPU,
etc.)
\end{itemize}
\item
Memory
\begin{itemize}
\tightlist
\item
Buffers - Blocks of memory
\item
Images - 2D or 3D formatted images (own data structures)
\end{itemize}
\item
Execution
\begin{itemize}
\tightlist
\item
Programs - Collections of kernels
\item
Kernels - Argument/execution instances
\end{itemize}
\item
Synchronization/profiling
\begin{itemize}
\tightlist
\item
Events
\end{itemize}
\end{itemize}
\hypertarget{setup}{%
\subsubsection{Setup}\label{setup}}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
Get the device(s)
\item
Create a context
\item
Create command queue(s)
\end{enumerate}
\begin{lstlisting}[language=C++]
cl_uint num_devices_returned;
cl_device_id devices[2];
err = clGetDeviceIDs(NULL, CL_DEVICE_TYPE_GPU, 1, &devices[0], &num_devices_returned);
err = clGetDeviceIDs(NULL, CL_DEVICE_TYPE_CPU, 1, &devices[1], &num_devices_returned);
cl_context context;
context = clCreateContext(0, 2, devices, NULL, NULL, &err);
cl_command_queue queue_gpu, queue_cpu;
queue_gpu = clCreateCommandQueue(context, devices[0], 0, &err);
queue_cpu = clCreateCommandQueue(context, devices[1], 0, &err);
\end{lstlisting}
\clearpage
\begin{itemize}
\tightlist
\item
Devices
\begin{itemize}
\tightlist
\item
Multiple cores on a CPU or a GPU are presented as a single device
\item
OpenCL executes kernels across all cores in a data-parallel manner
\end{itemize}
\item
Contexts
\begin{itemize}
\tightlist
\item
Enable sharing of memory between devices
\item
To share between devices, both devices must be in the same context
\end{itemize}
\item
Queues
\begin{itemize}
\tightlist
\item
All work submitted through queues
\item
Each device must have a queue
\end{itemize}
\end{itemize}
\hypertarget{read-and-write}{%
\subsubsection{Read and Write}\label{read-and-write}}
\textbf{Read from a region in a memory object to host
memory}
\begin{lstlisting}[language=C++]
clEnqueueReadBuffer(queue, object, blocking, offset, size, *ptr, ...)
\end{lstlisting}
\textbf{Write to a region in a memory object from host memory}
\begin{lstlisting}[language=C++]
clEnqueueWriteBuffer(queue, object, blocking, offset, size, *ptr, ...)
\end{lstlisting}
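For example, copying an array of floats into a device buffer and reading it
back after some kernel has run might look as follows. This is a sketch that
assumes context and queue already exist, with error handling omitted.
\begin{lstlisting}[language=C++]
float data[1024];
// allocate an uninitialised buffer of matching size in the context
cl_mem buf = clCreateBuffer(context, CL_MEM_READ_WRITE,
                            sizeof(data), NULL, &err);

// blocking write: copy host memory into the buffer at offset 0
clEnqueueWriteBuffer(queue, buf, CL_TRUE, 0, sizeof(data), data,
                     0, NULL, NULL);

// ... enqueue kernels that use buf ...

// blocking read: copy the buffer contents back into host memory
clEnqueueReadBuffer(queue, buf, CL_TRUE, 0, sizeof(data), data,
                    0, NULL, NULL);
\end{lstlisting}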
\hypertarget{data-types}{%
\subsubsection{Data Types}\label{data-types}}
\begin{itemize}
\tightlist
\item
Scalar data types
\begin{itemize}
\tightlist
\item
  char, uchar, short, ushort, int, uint, long, ulong
\item
bool, intptr\_t, ptrdiff\_t, size\_t, uintptr\_t, void, half
(storage)
\end{itemize}
\item
Image types
\begin{itemize}
\tightlist
\item
image2d\_t, image3d\_t, sampler\_t
\end{itemize}
\item
Vector data types
\begin{itemize}
\tightlist
\item
Portable
\item
Vector length of 2, 4, 8, and 16
\item
char2, ushort4, int8, float16, double2, \ldots{}
\item
Endian safe
\item
Aligned at vector length
\item
Vector operations and built-in functions
\end{itemize}
\end{itemize}
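As an illustration of the vector types, a kernel can operate on float4
elements directly; the component-wise arithmetic maps to the device's SIMD
units where available. The kernel below is a sketch, not taken from the
original notes.
\begin{lstlisting}[language=OpenCL]
kernel void saxpy4(global float4 *y,
                   global const float4 *x,
                   const float a)
{
    int i = get_global_id(0);
    // multiply-add on four floats at once
    y[i] = a * x[i] + y[i];
}
\end{lstlisting}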
\clearpage
\hypertarget{programming-in-opencl}{%
\subsubsection{Programming in OpenCL}\label{programming-in-opencl}}
In General: 3 Major Code Blocks
\begin{itemize}
\tightlist
\item
OpenCL device program: kernels and subroutines
\begin{itemize}
\tightlist
\item
operations executed by the work items
\item
C99 based syntax with vector operations
\end{itemize}
\item
C++ host program: device and kernel preparation (reusable)
\begin{itemize}
\tightlist
\item
platform and device handling
\item
creating contexts and command queues
\item
compiling OpenCL device programs
\end{itemize}
\item
  C++ host program: data and device program enqueueing
\begin{itemize}
\tightlist
\item
data allocation and management
\item
filling in command queues
\item
setting kernel arguments
\item
running kernels
\item
event handling
\end{itemize}
\end{itemize}
\hypertarget{opencl-example-vector-addition}{%
\subsection{OpenCL Example: Vector
Addition}\label{opencl-example-vector-addition}}
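A minimal sketch of the three major code blocks for vector addition, assuming
a single GPU platform and device, C++ host code, and no error handling:
\begin{lstlisting}[language=C++]
#include <CL/cl.h>
#include <vector>

// OpenCL C kernel source, kept as a string for runtime compilation
static const char *src =
    "kernel void vecadd(global const int *a,"
    "                   global const int *b,"
    "                   global int *r) {"
    "    int id = get_global_id(0);"
    "    r[id] = a[id] + b[id];"
    "}";

void vecadd_opencl() {
    const size_t n = 1024;
    std::vector<int> a(n, 1), b(n, 2), r(n, 0);
    cl_int err;

    // setup: device, context, command queue
    cl_platform_id platform;
    cl_device_id device;
    clGetPlatformIDs(1, &platform, NULL);
    clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 1, &device, NULL);
    cl_context ctx = clCreateContext(0, 1, &device, NULL, NULL, &err);
    cl_command_queue queue = clCreateCommandQueue(ctx, device, 0, &err);

    // device program: compile at runtime and create the kernel
    cl_program prog = clCreateProgramWithSource(ctx, 1, &src, NULL, &err);
    clBuildProgram(prog, 1, &device, NULL, NULL, NULL);
    cl_kernel kernel = clCreateKernel(prog, "vecadd", &err);

    // data and enqueueing: buffers, kernel arguments, launch, read-back
    cl_mem da = clCreateBuffer(ctx, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
                               n * sizeof(int), a.data(), &err);
    cl_mem db = clCreateBuffer(ctx, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
                               n * sizeof(int), b.data(), &err);
    cl_mem dr = clCreateBuffer(ctx, CL_MEM_WRITE_ONLY,
                               n * sizeof(int), NULL, &err);
    clSetKernelArg(kernel, 0, sizeof(cl_mem), &da);
    clSetKernelArg(kernel, 1, sizeof(cl_mem), &db);
    clSetKernelArg(kernel, 2, sizeof(cl_mem), &dr);
    clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &n, NULL, 0, NULL, NULL);
    clEnqueueReadBuffer(queue, dr, CL_TRUE, 0, n * sizeof(int), r.data(),
                        0, NULL, NULL);
}
\end{lstlisting}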
\clearpage
\documentclass{article}
\usepackage{nonotation}
\usepackage{amssymb}
\usepackage{mathpazo}
\PassOptionsToPackage{hyphens}{url}
\usepackage{hyperref}
\hypersetup{
colorlinks,
linkcolor = {red!70!black},
citecolor = {blue!50!black},
urlcolor = {blue!80!black}
}
% Hyperlink dingbat
\usepackage{pifont}
\newcommand{\linkicon}{\ding{226}}
\usepackage{listings}
%\usepackage{inconsolata}
\lstdefinestyle{posh}
{
backgroundcolor=\color{white},
basicstyle=\scriptsize\color{black}\ttfamily
}
\title{Report --- PhaseKing statistical model check}
\author{Andrea Proietto}
\begin{document}
\maketitle
\section{Objective}
This report is about a piece of software implementing the \emph{Phase-King} consensus protocol; the objective is to verify the software's correctness by means of a specific statistical model checking method, namely \emph{hypothesis testing}.
\subsection{Statistical model checking}
To model a system's successes and failures at the coarsest, most general level, Bernoulli random variables can be used. Let $S$ be such a variable, and say that $S = 1$ means that the system has failed, and $S = 0$ otherwise. Since it is unknown \emph{a priori} how likely the system is to fail, a tentative threshold for the probability of failure is defined as $\varepsilon$; the goal then becomes to prove that a system failure occurs with probability less than $\varepsilon$:
\[
\Pr[S = 1] < \varepsilon
\]
Equivalently, the objective can be to disprove the contrary, namely that a failure happens with probability at least equal to the established threshold $\varepsilon$:
\[
\Pr[S = 1] \geq \varepsilon
\]
% \footnote{In the PhaseKing case, no relationship is present across single runs, therefore the runs' results are independent of each other. In a general case, the system state across runs does factor in choosing an input that maximizes failure probability}
This last statement is the \emph{hypothesis} that the model checking method aims to disprove. The main idea is to run the system multiple times, each time with random, independent inputs, until either a failure is encountered or a run count $N$, established beforehand, is reached. The repeated runs define a geometric random variable $X$ built on the original variable $S$, which represents how many runs are needed before the system fails.
Pursuing this failure-hunting mindset, the method ideally aims to disprove that a failure is guaranteed to happen within the $N$ runs:
\[
\Pr[X \leq N \knowing \Pr[S = 1] \geq \varepsilon] = 1
\]
This could only be verified exactly with an infinite $N$; to keep things practical, the method settles for a suitably large $N$, and transforms the no-failure guarantee into a high probability of success regulated by a \emph{confidence factor} $\delta$:
\[
\Pr[X \leq N \knowing \Pr[S = 1] \geq \varepsilon] \geq 1 - \delta
\]
This is finally the whole statement that the method wants to disprove: if after $N$ runs the system does not fail, then a hypothetical failure is very unlikely to happen, as regulated by the values of $\varepsilon$ and $\delta$:
\[
\Pr[X > N \knowing \Pr[S = 1] \geq \varepsilon] < \delta
\]
The number of runs was introduced arbitrarily, but can be computed once both $\varepsilon$ and $\delta$ are chosen:
\[
N = \frac{\log \delta}{\log (1 - \varepsilon)}
\]
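For instance, with a success guarantee of 99\% ($\varepsilon = 0.01$) and a confidence factor $\delta = 0.001$, taking the ceiling (since $N$ must be a whole number of runs) gives
\[
N = \left\lceil \frac{\log 0.001}{\log 0.99} \right\rceil = \lceil 687.3 \rceil = 688,
\]
which matches the run counts reported in table \ref{table:results}.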
\section{Implementation overview}
\begin{figure}[ht]
\centering
\begin{tikzpicture}
\draw
(0, 0) node (a) [draw, rectangle] {Node}
(2, 0) node (b) [draw, rectangle] {Node}
(0, 2) node (c) [draw, rectangle] {Node}
(2, 2) node (d) [draw, rectangle] {Node}
(7, 1) node (coord) [draw, rectangle] {Coordinator}
(a) -- (b)
(a) -- (c)
(a) -- (d)
(b) -- (c)
(b) -- (d)
(c) -- (d)
;
\draw[color = gray]
(b) -- (coord)
(d) -- (coord)
(a) .. controls (2, -0.7) .. (coord)
(c) .. controls (2, 2.7) .. (coord)
;
\end{tikzpicture}
\caption{An instance of the complete system with 4 nodes participating in the Phase-King protocol}
\label{fig:pk4ex}
\end{figure}
The whole system, along with the controlling logic, is implemented by means of Docker containers built from the OpenJDK15 image. The protocol itself is realized by a set of containers called \emph{nodes}, of which the shared source code is \texttt{Node.java}. The nodes are controlled by an additional container called the \emph{Coordinator}, and its source code is \texttt{Coordinator.java}. An example of the system is depicted in figure \ref{fig:pk4ex}, where the protocol implementation is tested on four nodes.
The whole system is runnable from the command line by issuing two Docker commands, the first being necessary only on the first run or after parameter changes:
\begin{lstlisting}[style=posh]
> docker-compose build
> docker-compose up
\end{lstlisting}
The parameters intended for tuning are in the file \texttt{.env}; of relevance are \texttt{PERC\_SUCCESS}, which is equivalent to $(1 - \varepsilon) / 100$, and \texttt{CONFIDENCE} which is $\delta$. Notably, the number of nodes \texttt{NODE\_COUNT} can be tuned too.
\subsection{The protocol}
The protocol implementation follows closely the definition in \cite{kshe08},\footnote{\linkicon \href
{http://wwwusers.di.uniroma1.it/~stefa/Distributed_Systems/Schedule_files/consensus.pdf}
{\textsf{``Consensus --- Distributed Systems course material, La Sapienza''}}} of which a sketch for node behaviour is reported here:
\begin{enumerate}
\item The node is assigned a unique identifier $i$ from the range $(1, n)$, where $n$ is the number of nodes;
\item It then picks an initial binary value $v_i$, on which all nodes must agree upon at the end of protocol execution;
\item For each phase $p$ from $1$ to $n/4 + 1$, where $n$ is the number of nodes:
\begin{enumerate}
\item[\textsc{Round 1}.] Broadcast $v_i$ to all peers, then receive all other $v_j$ from them, and compute which binary value $m_i$ has the majority among all values, and how much is the majority $c$;
\item[\textsc{Round 2}.] The node decides if it's the Phase King in the current phase ($p = i$):
\begin{itemize}
\item if so, then it broadcasts its own value $m_i$ to all peers, which in turn will serve as tiebreaker to them; furthermore, $m_i$ becomes the next $v_i$ for the new phase;
\item otherwise, the next $v_i$ will be either $m_i$ or the tiebreaker from the King, depending on whether the majority count $c$ crosses a certain threshold ($c > n / 2 + f$).
\end{itemize}
\end{enumerate}
\end{enumerate}
\subsection{The control mechanism}
The coordinator is responsible for:
\begin{itemize}
\item Ensuring that all nodes are connected;
\item Deciding the behaviour for each node, either honest or byzantine;
\item Synchronizing the nodes' status and phases;
\item Enforcing statistical model checking on the protocol implementation, and reporting the results.
\end{itemize}
When launched, the coordinator will compute the number of runs required to enforce hypothesis testing, and when finished, will report whether the process has been successful or not. Other than that, the coordinator does not partake in the Phase-King protocol in any way.
\pagebreak
\section{Results and the future}
The software has been run with varying values for the number of nodes, lower bound for probability of success, and confidence factor, with the results shown in table \ref{table:results}. Each check also reports the number of tests performed in order to enforce the thesis of hypothesis testing.
\begin{table}[ht]
\caption{Model checking results}
\centering
\vbox{}
\begin{tabular}{cc|cc|c}
\hline\hline\noalign{\smallskip}
\# Nodes & \shortstack[c]{Phases per \\ session} & \shortstack[c]{Success \\ guarantee} & \shortstack[c]{Confidence \\ factor} & \shortstack[c]{Outcome \\ (\# Tests)} \\[0.5ex]
\hline\noalign{\smallskip}
10 & 2 & 99\% & 0.001 & \checkmark(688) \\
-- & -- & 99\% & 0.0001 & \checkmark(917) \\
-- & -- & 99.9\% & 0.001 & \checkmark(6905) \\
-- & -- & 99.9\% & 0.0001 & \checkmark(9206) \\
15 & 3 & 99\% & 0.001 & \checkmark(688) \\
-- & -- & 99\% & 0.0001 & \checkmark(917) \\
-- & -- & 99.9\% & 0.001 & \checkmark(6905) \\
-- & -- & 99.9\% & 0.0001 & \checkmark(9206) \\
30 & 7 & 99\% & 0.001 & \checkmark(688) \\
-- & -- & 99\% & 0.0001 & \checkmark(917) \\
-- & -- & 99.9\% & 0.001 & \checkmark(6905) \\
-- & -- & 99.9\% & 0.0001 & \checkmark(9206) \\[1ex]
\hline
\end{tabular}
\label{table:results}
\end{table}
So far, all tests have passed successfully, including the last and most demanding one, which guarantees that a network of 30 nodes reaches consensus with a reliability of 99.9\% and a confidence factor of $10^{-4}$.
Tests with stricter parameters have not been performed, due to current technical limitations and under the assumption that these results are good enough as they are. With more computational power, and the use of Docker's swarm functionality, it is expected that the parameters can be brought to much stricter values while still guaranteeing the software's correctness.
\begin{thebibliography}{9}
\bibitem{kshe08}
\textsc{A. D. Kshemkalyani, M. Singhal},
\textit{Distributed Computing: Principles, Algorithms, and Systems}, page 527,
Cambridge University Press, 2008
\end{thebibliography}
\end{document}
\filetitle{estimate}{Estimate a reduced-form VAR or BVAR}{VAR/estimate}
\paragraph{Syntax}\label{syntax}
\begin{verbatim}
[V,VData,Fitted] = estimate(V,Inp,Range,...)
\end{verbatim}
\paragraph{Input arguments}\label{input-arguments}
\begin{itemize}
\item
\texttt{V} {[} VAR {]} - Empty VAR object.
\item
\texttt{Inp} {[} struct {]} - Input database.
\item
\texttt{Range} {[} numeric {]} - Estimation range, including
\texttt{P} pre-sample periods, where \texttt{P} is the order of the
VAR.
\end{itemize}
\paragraph{Output arguments}\label{output-arguments}
\begin{itemize}
\item
\texttt{V} {[} VAR {]} - Estimated reduced-form VAR object.
\item
\texttt{VData} {[} struct {]} - Output database with the endogenous
variables and the estimated residuals.
\item
\texttt{Fitted} {[} numeric {]} - Periods in which fitted values have
been calculated.
\end{itemize}
\paragraph{Options}\label{options}
\begin{itemize}
\item
\texttt{'A='} {[} numeric \textbar{} \emph{empty} {]} - Restrictions
on the individual values in the transition matrix, \texttt{A}.
\item
  \texttt{'BVAR='} {[} numeric {]} - Prior dummy observations for
  estimating a BVAR; construct the dummy observations using one of the
  \texttt{BVAR} functions.
\item
\texttt{'C='} {[} numeric \textbar{} \emph{empty} {]} - Restrictions
on the individual values in the constant vector, \texttt{C}.
\item
\texttt{'J='} {[} numeric \textbar{} \emph{empty} {]} - Restrictions
on the individual values in the coefficient matrix in front of
exogenous inputs, \texttt{J}.
\item
\texttt{'diff='} {[} \texttt{true} \textbar{} \emph{\texttt{false}}
{]} - Difference the series before estimating the VAR; integrate the
series back afterwards.
\item
\texttt{'G='} {[} numeric \textbar{} \emph{empty} {]} - Restrictions
on the individual values in the coefficient matrix in front of the
co-integrating vector, \texttt{G}.
\item
\texttt{'cointeg='} {[} numeric \textbar{} \emph{empty} {]} -
Co-integrating vectors (in rows) that will be imposed on the estimated
VAR.
\item
\texttt{'comment='} {[} char \textbar{} \texttt{Inf} {]} - Assign
comment to the estimated VAR object; \texttt{Inf} means the existing
comment will be preserved.
\item
\texttt{'constraints='} {[} char \textbar{} cellstr {]} - General
linear constraints on the VAR parameters.
\item
\texttt{'constant='} {[} \emph{\texttt{true}} \textbar{}
\texttt{false} {]} - Include a constant vector in the VAR.
\item
\texttt{'covParam='} {[} \texttt{true} \textbar{}
\emph{\texttt{false}} {]} - Calculate and store the covariance matrix
of estimated parameters.
\item
\texttt{'eqtnByEqtn='} {[} \texttt{true} \textbar{}
\emph{\texttt{false}} {]} - Estimate the VAR equation by equation.
\item
\texttt{'maxIter='} {[} numeric \textbar{} \emph{\texttt{1}} {]} -
Maximum number of iterations when generalised least squares algorithm
is involved.
\item
\texttt{'mean='} {[} numeric \textbar{} \emph{empty} {]} - Impose a
particular asymptotic mean on the VAR process.
\item
\texttt{'order='} {[} numeric \textbar{} \emph{\texttt{1}} {]} - Order
of the VAR.
\item
\texttt{'progress='} {[} \texttt{true} \textbar{}
\emph{\texttt{false}} {]} - Display progress bar in the command
window.
\item
\texttt{'schur='} {[} \emph{\texttt{true}} \textbar{} \texttt{false}
{]} - Calculate triangular (Schur) representation of the estimated VAR
straight away.
\item
\texttt{'stdize='} {[} \texttt{true} \textbar{} \emph{\texttt{false}}
{]} - Adjust the prior dummy observations by the std dev of the
observations.
\item
  \texttt{'timeWeights='} {[} tseries \textbar{} \emph{empty} {]} - Time series
of weights applied to individual periods in the estimation range.
\item
\texttt{'tolerance='} {[} numeric \textbar{} \emph{\texttt{1e-5}} {]}
- Convergence tolerance when generalised least squares algorithm is
involved.
\item
\texttt{'warning='} {[} \emph{\texttt{true}} \textbar{} \texttt{false}
{]} - Display warnings produced by this function.
\end{itemize}
\paragraph{Options for panel VAR}\label{options-for-panel-var}
\begin{itemize}
\item
\texttt{'fixedEff='} {[} \texttt{true} \textbar{}
\emph{\texttt{false}} {]} - Include constant dummies for fixed effect
in panel estimation; applies only if \texttt{'constant=' true}.
\item
\texttt{'groupWeights='} {[} numeric \textbar{} \emph{empty} {]} - A
1-by-NGrp vector of weights applied to groups in panel estimation,
where NGrp is the number of groups; the weights will be rescaled so as
to sum up to \texttt{1}.
\end{itemize}
\paragraph{Description}\label{description}
\subparagraph{Estimating a panel VAR}\label{estimating-a-panel-var}
Panel VAR objects are created by calling the function
\href{VAR/VAR}{\texttt{VAR}} with two input arguments: the list of
variables, and the list of group names. To estimate a panel VAR, the
input data, \texttt{Inp}, must be organised as a super-database with
sub-databases for each group, and time series for each variable within
each group:
\begin{verbatim}
d.Group1_Name.Var1_Name
d.Group1_Name.Var2_Name
...
d.Group2_Name.Var1_Name
d.Group2_Name.Var2_Name
...
\end{verbatim}
\paragraph{Example}\label{example}
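A minimal illustration (the variable names, date range, and the input
database \texttt{d} are hypothetical) of estimating a second-order VAR and
retrieving the output database and fitted periods:

\begin{verbatim}
v = VAR({'x','y','z'});
[v,vd,fitted] = estimate(v,d,qq(2000,1):qq(2015,4),'order=',2);
\end{verbatim}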
\documentclass{article}
\usepackage{arxiv}
\usepackage[utf8]{inputenc} % allow utf-8 input
\usepackage[T1]{fontenc} % use 8-bit T1 fonts
\usepackage{hyperref} % hyperlinks
\usepackage{url} % simple URL typesetting
\usepackage{booktabs} % professional-quality tables
\usepackage{amsfonts} % blackboard math symbols
\usepackage{nicefrac} % compact symbols for 1/2, etc.
\usepackage{microtype} % microtypography
\usepackage{lipsum} % Can be removed after putting your text content
\usepackage{amssymb,amsmath}
\usepackage{listings}
\usepackage{graphicx}
\usepackage{subfig}
%\usepackage{apacite}
\title{Policy interventions for eradication of SARS-CoV-2 by mobile-phone contact-tracing}
%\date{September 9, 1985} % Here you can change the date presented in the paper title
%\date{} % Or removing it
\author{
Daniel Tang\\
Leeds Institute for Data Analytics\thanks{This project has received funding from the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No. 757455)}\\
University of Leeds\\
Leeds, UK\\
\texttt{[email protected]} \\
%% examples of more authors
%% \AND
%% Coauthor \\
%% Affiliation \\
%% Address \\
}
\begin{document}
\maketitle
\begin{abstract}
With the recent announcement\cite{applegoogle} that Apple and Google will introduce a contact-tracing API to iOS and Android, and later add contact tracing functionality directly to their OS's, it seems increasingly likely that contact tracing via a smart phone will form an important part of the effort to manage the COVID-19 pandemic and prevent resurgences of the disease after an initial outbreak.
However, contact-tracing models have shown \cite{Ferrettieabb6936}\cite{hellewellfeasibility} that there remains a high degree of uncertainty over whether contact tracing alone will be enough to control the virus. Here, we suggest complementary policies that could be used as part of a responsive policy to increase the effectiveness of smart phone contact tracing in the event that a resurgence looks imminent.
\end{abstract}
% keywords can be removed
\keywords{COVID-19, SARS-CoV-2}
\section{Introduction}
Contact-tracing models\cite{Ferrettieabb6936}\cite{hellewellfeasibility} have shown that contact-tracing using a mobile phone app has the potential to control and possibly even eradicate SARS-CoV-2. However, the results from these models, along with those from our own model, show that the uncertainty in $R0$ and in the amount of asymptomatic transmission (in the form of transmission from asymptomatic carriers and pre-symptomatic transmission) means that we cannot be certain that mobile-phone contact tracing alone will prevent a resurgence of the disease.
In previous publications, the results of contact-tracing models have often been shown in terms of the proportion of contacts traced. It is important to distinguish between the proportion of people using a contact-tracing app and the proportion of contacts traced. Smart phone ownership among UK adults stood at 88\% as of 2019\cite{deloitte}. Ownership among teenagers is similar\cite{statistica}\footnote{younger children would have to be issued with bluetooth tracing devices in order to be traced}. If phone ownership were randomly distributed we would expect this to result in 77\% of close contacts being between parties who both own a smart phone. However, we would expect people who own a smart phone to be more likely to be in close contact with other people who also own a smart phone, and also on average to have a higher frequency of close contacts, so we assume that 90\% of close contacts are between people who both own a smart phone. A recent UK survey\cite{abeler2020Support} showed that just under 75\% of respondents who owned a mobile phone would probably or definitely install a contact tracing app. Interestingly, according to the survey, making the choice opt-out rather than opt-in (as will be the case once contact tracing is integrated directly into the phone's operating system) did not change the proportion. Given this, if 75\% of smart-phone owners install the app (or leave contact tracing enabled), then we would expect only 51\% of close contacts to be between parties who both have a smart phone with contact tracing enabled. Since contact tracing needs to be enabled on both phones to record a contact, this is the expected proportion of contacts that can be traced under these assumptions. With this proportion of close contacts successfully traced, the models show it is far from guaranteed that contact tracing will be effective, and so smart phone tracing should not be relied upon to prevent a resurgence without other measures.
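These proportions follow directly from the independence assumptions above:
\[
0.88^2 \approx 0.77,
\qquad
0.90 \times 0.75^2 \approx 0.51,
\]
since both parties must own a smart phone, and both must have contact tracing enabled, for a close contact to be recorded.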
We suggest the adoption of a responsive policy where data collected from the app, and elsewhere, can be used to constantly monitor the level of suppression being achieved. In the event that indicators show the beginnings of a resurgence, stronger policies should be ready to be put in place immediately to further reduce the effective $R$ number and prevent a resurgence.
We describe four policy scenarios which could easily be implemented and demonstrate, using numerical experiments, that we would expect them to have a dramatic effect on the effectiveness of contact tracing.
\section{Policy Scenarios}
For each scenario we calculated the probability that an initial population of 100 infected agents was eradicated. Eradication was deemed to have been achieved if the cumulative number of cases remained below 5000 and there was no untraced infected population at 15 weeks into the simulation. The probability of eradication was estimated by performing a Monte-Carlo run of 500 simulations and counting the proportion that achieved eradication.
Although there is uncertainty in all the model parameters, sensitivity analysis of our model showed that the probability of eradication was very sensitive to $R0$ and to the total proportion of asymptomatic transmission (i.e. if $p_a$ is the proportion of sub-clinical cases, $p_p$ is the proportion of pre-symptomatic transmission among people who eventually become symptomatic and $\rho$ is the infectiveness of a sub-clinical case relative to a symptomatic, then the total proportion of asymptomatic transmission is $p_p + \rho p_a - \rho p_ap_p$). So we show contour maps of the probability of eradication over the likely range of these values. We found that the probability of control was \textbf{not} very sensitive to the ratio of asymptomatics to pre-symptomatics once the proportion of asymptomatic transmission was fixed, so in the simulations we assumed that $p_a = p_p$.
\subsection{Baseline scenario}
In the baseline scenario, people are instructed to self-isolate if they become symptomatic and immediately take an antigen test. If positive, the whole household is instructed to self-isolate and also take tests. Anyone who tests positive must self isolate but anyone can opt not to take a test. If negative, the person is told to isolate and is re-tested in 4 days time. If this test is also negative, the person can come out of isolation.
Antigen tests are assumed to give an immediate result. If the test is taken before infectiveness reaches 2\% per day, then the test will be negative even if the person will eventually develop the disease. After this time the test is 85\% accurate (i.e. there is a 15\% chance of a false negative).
Confirmed cases who have a mobile phone with contact tracing use the app to alert all recorded close contacts. Close contacts will then be instructed to self isolate and take tests as above.
We assume that, due to government advertising, take-up of the app among smart phone users is 80\%. 20\% of the population are assumed not to comply with either self-isolation or installation of any tracing app (even if they have a smart phone). 95\% of agents are assumed to have a smart phone (meaning 90\% of close contacts are between two agents that have a smart phone, as discussed in the introduction). These figures correspond to 58\% of contacts being traced.
Figure \ref{baseline} shows the contour map of probability of eradication for this scenario for a range of $R0$ and total asymptomatic transmission. For almost all values, contact tracing is unlikely to prevent another outbreak.
\begin{figure}
\begin{center}
\includegraphics[width = 10cm]{baseline.pdf}
\end{center}
\caption{Probability of eradication under baseline scenario}
\label{baseline}
\end{figure}
\subsection{Workplace symptom reporting}
This scenario is the same as the baseline but with the addition that companies and schools have a duty to test an employee/student if they are seen to be symptomatic. Antigen test kits could be kept on-site. We assume in this scenario that a symptomatic person who elects not to self-isolate has a 90\% chance of being identified at work.
As can be seen from Figure \ref{workplaceSymptom}, this dramatically improves the probability of eradication over the baseline.
\begin{figure}
\begin{center}
\includegraphics[width = 10cm]{workplaceSymptomMonitoring.pdf}
\end{center}
\caption{Probability of eradication under workplace symptom monitoring}
\label{workplaceSymptom}
\end{figure}
\subsection{Whole household testing}
This scenario is the same as workplace symptom reporting but with the addition that if a person tests positive, their whole household must be tested. In this case testing is mandatory rather than optional, so people who would not otherwise comply with an instruction to be tested will in this scenario be tested. See figure \ref{householdAndWorkplaceSymptom}.
\begin{figure}
\begin{center}
\includegraphics[width = 10cm]{householdEnforcement.pdf}
\end{center}
\caption{Probability of eradication under whole household testing and workplace symptom monitoring}
\label{householdAndWorkplaceSymptom}
\end{figure}
\subsection{Whole household testing and workplace tracing}
In this scenario, all the above measures are in place but companies and schools have an additional duty to ensure contact tracing in the workplace. This may consist of issuing a bluetooth device (not necessarily a smart phone) to employees or students who don't have a smart phone or who elect not to install the app. The device would be carried whenever the person is in the workplace. See figure \ref{householdAndWorkplaceEnforcement} for the probability of eradication under this scenario
\begin{figure}
\begin{center}
\includegraphics[width = 10cm]{workplaceAndHouseholdEnforcement.pdf}
\end{center}
\caption{Probability of eradication under whole household testing and workplace tracing}
\label{householdAndWorkplaceEnforcement}
\end{figure}
\section{Description of the Model}
The model used is based on the stochastic branching model described in \cite{hellewellfeasibility} but implemented as an agent-based, discrete event simulation. This allowed us to implement more complex containment strategies with less effort at the cost of execution speed. It also allows us to correctly capture the tracing of infected agents via a previously untraced mutual infector, which is not properly captured in a stochastic branching model. In the presence of many asymptomatic carriers, and very fast and accurate tracing, this is expected to be important.
The model consists of infected agents, each of which belongs to a household. Once infected, an agent goes through an incubation period with duration drawn from a Weibull distribution with shape parameter $2.322737$ and scale parameter $6.492272$\cite{backer2020incubation}. The transmission generation interval (i.e. time from exposure to transmission) is drawn from a skew normal distribution with location parameter equal to the clinical onset time (i.e. end of the incubation period) and scale parameter of 2.0. In order to avoid unrealistically early transmissions, the generation interval was bounded to a minimum of 1 day. A proportion of agents are asymptomatic; these will never have clinical onset and are assumed to be $\frac{2}{3}$ as infectious as symptomatic carriers\cite{ferguson2020impact}. The number of susceptible agents that an infected agent will infect if not isolated is drawn from a negative binomial distribution with overdispersion parameter $10.0$\cite{zhuang2020preliminary}\cite{riou2020pattern} and mean of $\frac{3R_0}{3 - \rho}$ for symptomatic agents and $\frac{2R_0}{3 - \rho}$ for asymptomatic agents, where $\rho$ is the probability of being asymptomatic and $R_0$ is the basic reproductive number. At each transmission event a new infected agent is created, unless the infecting agent is isolated, in which case the event has no effect. Following \cite{Ferrettieabb6936}, 10\% of transmission is ``environmental'' (i.e. via surfaces, air-conditioning etc.) meaning that it cannot be traced to the infector even if they have the app installed.
Each transmission event occurs either in the household, at the workplace/school or in the community. It is assumed that 5\% of the population have immunity from a previous outbreak. After an outbreak we would expect there to be a correlation in immunity between members of the same household since during the peak, under ``stay at home'' rules, if one member of a household contracts the disease it is likely that all other members will also contract it, so we end up with immune and susceptible households. This means that only members of susceptible households can become infected during the contact-tracing stage, so we assume no household members of an infected agent are immune. The relative probability of transmission in the household is assumed to be 3 times greater in the household than in the other locations. This was calibrated in order to obtain equal aggregate numbers of transmission events in each location under no intervention\cite{ferguson2020impact}. Although studies from Korea and US suggest this factor may be higher\cite{OsongReport}\cite{burke2020active}, the uncertainty is high and we keep this at 3 for the UK due to the prevalence of HMOs in the UK. The distribution of number of members in a household was calibrated against\cite{smithHouseholds}.
The source code of the model is available at \href{https://github.com/danftang/Covid19}{https://github.com/danftang/Covid19}
\section{Discussion}
We have suggested a number of increasingly effective, but also increasingly draconian measures that could be used to improve the chances that contact tracing is successful in avoiding another resurgence of COVID-19. It is envisaged that the measures would be implemented in response to the evolving situation and only when absolutely necessary to avoid a resurgence. With all these measures in place, containment is likely, but if $R0$ and total asymptomatic transmission turns out to be very high further measures may be necessary. In this case, social distancing and public hygiene measures would need to be implemented in addition. Although some of the measures discussed here may seem authoritarian, they must be put in the context of the situation we face. Life with contact tracing will be very close to normal for the vast majority of the population and will be much less socially disruptive, and will impinge much less on personal freedoms, than a national lock-down.
%\bibliographystyle{unsrtnat}
\bibliographystyle{unsrturl}
%\bibliographystyle{alpha}
%\bibliographystyle{plainurl}
%\bibliographystyle{apalike}
%\bibliographystyle{apacite}
\bibliography{references}
\end{document}
\documentclass{article}
\usepackage{overture}%vdmsl-2e}
\usepackage{fullpage}
\title{A Telephone Exchange Specification in VDM-SL}
\author{Bernhard K. Aichernig}
\date{November 1998}
\begin{document}
\maketitle
The following example has been taken from \cite{Abrial96} and has been
translated from the B-notation into VDM-SL \cite{Fitzgerald&98, Jones90}. It
demonstrates how an event-based system may be modeled using the
specification language of the Vienna Development Method. In the following,
operations specify the events which can be initiated either by the system or
by a subscriber (user). An implicit style using pre- and post-conditions has
been chosen, in order to model the system's state transitions.
The model of the telephone
exchange is centred around a set of $subscribers$ who may be engaged in
telephone conversations through a network controlled by an exchange.
\section{Informal Specification}
As the communication between two subscribers is not established immediately,
each subscriber navigates through a variety of statuses, which we study in
detail in what follows.
\begin{description}
\item[Free Subscribers] A subscriber might be free ($FR$),
meaning that he is not engaged in any telephone conversation or attempting to
do so.
\item[Unavailable Subscribers] A subscriber who is temporarily unavailable
($UN$). Subscribers may enter this status as a result of a spontaneous
decision on the part of the exchange. This happens when a subscriber has been
attempting to call another subscriber unsuccessfully for too long a period of
time. We may also enter this status at the end of some conversation between
two subscribers.
\item[Initiators or Recipients] Non-free and non-unavailable subscribers are
either initiators or recipients of telephone calls. An initiator is the only
one able to terminate a telephone conversation. By contrast, a recipient
hanging up just suspends the conversation, which may be resumed as soon as
he again lifts his handset.
\item[Attempting, Waiting or Speaking Initiators]
An initiator may enter into various sub-states: he might be attempting ($AI$)
to call somebody (e.g. dialing), or waiting $WI$ for somebody to answer
(connection established and the phone rings), or finally
speaking $SI$ to somebody.
\item[Waiting or Speaking Recipients] A recipient might be speaking ($SR$) or
waiting ($WR$), because his own telephone is ringing, or because he has
suspended an already engaged telephone conversation.
\end{description}
\include{generated/latex/specification/telephone.vdmsl}
\section{Typical Scenarios}
Finally, some typical sequences of valid events are listed:\\ \\
Lift $\longrightarrow$ ClearAttempt \\
\\
Lift $\longrightarrow$ MakeUn $\longrightarrow$ ClearUn \\
\\
Lift $\longrightarrow$ Connect $\longrightarrow$ ClearWait\\
\\
Lift $\longrightarrow$ Connect $\longrightarrow$ Answer $\longrightarrow$ ClearSpeak $\longrightarrow$ ClearUn\\
\\
Lift $\longrightarrow$ Connect $\longrightarrow$ Answer $\longrightarrow$ Suspend $\longrightarrow$ ClearWait
\bibliographystyle{plain}
\bibliography{telephone}
\end{document}
% Options for packages loaded elsewhere
\PassOptionsToPackage{unicode}{hyperref}
\PassOptionsToPackage{hyphens}{url}
%
\documentclass[
]{book}
\usepackage{lmodern}
\usepackage{amssymb,amsmath}
\usepackage{ifxetex,ifluatex}
\ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{textcomp} % provide euro and other symbols
\else % if luatex or xetex
\usepackage{unicode-math}
\defaultfontfeatures{Scale=MatchLowercase}
\defaultfontfeatures[\rmfamily]{Ligatures=TeX,Scale=1}
\fi
% Use upquote if available, for straight quotes in verbatim environments
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
\IfFileExists{microtype.sty}{% use microtype if available
\usepackage[]{microtype}
\UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts
}{}
\makeatletter
\@ifundefined{KOMAClassName}{% if non-KOMA class
\IfFileExists{parskip.sty}{%
\usepackage{parskip}
}{% else
\setlength{\parindent}{0pt}
\setlength{\parskip}{6pt plus 2pt minus 1pt}}
}{% if KOMA class
\KOMAoptions{parskip=half}}
\makeatother
\usepackage{xcolor}
\IfFileExists{xurl.sty}{\usepackage{xurl}}{} % add URL line breaks if available
\IfFileExists{bookmark.sty}{\usepackage{bookmark}}{\usepackage{hyperref}}
\hypersetup{
pdftitle={Bayesian Inference with Bayes Factors},
pdfauthor={Dan MacLean},
hidelinks,
pdfcreator={LaTeX via pandoc}}
\urlstyle{same} % disable monospaced font for URLs
\usepackage{color}
\usepackage{fancyvrb}
\newcommand{\VerbBar}{|}
\newcommand{\VERB}{\Verb[commandchars=\\\{\}]}
\DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}}
% Add ',fontsize=\small' for more characters per line
\usepackage{framed}
\definecolor{shadecolor}{RGB}{248,248,248}
\newenvironment{Shaded}{\begin{snugshade}}{\end{snugshade}}
\newcommand{\AlertTok}[1]{\textcolor[rgb]{0.94,0.16,0.16}{#1}}
\newcommand{\AnnotationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\AttributeTok}[1]{\textcolor[rgb]{0.77,0.63,0.00}{#1}}
\newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}}
\newcommand{\BuiltInTok}[1]{#1}
\newcommand{\CharTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\CommentTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}}
\newcommand{\CommentVarTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\ConstantTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\ControlFlowTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}}
\newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{#1}}
\newcommand{\DecValTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}}
\newcommand{\DocumentationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\ErrorTok}[1]{\textcolor[rgb]{0.64,0.00,0.00}{\textbf{#1}}}
\newcommand{\ExtensionTok}[1]{#1}
\newcommand{\FloatTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}}
\newcommand{\FunctionTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\ImportTok}[1]{#1}
\newcommand{\InformationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}}
\newcommand{\NormalTok}[1]{#1}
\newcommand{\OperatorTok}[1]{\textcolor[rgb]{0.81,0.36,0.00}{\textbf{#1}}}
\newcommand{\OtherTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{#1}}
\newcommand{\PreprocessorTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}}
\newcommand{\RegionMarkerTok}[1]{#1}
\newcommand{\SpecialCharTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\SpecialStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\StringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\VariableTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}}
\newcommand{\VerbatimStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}}
\newcommand{\WarningTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\usepackage{longtable,booktabs}
% Correct order of tables after \paragraph or \subparagraph
\usepackage{etoolbox}
\makeatletter
\patchcmd\longtable{\par}{\if@noskipsec\mbox{}\fi\par}{}{}
\makeatother
% Allow footnotes in longtable head/foot
\IfFileExists{footnotehyper.sty}{\usepackage{footnotehyper}}{\usepackage{footnote}}
\makesavenoteenv{longtable}
\usepackage{graphicx,grffile}
\makeatletter
\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi}
\def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi}
\makeatother
% Scale images if necessary, so that they will not overflow the page
% margins by default, and it is still possible to overwrite the defaults
% using explicit options in \includegraphics[width, height, ...]{}
\setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio}
% Set default figure placement to htbp
\makeatletter
\def\fps@figure{htbp}
\makeatother
\setlength{\emergencystretch}{3em} % prevent overfull lines
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\setcounter{secnumdepth}{5}
\usepackage{booktabs}
\usepackage{tcolorbox}
\usepackage{quotchap}
\usepackage[T1]{fontenc}
\usepackage{amsthm}
\makeatletter
\def\thm@space@setup{%
\thm@preskip=8pt plus 2pt minus 4pt
\thm@postskip=\thm@preskip
}
\makeatother
\setmainfont[UprightFeatures={SmallCapsFont=AlegreyaSC-Regular}]{Alegreya}
\renewcommand{\textfraction}{0.05}
\renewcommand{\topfraction}{0.8}
\renewcommand{\bottomfraction}{0.8}
\renewcommand{\floatpagefraction}{0.75}
\let\oldhref\href
\renewcommand{\href}[2]{#2\footnote{\url{#1}}}
\newenvironment{task}
{ \begin{tcolorbox}[title=For you to do,title filled] }
{ \end{tcolorbox} }
\newenvironment{reader}
{ \begin{tcolorbox}[colbacktitle=red!50!white,
title=huh?,coltitle=white,
fonttitle=\bfseries] }
{ \end{tcolorbox} }
% \newenvironment{roundup}
% { \begin{tcolorbox}[colbacktitle=yellow!50!white,title=Round Up,title filled] }
%{ \end{tcolorbox} }
\newenvironment{myquote}
{\begin{large}
\begin{itshape}
\begin{minipage}{6cm}
}
{
  \vspace{15mm}
\end{minipage}
\end{itshape}
\end{large}
}
\newenvironment{sidenote}
{ \begin{tcolorbox}[colbacktitle=blue!50!white,
title=huh?,coltitle=white,
fonttitle=\bfseries] }
{ \end{tcolorbox} }
\newenvironment{roundup}
{ \begin{tcolorbox}[colbacktitle=yellow!50!white,
title=Round Up,coltitle=black,
fonttitle=\bfseries] }
{ \end{tcolorbox} }
\usepackage[]{natbib}
\bibliographystyle{apalike}
\title{Bayesian Inference with Bayes Factors}
\author{Dan MacLean}
\date{2021-03-03}
\begin{document}
\maketitle
{
\setcounter{tocdepth}{1}
\tableofcontents
}
\hypertarget{setting-up}{%
\chapter{Setting up}\label{setting-up}}
The primary purpose of this course is to help you to understand how to use statistics in your research. The course will try to explain an approach called Bayesian Inference using Bayes Factors, which is complementary to the normal sort of hypothesis test procedures and addresses some of the criticisms of those methods.
Statistics is a computationally heavy topic, so we'll be making use of the R statistical programming environment to do that side of the work. The rest of this chapter will help you get that set up on your own computer.
\hypertarget{prerequisites}{%
\section{Prerequisites}\label{prerequisites}}
\hypertarget{knowledge-prerequisites}{%
\subsection{Knowledge prerequisites}\label{knowledge-prerequisites}}
There are no specific knowledge prerequisites for this book but it will be very helpful if you have read and worked through the \texttt{ggplot}, \texttt{Intro\ to\ Stats} and \texttt{Estimation\ Statistics} books and are familiar with R use.
\hypertarget{software-prerequisites}{%
\subsection{Software prerequisites}\label{software-prerequisites}}
You need to install the following stuff for this book:
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
R
\item
RStudio
\item
Some R packages: \texttt{devtools}, \texttt{tidyverse} and \texttt{BayesFactor} and \texttt{simplebf}
\end{enumerate}
\hypertarget{installing-r}{%
\section{Installing R}\label{installing-r}}
Follow this link and install the right version for your operating system: \url{https://www.stats.bris.ac.uk/R/}
\hypertarget{installing-rstudio}{%
\section{Installing RStudio}\label{installing-rstudio}}
Follow this link and install the right version for your operating system: \url{https://www.rstudio.com/products/rstudio/download/}
\hypertarget{installing-r-packages-in-rstudio}{%
\section{Installing R packages in RStudio}\label{installing-r-packages-in-rstudio}}
\hypertarget{standard-packages}{%
\subsection{Standard packages}\label{standard-packages}}
In the RStudio console, type
\texttt{install.packages(c("tidyverse",\ "devtools",\ "BayesFactor"))}
and these packages should install. Once that is done, type
\texttt{devtools::install\_github("danmaclean/simplebf")}
to install the final package.
\hypertarget{motivation}{%
\chapter{Motivation}\label{motivation}}
\hypertarget{mr.-micawbers-rule-of-statistical-inference}{%
\section{Mr.~Micawber's rule of statistical inference}\label{mr.-micawbers-rule-of-statistical-inference}}
We really do need to move away from \(p\)-values as a gold-standard of truth in experimental science. The ruinous role of the \(p\)-value in modern science cannot be overstated. This one value is responsible for happiness and despair in equal measure. They say money is the root of all unhappiness, and Dickens's Mr.~Micawber had this to say about the role of money in life:
\begin{quote}
`Annual income 20 pounds, annual expenditure 19 {[}pounds{]} 19 shillings and six pence, result happiness. Annual income 20 pounds, annual expenditure 20 pounds ought and six, result misery.'
\end{quote}
In science a corollary exists:
\begin{quote}
`\(p\) below 0.05, result success, papers, grants, and tenure. \(p\) above 0.05 result failure, misery, ignominy, and rejection.'
\end{quote}
The truth is that \(p < 0.05\) is an entirely arbitrary cut-off and is not in itself a helpful or meaningful value. Various scientific communities, led by publishing requirements, have accepted \(p < 0.05\) as a gold standard of truth against sense and often against rigour. With Bayesian tests we will be able to do away with \(p\)-values and confidence intervals completely and in their place use a more evidence-based approach to making inferences.
\hypertarget{learning-to-select-hypotheses-using-bayesian-approaches}{%
\section{Learning to select hypotheses using Bayesian approaches}\label{learning-to-select-hypotheses-using-bayesian-approaches}}
The sort of statistics that most experimental science students are taught are called `Frequentist Statistics'. They include the \(t\)-tests, ANOVA and \(\chi^2\)-tests and the linear models that we have studied already.
The inferential approach (how we make decisions about data) in the Frequentist paradigm is often criticised for being weak and is often abused. Although the abuse is as much a consequence of convention in the scientific literature and publishing, and of the misinterpretation of \(p\)-values by generations of scientists, as it is of any philosophical weakness of the methods themselves, the weaknesses persist and over time other paradigms have emerged.
We have seen an alternative in Estimation Statistics, in this course we will look at another - Bayesian Inference. We will use Bayes Factors to compare levels of evidence for one hypothesis over another, rather than just accepting or rejecting a simplistic null hypothesis.
The advantage of this will be that we can much more directly select between specific hypotheses that might describe our data. This will give us a much clearer idea about a question that we instinctively want to answer when we do statistics: `Which hypothesis is most likely true?' We will see that we can formulate this in lots of ways, but in general the hypotheses we want to compare will be something along the lines of some measured quantity being different in different samples. With Frequentist Inference we can only ask the roundabout question, `How often does the difference we observe occur by chance?' and, if it isn't likely, say so. With Bayes Factors we will be able to compare competing hypotheses directly and reject the less likely outright.
\hypertarget{r-fundamentals}{%
\chapter{R Fundamentals}\label{r-fundamentals}}
\hypertarget{about-this-chapter}{%
\section{About this chapter}\label{about-this-chapter}}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
Questions:
\end{enumerate}
\begin{itemize}
\tightlist
\item
How do I use R?
\end{itemize}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\setcounter{enumi}{1}
\tightlist
\item
Objectives:
\end{enumerate}
\begin{itemize}
\tightlist
\item
Become familiar with R syntax
\item
Understand the concepts of objects and assignment
\item
Get exposed to a few functions
\end{itemize}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\setcounter{enumi}{2}
\tightlist
\item
Keypoints:
\end{enumerate}
\begin{itemize}
\tightlist
\item
R's capabilities are provided by functions
\item
R users call functions and get results
\end{itemize}
\hypertarget{working-with-r}{%
\section{Working with R}\label{working-with-r}}
In this workshop we'll use R in the extremely useful RStudio software. For the most part we'll work interactively, meaning we'll type stuff straight into the R console in RStudio (Usually this is a window on the left or lower left) and get our results there too (usually in the console or in a window on the right).
Panels like the ones below mimic the interaction with R and first show the thing to type into R, and below the calculated result from R.
Let's look at how R works by using it for its most basic job - as a calculator:
\begin{Shaded}
\begin{Highlighting}[]
\DecValTok{3} \OperatorTok{+}\StringTok{ }\DecValTok{5}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 8
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\DecValTok{12} \OperatorTok{*}\StringTok{ }\DecValTok{2}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 24
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\DecValTok{1} \OperatorTok{/}\StringTok{ }\DecValTok{3}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 0.3333333
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\DecValTok{12} \OperatorTok{*}\StringTok{ }\DecValTok{2}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 24
\end{verbatim}
Fairly straightforward: we type in the expression and we get a result. That's how this whole book will work: you type the stuff in and get answers out. It'll be easiest to learn if you go ahead and copy the examples one by one. Try to resist the urge to use copy and paste. Typing longhand really encourages you to look at what you're entering.
As far as the R output itself goes, it's really straightforward - it's just the answer with a \texttt{{[}1{]}} stuck on the front. This \texttt{{[}1{]}} tells us how many items through the output we are. Often R will return long lists of numbers and it can be helpful to have this extra information.
\hypertarget{variables}{%
\section{Variables}\label{variables}}
We can save the output of operations for later use by giving it a name using the assignment symbol \texttt{\textless{}-}. Read this symbol as `gets', so \texttt{x\ \textless{}-\ 5} reads as `x gets 5'. These names are called variables, because the value they are associated with can change.
Let's give five a name, \texttt{x}, then refer to the value 5 by its name. We can then use the name in place of the value. In the jargon of computing we say we are assigning a value to a variable.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{ x <-}\StringTok{ }\DecValTok{5}
\NormalTok{ x}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 5
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{ x }\OperatorTok{*}\StringTok{ }\DecValTok{2}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 10
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{y <-}\StringTok{ }\DecValTok{3}
\NormalTok{x }\OperatorTok{*}\StringTok{ }\NormalTok{y}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 15
\end{verbatim}
This is of course of limited value with just numbers but is of great value when we have large datasets, as the whole thing can be referred to by the variable.
\hypertarget{using-objects-and-functions}{%
\subsection{Using objects and functions}\label{using-objects-and-functions}}
At the top level, R is a simple language with two types of thing: functions and objects. As a user you will use functions to do stuff, and get back objects as an answer. Functions are easy to spot: they are a name followed by a pair of brackets. For example, \texttt{mean()} is the function for calculating a mean. The options (or arguments) for the function go inside the brackets:
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{sqrt}\NormalTok{(}\DecValTok{16}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 4
\end{verbatim}
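Functions often take more than one argument, and arguments can be given by name. A small sketch with \texttt{round()} (any function with named arguments would do):

\begin{verbatim}
round(3.14159, digits = 2)
## [1] 3.14
\end{verbatim}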
Often the result from a function will be more complicated than a simple number object; often it will be a vector (a simple list), like the one from the \texttt{rnorm()} function, which returns lists of random numbers.
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{rnorm}\NormalTok{(}\DecValTok{100}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] -0.049885282 1.270584212 0.797462441 0.465722379 -0.369924286
## [6] -0.459561716 -0.447608851 2.145641022 -1.095624405 0.367848480
## [11] -1.262783346 -0.307719066 0.679461240 -0.123659119 -1.997011501
## [16] 0.536039248 -0.705670746 1.494103968 0.158163144 -1.217636177
## [21] -1.313132144 1.255012509 -0.748925886 -0.725726403 -0.035317880
## [26] 0.985956058 0.008657156 1.109026623 -1.777462419 -1.216127378
## [31] -0.703466946 -0.264061698 1.242463863 1.243173082 2.581177187
## [36] 0.345252844 -0.672772657 1.119360603 -0.579203077 -1.693862596
## [41] -0.162001776 -0.478942066 -2.210744537 0.289136091 -0.797079058
## [46] -1.172711182 0.236676839 0.738499852 -0.656039510 -0.686952806
## [51] 0.197099051 1.996565059 -0.467590312 0.875710064 1.107877775
## [56] -0.284758938 0.787981182 1.464123799 0.634736102 -0.094614574
## [61] 0.221175301 -0.471532478 -0.798274788 -2.013496644 -1.054848841
## [66] 1.167303807 -2.740434240 -0.252747507 -1.081027056 -1.256887751
## [71] 1.152733987 0.767035978 0.605015748 -1.074426668 -1.647779808
## [76] -0.218253640 -1.573908314 -0.456003963 0.390359078 -0.238416304
## [81] -0.158573846 -0.645208404 0.209870562 -0.490466613 1.493137881
## [86] 1.091973035 1.615399566 0.844028669 0.052255027 -1.230333314
## [91] 0.796016738 0.106064698 -0.724492593 2.296869328 0.714089944
## [96] -1.027804167 -0.794503548 -0.263306800 -1.286584057 -2.654550533
\end{verbatim}
We can combine objects, variables and functions to do more complex stuff in R, here's how we get the mean of 100 random numbers.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{numbers <-}\StringTok{ }\KeywordTok{rnorm}\NormalTok{(}\DecValTok{100}\NormalTok{)}
\KeywordTok{mean}\NormalTok{(numbers)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 0.1275872
\end{verbatim}
Here we created a vector object with \texttt{rnorm(100)} and assigned it to the variable \texttt{numbers}. We then used the \texttt{mean()} function, passing it the variable \texttt{numbers}. The \texttt{mean()} function returned the mean of the hundred random numbers.
\hypertarget{dataframes}{%
\section{Dataframes}\label{dataframes}}
One of the more common objects that R uses is a dataframe. The dataframe is a rectangular, table-like object that contains data; think of it like a spreadsheet tab. Like the spreadsheet, the dataframe has rows and columns; the columns have names and the different columns can have different types of data in them. Here's a little one:
\begin{verbatim}
## names age score
## 1 Guido 24 43.34179
## 2 Marty 45 86.77330
## 3 Alan 11 39.47193
\end{verbatim}
Usually we get a dataframe by loading in data from an external source or as a result from functions; occasionally we'll want to hand-make one, which can be done with various functions, \texttt{data.frame} being the most common.
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{data.frame}\NormalTok{(}
\DataTypeTok{names =} \KeywordTok{c}\NormalTok{(}\StringTok{"Guido"}\NormalTok{, }\StringTok{"Marty"}\NormalTok{, }\StringTok{"Alan"}\NormalTok{),}
\DataTypeTok{age =} \KeywordTok{c}\NormalTok{(}\DecValTok{24}\NormalTok{,}\DecValTok{45}\NormalTok{,}\DecValTok{11}\NormalTok{),}
\DataTypeTok{score =} \KeywordTok{runif}\NormalTok{(}\DecValTok{3}\NormalTok{) }\OperatorTok{*}\StringTok{ }\DecValTok{100}
\NormalTok{)}
\end{Highlighting}
\end{Shaded}
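More commonly, though, we get a dataframe by reading data in from a file. A minimal sketch, assuming a hypothetical comma-separated file called \texttt{my\_data.csv} in the working directory:

\begin{verbatim}
# read a CSV file into a dataframe and peek at the first few rows
my_data <- read.csv("my_data.csv")
head(my_data)
\end{verbatim}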
\hypertarget{packages}{%
\section{Packages}\label{packages}}
Many of the tools we use will come in R packages, little nuggets of code that group related functions together. Installing new packages can be done using the \texttt{Packages} pane of RStudio or the \texttt{install.packages()} function. When we wish to use that code we use the \texttt{library()} function.
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{library}\NormalTok{(somepackage)}
\end{Highlighting}
\end{Shaded}
\hypertarget{using-r-help}{%
\section{Using R Help}\label{using-r-help}}
R provides a command, \texttt{?}, that will display the documentation for functions. For example \texttt{?mean} will display the help for the \texttt{mean()} function.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{?mean}
\end{Highlighting}
\end{Shaded}
As in all programming languages, the internal documentation in R is written with some assumption that the reader is familiar with the language. This can be a pain when you are starting out as the help will seem a bit obscure at times. Don't worry about this; usually the \texttt{Examples} section will give you a good idea of how to use the function, and as your experience grows more things will make sense.
\begin{roundup}
\begin{itemize}
\tightlist
\item
R is an excellent and powerful statistical computing environment
\end{itemize}
\end{roundup}
\begin{task}
Complete the interactive tutorial online \url{https://danmaclean.shinyapps.io/r-start}
\end{task}
\hypertarget{bayesian-inference}{%
\chapter{Bayesian Inference}\label{bayesian-inference}}
\hypertarget{about-this-chapter-1}{%
\section{About this chapter}\label{about-this-chapter-1}}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
Questions
\begin{itemize}
\tightlist
\item
What is probability?
\item
What does Bayes Theorem do?
\item
How can we compare hypotheses about data?
\end{itemize}
\item
Objectives
\begin{itemize}
\tightlist
\item
Understand the differences between Frequentist and Bayesian probability
\item
Get an appreciation of Bayes Theorem
\item
Understand what a Bayes Factor represents
\end{itemize}
\item
Keypoints
\begin{itemize}
\tightlist
\item
Probability can be based on frequency of events \emph{or} the level of knowledge we have about a thing
\item
Bayes Theorem gives a likelihood based on evidences that can change
\item
Bayes Factors are useful in comparing hypothesis about the same evidence
\end{itemize}
\end{enumerate}
\hypertarget{frequentist-and-bayesian-interpretations-of-probability}{%
\section{Frequentist and Bayesian Interpretations of Probability}\label{frequentist-and-bayesian-interpretations-of-probability}}
It may seem like a strange question to ask, but what, exactly, is probability? Whatever it is, it certainly isn't a solid thing that we could carry in a bucket. Probability is a strange and often ill-defined concept that can get very confusing when one starts to think deeply about it. When asked what probability is, people will generally start to talk about vague concepts like chance or likelihood or randomness, or even fate. Most people will give examples of coins being thrown or dice being rolled. This ephemerality is no good when we want to use probability, so when it comes to working with it statisticians have needed to develop very precise definitions. It turns out that different ways of thinking about likelihoods can result in very different definitions of probability.
The two definitions that we will consider are those called the Frequentist and the Bayesian definitions.
\hypertarget{frequentist-probability}{%
\subsection{Frequentist Probability}\label{frequentist-probability}}
The Frequentist definition of probability is based on the frequency of occurrence of events. This is a definition that is most similar to the coin toss or dice throw intuition about probability. A probability can be stated thus
\(P(Event) = \frac{\text{number of ways event can happen}}{\text{number of all possible outcomes}}\)
So in a coin toss, we might get the following probability of getting `heads'
\(P(heads) = \frac{\text{number of heads on the coin}}{\text{number of sides to the coin}}\)
which, of course, computes as
\(P(heads) = \frac{1}{2}\)
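The same logic gives the probability of rolling a six with a fair die:

\(P(six) = \frac{\text{number of faces showing six}}{\text{number of faces}} = \frac{1}{6}\)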
Thinking of probabilities in this way is similar to a gambler who plays games of chance like roulette or craps, where the odds of winning are entirely based on the outcome of a simple random process.
This is so simple and intuitive that we might be tempted to think it's the natural way to think about probabilities, but there are other definitions.
\hypertarget{bayesian-probablity}{%
\subsection{Bayesian Probability}\label{bayesian-probablity}}
The Bayesian definition of probability is different: it takes probability to be a reasonable expectation of an event, depending on the knowledge that the observer has. You might understand these probabilities similarly to a gambler who bets on horse races and changes their assessment of a horse's winning ability based on the conditions of the ground and the weight of the jockey. These are trickier to understand than the Frequentist definition but an example can be helpful.
Consider that you and a friend are playing cards and that your friend claims to be able to guess the identity of a card that you draw and replace. A frequentist probability would say that the probability of this was \(P(correct) = \frac{1}{52}\). However, you know that your friend is an amateur magician, so you expect that the probability of a correct guess would be much higher than that. That is to say that you have a different reasonable expectation because you have incorporated prior knowledge into your working. Bayesian Probability is based on this prior knowledge and updating of belief based on that knowledge to come up with a posterior likelihood of an event.
In rough terms the answer, a `posterior probability', is arrived at by combining a `prior probability' and `evidence'. In the card guess example the `prior probability' was the raw chance-based probability that anyone would guess the card, \(\frac{1}{52}\), the `evidence' was the fact that your friend was an amateur magician, and the `posterior probability' was the updated `prior probability', namely that the chance of guessing was higher than \(\frac{1}{52}\).
One problem we might spot is how exactly we update our probability to actually get a measure of the posterior. A formula known as Bayes Theorem lets us do the calculation, but it can be very hard to get the actual numbers we need for evidence and this can be a barrier to using Bayes in the real world. However, let's work one calculation through with some assumed numbers to get a feel.
\hypertarget{bayes-theorem-by-rough-example}{%
\section{Bayes Theorem by Rough Example}\label{bayes-theorem-by-rough-example}}
The mathematical basis of calculating a posterior belief or likelihood is a formula called Bayes Theorem, which, using our card example, defines the posterior as
\(P(correct | magician)\)
which reads as the probability of a guess being correct once you know you are working with a magician.
It defines the prior as
\(P(correct)\)
which reads as the probability of being correct in a random guess (which we know to be \(\frac{1}{52}\))
And it defines the evidence as
\(P(magician|correct)\)
which reads as the probability of the person being a magician given that a guess was correct. This is the number which can be hardest to work out in general, though in this case we might say it is quite high, say 0.9.
Bayes Theorem then works out the posterior probability given these numbers. There is a very famous formula for this, which I won't include here for simplicity's sake, but it is very interesting. We can take a short cut and use R to work out the posterior from the prior and the evidence as follows
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{library}\NormalTok{(LaplacesDemon)}
\NormalTok{prior <-}\StringTok{ }\KeywordTok{c}\NormalTok{(}\DecValTok{51}\OperatorTok{/}\DecValTok{52}\NormalTok{,}\DecValTok{1}\OperatorTok{/}\DecValTok{52}\NormalTok{) }
\NormalTok{evidence <-}\StringTok{ }\KeywordTok{c}\NormalTok{(}\FloatTok{0.9}\NormalTok{, }\FloatTok{0.1}\NormalTok{)}
\KeywordTok{BayesTheorem}\NormalTok{(prior, evidence)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## [1] 0.997826087 0.002173913
## attr(,"class")
## [1] "bayestheorem"
\end{verbatim}
As it is the first reported number we want, we can see that we get a 99\% posterior probability that the guess will be correct if we know that 90\% of correct guessers are magicians.
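For the curious, the arithmetic behind that output is simple renormalisation (assuming, as the numbers above bear out, that the function multiplies each prior by its evidence and rescales so the posteriors sum to one):

\(\text{posterior}_i = \frac{\text{prior}_i \times \text{evidence}_i}{\sum_j \text{prior}_j \times \text{evidence}_j}\), so the first reported value is \(\frac{\frac{51}{52} \times 0.9}{\frac{51}{52} \times 0.9 + \frac{1}{52} \times 0.1} \approx 0.998\).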
The key thing to take away here is that the Bayesian Probability allows us to modify our view based on changes in the evidence. This is a key attribute as we can use it to compare the resulting posteriors from different evidences. In other words it allows us to compare different hypotheses based on different evidence to see which is the more likely.
\hypertarget{hypotheses-in-frequentist-and-bayesian-statistics}{%
\section{Hypotheses in Frequentist and Bayesian Statistics}\label{hypotheses-in-frequentist-and-bayesian-statistics}}
Now that we know Bayes Statistics allow for updating our beliefs in the light of different evidence we can look at how we can formulate hypotheses to take advantage of this and do something very different with Bayes than we do with Frequentist ideas.
Let's recap the logic of hypothesis tests in Frequentist statistics.
\hypertarget{frequentist-hypotheses}{%
\subsection{Frequentist Hypotheses}\label{frequentist-hypotheses}}
You may recall that the first step of doing a hypothesis test like a \(t\)-test is to set up our hypotheses. The first, \(H_0\), is the null hypothesis, which represents the situation where there is no difference, and \(H_1\) is the alternative. Next we select a Null model that represents the Null hypothesis; this step is usually implicit at the operator level, comes as part of the linear model or \(t\)-test that we choose to use, and is usually based on the Normal Distribution. Our hypotheses represent the situation as follows
\begin{itemize}
\tightlist
\item
\(H_0 : \bar{x}_1 - \bar{x}_2 = 0\) IE, the sample means are equal.
\item
  \(H_1 : \bar{x}_1 - \bar{x}_2 \neq 0\) IE, the sample means are not equal.
\end{itemize}
We test \(H_0\) (the Null Hypothesis and Model) to see how likely the observed result is under that and if it is unlikely at some level (\(p\)) then we reject \(H_0\) and accept \(H_1\).
We criticised this for being weak inference in the Linear Model course. Let's do that again. In this framework, haven't we accepted \(H_1\) without analysing it? It means that we have had to set up hypotheses that are binary and that we have not compared them directly. We have a take-it-or-leave-it approach to hypotheses.
We haven't, for example, been able to ask whether \(\bar{x}_1 > \bar{x}_2\) because that wouldn't be askable under our single-test, binary paradigm. That's a limitation. As scientists we should be able to collect data and compare models or hypotheses about that data directly.
\hypertarget{bayesian-hypotheses}{%
\subsection{Bayesian Hypotheses}\label{bayesian-hypotheses}}
In the Bayesian Framework we can formulate hypotheses as we wish and compare them directly, using Bayesian probabilities to examine models with different evidences and priors. So if the evidence shows that \(H_1\) isn't any more believable than \(H_0\) we wouldn't falsely fall into the trap of believing \(H_1\) was somehow more correct.
Bayesian Hypotheses can be a bit more like this
\begin{itemize}
\tightlist
\item
  \(H_0 : \bar{x}_1 < \bar{x}_2\) IE, sample 1 has a lower mean than sample 2.
\item
  \(H_1 : \bar{x}_1 > \bar{x}_2\) IE, sample 1 has a higher mean than sample 2.
\end{itemize}
which is often much more intellectually satisfying and can lead to clearer answers than the more binary Frequentist hypotheses.
A significant limitation of the approach is the need to select and quantify the prior and the evidence, which can be crucial and lead to very different outcomes if different values are chosen.
Selection of the prior knowledge itself is very difficult and no suitable data may exist. Getting the right data is subjective in many cases and there is no one right way. Domain knowledge is important and often crucial, but this can easily lead to bias. An unwitting, careless (or, say it quietly, unscrupulous) operator could select a prior that would bias the result in favour of a preferred hypothesis. This is a form of confirmation bias, an interpretation of the data in a way that confirms your prior beliefs.
For these reasons Frequentist approaches are often the most pragmatic and \emph{a priori} transparent method, though if the priors and evidence can be collected in a non-biased way Bayesian approaches offer us excellent alternatives.
\hypertarget{bayes-factors}{%
\section{Bayes Factors}\label{bayes-factors}}
We can use Bayesian Inference through a tool known as Bayes Factors. Bayes Factors are a method of directly comparing the posteriors of different models with different evidences and priors.
Bayes Factors make a ratio of the result of one model or hypothesis over another, resulting in a single quantity that we can examine. Consider that our hypotheses above have been put through the process and a result gained thus
\begin{itemize}
\tightlist
\item
\(H_0 : \bar{x}_1 < \bar{x}_2 \leadsto Posterior = 0.2\)
\item
\(H_1 : \bar{x}_1 > \bar{x}_2 \leadsto Posterior = 0.6\)
\end{itemize}
We can clearly see that \(H_1\) has 3 times more support than \(H_0\) and we would want to accept that as a better explanation of our data.
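Written out, the ratio for this toy example is simply

\(\text{Bayes Factor} = \frac{0.6}{0.2} = 3\)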
Bayes Factors are just that: the ratio of the relative goodness of the hypotheses. From this we can make statements about the support for hypotheses. \citet{wagenmakers2011} created a table of thresholds indicating interpretations for different Bayes Factors comparing two hypotheses.
\begin{tabular}{l|l}
\hline
Bayes Factor & Interpretation\\
\hline
$>$100 & Extreme evidence for \(H_1\) compared to \(H_0\)\\
\hline
30..100 & Very Strong evidence for \(H_1\) compared to \(H_0\)\\
\hline
10..30 & Strong evidence for \(H_1\) compared to \(H_0\)\\
\hline
3..10 & Substantial evidence for \(H_1\) compared to \(H_0\)\\
\hline
1..3 & Anecdotal evidence for \(H_1\) compared to \(H_0\)\\
\hline
1 & No evidence\\
\hline
1..1/3 & Anecdotal evidence for \(H_0\) compared to \(H_1\)\\
\hline
1/3..1/10 & Substantial evidence for \(H_0\) compared to \(H_1\)\\
\hline
1/10..1/30 & Strong evidence for \(H_0\) compared to \(H_1\)\\
\hline
1/30..1/100 & Very Strong evidence for \(H_0\) compared to \(H_1\)\\
\hline
$<$1/100 & Extreme evidence for \(H_0\) compared to \(H_1\)\\
\hline
\end{tabular}
These are extremely useful, especially when used alongside other measures and interpretations, like estimation statistics, to allow us to make statistical claims.
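As a small illustration, and explicitly not part of any package, the thresholds above could be encoded in a helper function; the name \texttt{interpret\_bf} is made up for this sketch:

\begin{verbatim}
# Hypothetical helper mapping a Bayes Factor (with BF > 1 favouring H1)
# to the verbal labels in the table above
interpret_bf <- function(bf) {
  if (bf == 1) return("No evidence")
  labels <- c("Anecdotal", "Substantial", "Strong", "Very Strong", "Extreme")
  favoured <- if (bf > 1) "H1 over H0" else "H0 over H1"
  strength <- labels[findInterval(max(bf, 1 / bf), c(1, 3, 10, 30, 100))]
  paste(strength, "evidence for", favoured)
}

interpret_bf(3.39)  # "Substantial evidence for H1 over H0"
interpret_bf(0.16)  # "Substantial evidence for H0 over H1"
\end{verbatim}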
In the next chapters we will look at how to use Bayes Factors in place of common frequentist hypothesis tests.
\begin{sidenote}
The \citet{wagenmakers2011} article is fun if you can get hold of it. It's a commentary on an earlier article in which the researchers conclude that people have the ability to see into the future, a conclusion they arrive at by misapplying statistics in the same way that researchers across all fields do. Wagenmakers \emph{et al} re-perform the analysis with Bayes Factors and show that the original conclusions are unsound.
\end{sidenote}
\hypertarget{bayes-factor-t-tests}{%
\chapter{\texorpdfstring{Bayes Factor \(t\)-tests}{Bayes Factor t-tests}}\label{bayes-factor-t-tests}}
\hypertarget{about-this-chapter-2}{%
\section{About this chapter}\label{about-this-chapter-2}}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
Questions
\begin{itemize}
\tightlist
\item
  How can I compare two continuous samples with Bayes Factors?
\item
How can I specify a directional hypothesis?
\item
How much difference does the prior make?
\end{itemize}
\item
Objectives
\begin{itemize}
\tightlist
\item
Understand how a Bayes Factor \(t\)-test can be done in R
\item
Consider how \(p\) and the Bayes Factor are not contradictory
\item
Understand that hypothesis and prior selection is important
\end{itemize}
\item
Keypoints
\begin{itemize}
\tightlist
\item
The \texttt{BayesFactor} package provides functions for Bayes Factor analysis
\item
Bayes Factors and \(p\)-values ask very different questions
\item
One-tailed tests are possible and may be better options
\end{itemize}
\end{enumerate}
In this section we'll look at how we can do a \(t\)-test-like two sample comparison with Bayes Factors. The process is surprisingly straightforward but does need us to pay attention to the weaknesses of the Bayes method, specifically choosing the prior probability distribution. To actually do the tests we'll use the \texttt{ttestBF()} function in the \texttt{BayesFactor} package.
\hypertarget{a-frequentist-t-test}{%
\section{\texorpdfstring{A Frequentist \(t\)-test}{A Frequentist t-test}}\label{a-frequentist-t-test}}
To begin we'll first do a normal \(t\)-test with a sample data set as a basis for later comparison
\hypertarget{the-plant-growth-data-set}{%
\subsection{The Plant Growth data set}\label{the-plant-growth-data-set}}
You may recall the Plant Growth data set we used in the Linear Models course, here's a reminder
\begin{verbatim}
## weight group
## Min. :3.590 ctrl:10
## 1st Qu.:4.550 trt1:10
## Median :5.155 trt2:10
## Mean :5.073
## 3rd Qu.:5.530
## Max. :6.310
\end{verbatim}
\includegraphics{bayes_factors_files/figure-latex/unnamed-chunk-21-1.pdf}
We will use this as an example data set; specifically we'll use the \texttt{ctrl} and \texttt{trt2} data, which we need to extract. Note that the mean value for \texttt{trt2} looks larger than that for \texttt{ctrl}.
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{library}\NormalTok{(dplyr)}
\NormalTok{pg_small <-}\StringTok{ }\NormalTok{PlantGrowth }\OperatorTok{%>%}\StringTok{ }
\StringTok{ }\KeywordTok{filter}\NormalTok{(group }\OperatorTok{%in%}\StringTok{ }\KeywordTok{c}\NormalTok{(}\StringTok{"trt2"}\NormalTok{, }\StringTok{"ctrl"}\NormalTok{)) }\OperatorTok{%>%}\StringTok{ }
\StringTok{ }\KeywordTok{droplevels}\NormalTok{()}
\end{Highlighting}
\end{Shaded}
Let's also calculate the difference in sample means and the standardised effect size, as it will be important to know these values later.
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{library}\NormalTok{(tidyr)}
\NormalTok{pg_small }\OperatorTok{%>%}\StringTok{ }
\StringTok{ }\KeywordTok{group_by}\NormalTok{(group) }\OperatorTok{%>%}\StringTok{ }
\StringTok{ }\KeywordTok{summarise}\NormalTok{(}\DataTypeTok{mean_weight =} \KeywordTok{mean}\NormalTok{(weight)) }\OperatorTok{%>%}\StringTok{ }
\StringTok{ }\KeywordTok{pivot_wider}\NormalTok{( }\DataTypeTok{names_from =}\NormalTok{ group, }\DataTypeTok{values_from =}\NormalTok{ mean_weight) }\OperatorTok{%>%}\StringTok{ }
\StringTok{ }\KeywordTok{summarise}\NormalTok{(}\DataTypeTok{mean_sample_diff =} \StringTok{`}\DataTypeTok{trt2}\StringTok{`} \OperatorTok{-}\StringTok{ `}\DataTypeTok{ctrl}\StringTok{`}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## # A tibble: 1 x 1
## mean_sample_diff
## <dbl>
## 1 0.494
\end{verbatim}
So the mean of \texttt{trt2} is bigger than \texttt{ctrl} by 0.49 g.
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{library}\NormalTok{(effectsize)}
\KeywordTok{cohens_d}\NormalTok{(weight }\OperatorTok{~}\StringTok{ }\NormalTok{group, }\DataTypeTok{data=}\NormalTok{pg_small)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Cohen's d | 95% CI
## --------------------------
## -0.95 | [-1.87, -0.01]
##
## - Estimated using pooled SD.
\end{verbatim}
And correspondingly the standardised effect size is large. The effect size is negative because the calculation has been done in the order that the groups appear in the data. \texttt{ctrl} comes first so the calculation was \texttt{ctrl} - \texttt{trt2} which is a negative value. For now, this won't matter. We will need to pay attention to it later.
\hypertarget{two-sample-t-test}{%
\subsection{\texorpdfstring{Two Sample \(t\)-test}{Two Sample t-test}}\label{two-sample-t-test}}
Let's now do the \(t\)-tests. The hypotheses for a test comparing the treatment groups are
\begin{itemize}
\tightlist
\item
  \(H_0 : \bar{\text{trt2}} - \bar{\text{ctrl}} = 0\) IE the mean sample difference is 0
\item
  \(H_1 : \bar{\text{trt2}} - \bar{\text{ctrl}} \neq 0\) IE the mean sample difference is not 0
\end{itemize}
Using these data to do a \(t\)-test is easy; we'll specify a cut-off of 0.05 for rejection of \(H_0\).
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{model <-}\StringTok{ }\KeywordTok{lm}\NormalTok{(weight }\OperatorTok{~}\StringTok{ }\NormalTok{group, }\DataTypeTok{data =}\NormalTok{ pg_small)}
\KeywordTok{summary}\NormalTok{(model)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
##
## Call:
## lm(formula = weight ~ group, data = pg_small)
##
## Residuals:
## Min 1Q Median 3Q Max
## -0.862 -0.410 -0.006 0.280 1.078
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 5.0320 0.1637 30.742 <2e-16 ***
## grouptrt2 0.4940 0.2315 2.134 0.0469 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 0.5176 on 18 degrees of freedom
## Multiple R-squared: 0.2019, Adjusted R-squared: 0.1576
## F-statistic: 4.554 on 1 and 18 DF, p-value: 0.04685
\end{verbatim}
We get a \(p\)-value of 0.047, which is less than our cut-off of 0.05, so we reject \(H_0\) as unlikely and accept \(H_1\) without explicitly testing it. Our conclusion, scientifically, is that \texttt{trt2} has greater \texttt{weight} than \texttt{ctrl}.
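Incidentally, the same comparison can be run with base R's \texttt{t.test()}; with \texttt{var.equal = TRUE} (a pooled-variance test) it should give the same \(p\)-value as the \texttt{lm()} summary above. A sketch, output omitted:

\begin{verbatim}
t.test(weight ~ group, data = pg_small, var.equal = TRUE)
\end{verbatim}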
\hypertarget{a-bayesian-t-test}{%
\section{\texorpdfstring{A Bayesian \(t\)-test}{A Bayesian t-test}}\label{a-bayesian-t-test}}
Now let's set up a BayesFactor \(t\)-test. First we must set our hypotheses. The null hypothesis is similar to that in the frequentist \(t\)-test, the idea is that there is no effect which we formulated above as
\begin{itemize}
\tightlist
\item
  \(H_0 : \bar{\text{trt2}} - \bar{\text{ctrl}} = 0\) IE the mean sample difference is 0
\end{itemize}
Another way to say this is that the effect size \(d\) is 0 so
\begin{itemize}
\tightlist
\item
\(H_0 : d = 0\)
\end{itemize}
Because we need something to compare against we now need to form the alternative hypothesis. By default the \texttt{ttestBF()} function tests the alternative hypothesis that the effect size is not 0
\begin{itemize}
\tightlist
\item
\(H_1 : d \neq 0\)
\end{itemize}
and returns the Bayes Factor we need. Performing the test is straightforward
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{library}\NormalTok{(BayesFactor)}
\KeywordTok{ttestBF}\NormalTok{(}\DataTypeTok{formula =}\NormalTok{ weight }\OperatorTok{~}\StringTok{ }\NormalTok{group, }\DataTypeTok{data =}\NormalTok{ pg_small)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Bayes factor analysis
## --------------
## [1] Alt., r=0.707 : 1.774688 ±0%
##
## Against denominator:
## Null, mu1-mu2 = 0
## ---
## Bayes factor type: BFindepSample, JZS
\end{verbatim}
We get a clear answer: the output on the line marked \texttt{{[}1{]}} is a Bayes Factor and states that the data are 1.77 times more likely if \(H_1\) were true than if \(H_0\) were true. In other words, the odds of the data favouring \(H_1\) over \(H_0\) are 1.77:1. This is the answer we wanted to get: we have explicitly tested \(H_0\) and \(H_1\) and found that \(H_1\) is more likely to fit the data.
\hypertarget{comparing-p-and-the-bayes-factor-for-the-plantgrowth-data}{%
\section{\texorpdfstring{Comparing \(p\) and the Bayes Factor for the PlantGrowth data}{Comparing p and the Bayes Factor for the PlantGrowth data}}\label{comparing-p-and-the-bayes-factor-for-the-plantgrowth-data}}
Comparing to our table of interpretation of Bayes Factors, we see that this corresponds only to `Anecdotal Evidence' in favour of \(H_1\), which sounds weak and like really there isn't much evidence for the idea that the two samples are different. Do we find this surprising given that the \(p\)-value from the \(t\)-test was significant? Does this mean that the two methods disagree? Strictly speaking, no, we shouldn't be surprised and no they don't disagree.
It's a bit of an apples and oranges situation. The two values are answers to very different questions.
As we've said before, the frequentist \(p\)-value only measures the proportion of times a difference of the measured size would occur under some presumed background model. It does not measure the evidence that the hypothesis is true, even though that is how many people try to interpret it. \(p\) only tells us how often we would be wrong if we reject \(H_0\). As a result, many philosophers have argued that \(p\)-based significance is a fundamentally uninteresting measure: who cares how often a difference occurs in some ideal world? What is important is the relative fit of the competing hypotheses to the data, and this measure of the strength of evidence per hypothesis is more in line with the interests of researchers.
Taken together, our \(p\)-value states that the difference between the means of \texttt{trt2} and \texttt{ctrl} we observed occurs by chance in a normal distribution less than 5\% of the time, and the Bayes Factor tells us that the odds that the data favour the idea that \texttt{trt2} is not the same as \texttt{ctrl} are only 1.77 times greater than the idea that \texttt{trt2} and \texttt{ctrl} are equal. We can see that the two methods do not contradict each other.
Hopefully this brings home the idea that Bayes Factor is different and arguably closer to what many scientists think they are doing when they do frequentist statistics.
Interpreting these results correctly, then, logically means that a researcher is not likely to be very excited by them and would not overvalue the significance of the observed difference.
\hypertarget{better-hypotheses---one-tailed-tests}{%
\section{Better Hypotheses - One-tailed tests}\label{better-hypotheses---one-tailed-tests}}
But looking at the hypotheses we generated, could we ask a better, more informative one? With frequentist tests, no, but with Bayes Factors we can test different hypotheses. Instead of asking whether \texttt{trt2} is the same as \texttt{ctrl} or not, we could ask something more specific. We are likely interested in whether \texttt{trt2} is greater than \texttt{ctrl}, or in other words whether the effect size is greater than 0
\begin{itemize}
\tightlist
\item
\(H_1 : d > 0\)
\end{itemize}
We can specify this \(H_1\) by setting the \texttt{nullInterval} argument. This is just the range of effect sizes we want the alternative hypothesis to cover, so we can use 0 to Infinity for any increased effect size (and -Infinity to 0 for any decreased effect size).
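For reference, the decreased-effect-size version of the hypothesis would be specified as in the sketch below (output omitted; we will run the increased-effect-size version properly in a moment):

\begin{verbatim}
ttestBF(formula = weight ~ group, data = pg_small, nullInterval = c(-Inf, 0))
\end{verbatim}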
\hypertarget{a-data-frame-based-gotcha}{%
\subsection{A data frame based gotcha}\label{a-data-frame-based-gotcha}}
Here is where we can run afoul of one of R's idiosyncrasies: it is important to be careful here because the order of the data in the dataframe can have an effect that can confuse us. Recall that our effect size calculation for these data came out negative because \texttt{ctrl} came before \texttt{trt2}. Look at the dataframe \texttt{pg\_small}.
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{str}\NormalTok{(pg_small)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## 'data.frame': 20 obs. of 2 variables:
## $ weight: num 4.17 5.58 5.18 6.11 4.5 4.61 5.17 4.53 5.33 5.14 ...
## $ group : Factor w/ 2 levels "ctrl","trt2": 1 1 1 1 1 1 1 1 1 1 ...
\end{verbatim}
Note that the \texttt{ctrl} level in the \texttt{group} factor is first, so we need to think of our \(H_1\) more carefully,
\begin{itemize}
\tightlist
\item
\(H_1 : d > 0\)
\end{itemize}
really is in this case
\begin{itemize}
\tightlist
\item
\(H_1 : \bar{\text{trt2}} - \bar{\text{ctrl}} > 0\)
\end{itemize}
so we need to make sure that \texttt{trt2} comes first in the \texttt{group} factor. We can use the \texttt{\$} notation to reorder the factor as we wish.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{pg_small}\OperatorTok{$}\NormalTok{group <-}\StringTok{ }\KeywordTok{factor}\NormalTok{(pg_small}\OperatorTok{$}\NormalTok{group, }
\DataTypeTok{levels=}\KeywordTok{c}\NormalTok{(}\StringTok{"trt2"}\NormalTok{, }\StringTok{"ctrl"}\NormalTok{) )}
\KeywordTok{str}\NormalTok{(pg_small)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## 'data.frame': 20 obs. of 2 variables:
## $ weight: num 4.17 5.58 5.18 6.11 4.5 4.61 5.17 4.53 5.33 5.14 ...
## $ group : Factor w/ 2 levels "trt2","ctrl": 2 2 2 2 2 2 2 2 2 2 ...
\end{verbatim}
\hypertarget{performing-the-one-tailed-test}{%
\section{Performing the One-tailed test}\label{performing-the-one-tailed-test}}
With that done we can get on with the one-tailed test, specifying the interval as discussed.
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{ttestBF}\NormalTok{(}\DataTypeTok{formula =}\NormalTok{ weight }\OperatorTok{~}\StringTok{ }\NormalTok{group, }\DataTypeTok{data =}\NormalTok{ pg_small, }\DataTypeTok{nullInterval=}\KeywordTok{c}\NormalTok{(}\DecValTok{0}\NormalTok{, }\OtherTok{Inf}\NormalTok{))}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Bayes factor analysis
## --------------
## [1] Alt., r=0.707 0<d<Inf : 3.387166 ±0%
## [2] Alt., r=0.707 !(0<d<Inf) : 0.1622109 ±0%
##
## Against denominator:
## Null, mu1-mu2 = 0
## ---
## Bayes factor type: BFindepSample, JZS
\end{verbatim}
Performing the test was nice and easy and we get an answer. The first line of the output, \texttt{{[}1{]}}, states that the odds that the data favour the alternative hypothesis over the null are 3.38:1. The Bayes Factor is increased over that for the earlier, vaguer hypothesis, suggesting there is actually substantial evidence for the idea that the effect size is greater than 0.
\hypertarget{testing-the-effect-of-the-prior}{%
\section{Testing the effect of the prior}\label{testing-the-effect-of-the-prior}}
We discussed that one of the limitations of Bayesian Inference was the need to carefully and justifiably select a prior, and that doing so was difficult. We'll look at that a little now, as we did make a decision on this, albeit implicitly, by accepting the defaults of the \texttt{ttestBF()} function.
In our \texttt{ttestBF()} function we actually need to provide a prior distribution for the maths to work, not just a single value. We don't want to get into the details of those maths as they are out of scope, but we do need to know that the prior distribution needs to cover a range of effect sizes that might be plausible if the null hypothesis were false.
The \texttt{BayesFactor} package provides a Cauchy distribution as the default. Since the selection of the prior implies that we know something about our data, using the Cauchy implies that we think the population is normally distributed (which is the same distribution we assume under the standard frequentist statistical tests).
\hypertarget{the-cauchy-prior-distribution}{%
\subsection{The Cauchy Prior Distribution}\label{the-cauchy-prior-distribution}}
The Cauchy is a distribution with a single parameter called \texttt{scale} that affects how wide its main humpy bit is. In \texttt{BayesFactor} there are three widths we can choose from, depending on how big a difference we think we are seeing, that is, how big the effect size is. When plotted, these distributions look like this
\includegraphics{bayes_factors_files/figure-latex/unnamed-chunk-30-1.pdf}
and the \texttt{name} corresponds to \texttt{scale} values as follows
\begin{tabular}{l|r}
\hline
name & scale\\
\hline
medium & 0.71\\
\hline
wide & 1.00\\
\hline
ultrawide & 1.41\\
\hline
\end{tabular}
In each of the distributions 50\% of the area under the curve falls within +/- the scale value.
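If you would like to draw curves like the ones above for yourself, here is a minimal sketch using base R's \texttt{dcauchy()} with the three scale values from the table:

\begin{verbatim}
# densities of the three Cauchy priors over a range of effect sizes
curve(dcauchy(x, scale = 0.707), from = -4, to = 4,
      xlab = "effect size", ylab = "density")         # medium
curve(dcauchy(x, scale = 1), add = TRUE, lty = 2)     # wide
curve(dcauchy(x, scale = 1.414), add = TRUE, lty = 3) # ultrawide
\end{verbatim}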
Since the scale on the \(x\)-axis in our plot is effect size, the choice of scale value says something about what we are expecting our effect sizes to be like. The wider the scale value, the bigger we are expecting our effect sizes to be.
Our effect size in the \texttt{PlantGrowth} data was 0.95, well within the area covered by the \texttt{medium} scale Cauchy; much more of that curve falls within the -0.95 to +0.95 effect size range than of the other two, so we might think that one would be a better fit. That's why it's the default: it's a good fit for commonly found effect sizes.
\hypertarget{the-effect-of-changing-the-prior}{%
\subsection{The effect of changing the prior}\label{the-effect-of-changing-the-prior}}
As an exercise to help us understand the importance of the prior, and explicitly NOT as a guide to maximising the odds in favour of one model over another, let's look at how changing the scale via the \texttt{rscale} parameter in \texttt{ttestBF()} affects the odds of our one-sided model.
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{ttestBF}\NormalTok{(}\DataTypeTok{formula =}\NormalTok{ weight }\OperatorTok{~}\StringTok{ }\NormalTok{group, }\DataTypeTok{data =}\NormalTok{ pg_small, }\DataTypeTok{nullInterval=}\KeywordTok{c}\NormalTok{(}\DecValTok{0}\NormalTok{, }\OtherTok{Inf}\NormalTok{), }\DataTypeTok{rscale=}\StringTok{"medium"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Bayes factor analysis
## --------------
## [1] Alt., r=0.707 0<d<Inf : 3.387166 ±0%
## [2] Alt., r=0.707 !(0<d<Inf) : 0.1622109 ±0%
##
## Against denominator:
## Null, mu1-mu2 = 0
## ---
## Bayes factor type: BFindepSample, JZS
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{ttestBF}\NormalTok{(}\DataTypeTok{formula =}\NormalTok{ weight }\OperatorTok{~}\StringTok{ }\NormalTok{group, }\DataTypeTok{data =}\NormalTok{ pg_small, }\DataTypeTok{nullInterval=}\KeywordTok{c}\NormalTok{(}\DecValTok{0}\NormalTok{, }\OtherTok{Inf}\NormalTok{), }\DataTypeTok{rscale=}\StringTok{"wide"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Bayes factor analysis
## --------------
## [1] Alt., r=1 0<d<Inf : 3.22134 ±0%
## [2] Alt., r=1 !(0<d<Inf) : 0.1189759 ±0%
##
## Against denominator:
## Null, mu1-mu2 = 0
## ---
## Bayes factor type: BFindepSample, JZS
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{ttestBF}\NormalTok{(}\DataTypeTok{formula =}\NormalTok{ weight }\OperatorTok{~}\StringTok{ }\NormalTok{group, }\DataTypeTok{data =}\NormalTok{ pg_small, }\DataTypeTok{nullInterval=}\KeywordTok{c}\NormalTok{(}\DecValTok{0}\NormalTok{, }\OtherTok{Inf}\NormalTok{), }\DataTypeTok{rscale=}\StringTok{"ultrawide"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Bayes factor analysis
## --------------
## [1] Alt., r=1.414 0<d<Inf : 2.857414 ±0%
## [2] Alt., r=1.414 !(0<d<Inf) : 0.08596477 ±0%
##
## Against denominator:
## Null, mu1-mu2 = 0
## ---
## Bayes factor type: BFindepSample, JZS
\end{verbatim}
Indeed we do get stronger odds for the alternative hypothesis with the \texttt{medium} scale than with the others. Note that it isn't wise to go Bayes Factor fishing by post-hoc selecting the prior in order to maximise the Bayes Factor; this example was an exercise to show that prior selection is important.
\begin{roundup}
\begin{itemize}
\tightlist
\item
  Bayes Factor \(t\)-tests allow us to directly compare hypotheses about data in a way that is analogous to \(t\)-tests.
\item
We can compare different hypotheses
\item
  The interpretation of a Bayes Factor tells us which of the hypotheses is favoured by the data
\item
Prior selection is important, but \texttt{ttestBF()} restricts us to sensible options for data we assume to be normal
\end{itemize}
\end{roundup}
\hypertarget{bayes-factor-anova}{%
\chapter{Bayes Factor ANOVA}\label{bayes-factor-anova}}
\hypertarget{about-this-chapter-3}{%
\section{About this chapter}\label{about-this-chapter-3}}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
Questions
\begin{itemize}
\tightlist
\item
How can I do an ANOVA?
\end{itemize}
\item
Objectives
\begin{itemize}
\tightlist
\item
Understand multiplicity is not a problem for Bayes Factors
\end{itemize}
\item
Keypoints
\begin{itemize}
\tightlist
\item
The package \texttt{simplebf} automates Bayes Factor \(t\)-tests for many samples
\end{itemize}
\end{enumerate}
\hypertarget{the-issue-of-multiplicity-in-frequentism-and-bayesianism}{%
\section{The issue of multiplicity in Frequentism and Bayesianism}\label{the-issue-of-multiplicity-in-frequentism-and-bayesianism}}
The ANOVA is often seen as a catch-all test that can be used for an experiment that has more than two samples in it. Experimenters often understand this to be true on the basis that `you shouldn't do \(t\)-tests for more than two samples by repeating the \(t\)-test'. This is quite true and is a strategy for avoiding the problem of multiplicity.
Multiplicity or multiple testing occurs when we do lots of tests one after the other, in a batch. The more we do, the more likely we are to make an error in our conclusions (not in our working). This happens in Frequentist statistical tests because the \(p\)-value expresses a fixed error rate that we are happy to accept.
Recall that the \(t\)-test has two hypotheses (of which we test just one)
\(H_0 : \bar{x_1} - \bar{x_2} = 0\)
\(H_1 : \bar{x_1} - \bar{x_2} \neq 0\)
and we set a level at which we would reject \(H_0\), usually \(p < 0.05\). The \(p\) reflects the proportion of times that the difference observed is seen in the null model by chance (so we see the difference 1 in 20 times by chance); in other words, in a proportion of 0.95 of cases we would draw the correct conclusion. Which is fine for just one comparison.
If we do more than one test we must multiply these probabilities together, giving \(0.95 \times 0.95 = 0.9025\). This is catastrophic: by doing just two tests we reduce the proportion of times we choose the correct hypothesis to 0.9025, down from roughly 19/20 to 18/20, so we make twice as many mistakes! For more tests this gets worse.
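A quick sketch of how fast this decays, computed directly in R (the values in the comment are approximate):

\begin{verbatim}
0.95 ^ c(1, 2, 5, 10, 20)
# approximately 0.95, 0.90, 0.77, 0.60, 0.36
\end{verbatim}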
Frequentist statistics have lots of corrections for this sort of problem and the ANOVA post-hoc tests are in part a way of doing that. The good news for those using Bayes Factors is that this problem does not exist. Because we don't have a fixed error rate, it doesn't get bigger when we do more tests. We are free to do as many hypothesis comparisons as we wish.
\hypertarget{automating-bayesfactorttest-for-many-comparisions}{%
\section{\texorpdfstring{Automating \texttt{BayesFactor::ttestBF()} for many comparisons}{Automating BayesFactor::ttestBF() for many comparisons}}\label{automating-bayesfactorttest-for-many-comparisions}}
As there isn't a need for a Bayes Factor analogue to the ANOVA and post-hoc tests, we can just use the \(t\)-test analogue over and over again. If we have a multiple-sample dataset we just need a book-keeping method to pull out the pairs of samples of interest.
Let's draft one with \texttt{dplyr} and the Plant Growth data set.
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{library}\NormalTok{(dplyr)}
\KeywordTok{library}\NormalTok{(BayesFactor)}
\NormalTok{small_df <-}\StringTok{ }\NormalTok{PlantGrowth }\OperatorTok{%>%}\StringTok{ }
\StringTok{ }\KeywordTok{filter}\NormalTok{(group }\OperatorTok{%in%}\StringTok{ }\KeywordTok{c}\NormalTok{(}\StringTok{"ctrl"}\NormalTok{, }\StringTok{"trt1"}\NormalTok{)) }\OperatorTok{%>%}\StringTok{ }
\StringTok{ }\KeywordTok{droplevels}\NormalTok{()}
\KeywordTok{ttestBF}\NormalTok{(}\DataTypeTok{formula =}\NormalTok{ weight }\OperatorTok{~}\StringTok{ }\NormalTok{group, }\DataTypeTok{data =}\NormalTok{ small_df)}
\end{Highlighting}
\end{Shaded}
This pattern helps you extract the pair of samples you need, though you would need to repeat it every time you wanted to analyse a new pair. The package \texttt{simplebf} provides a convenience function for the simple case that runs \texttt{BayesFactor::ttestBF()} for all pairs of groups in a specified column of a dataframe. It works like this:
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{library}\NormalTok{(simplebf)}
\NormalTok{result <-}\StringTok{ }\KeywordTok{allpairs_ttestbf}\NormalTok{(PlantGrowth, }
\DataTypeTok{group_col =} \StringTok{"group"}\NormalTok{, }\DataTypeTok{data_col =} \StringTok{"weight"}\NormalTok{, }
\DataTypeTok{rscale =} \StringTok{"medium"}\NormalTok{, }
\DataTypeTok{h_1 =} \StringTok{"test_greater_than_control"}\NormalTok{)}
\NormalTok{knitr}\OperatorTok{::}\KeywordTok{kable}\NormalTok{(result, }\DataTypeTok{digits =} \DecValTok{4}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{tabular}{l|l|l|l|r|l|l}
\hline
control\_group & test\_group & h\_0 & h\_1 & BayesFactor & odds\_h\_1 & summary\\
\hline
trt1 & ctrl & ctrl equal to trt1 & ctrl greater than trt1 & 1.0834 & 1:1.0834 & Anecdotal evidence for H\_1 compared to H\_0\\
\hline
trt2 & ctrl & ctrl equal to trt2 & ctrl greater than trt2 & 0.1622 & 1:0.1622 & Substantial evidence for H\_0 compared to H\_1\\
\hline
ctrl & trt1 & trt1 equal to ctrl & trt1 greater than ctrl & 0.2167 & 1:0.2167 & Substantial evidence for H\_0 compared to H\_1\\
\hline
trt2 & trt1 & trt1 equal to trt2 & trt1 greater than trt2 & 0.1363 & 1:0.1363 & Substantial evidence for H\_0 compared to H\_1\\
\hline
ctrl & trt2 & trt2 equal to ctrl & trt2 greater than ctrl & 3.3872 & 1:3.3872 & Substantial evidence for H\_1 compared to H\_0\\
\hline
trt1 & trt2 & trt2 equal to trt1 & trt2 greater than trt1 & 12.6445 & 1:12.6445 & Strong evidence for H\_1 compared to H\_0\\
\hline
\end{tabular}
The results are pretty easy to read. Note that we can set \texttt{rscale} values as in \texttt{ttestBF()}, and we can choose one of three values for \(H_1\): \texttt{test\_greater\_than\_control}, \texttt{test\_less\_than\_control} and \texttt{test\_not\_equal\_to\_control}.
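As a rough guide to the \texttt{summary} column, the labels correspond to the conventional Jeffreys-style bands for Bayes Factors: roughly 1--3 is `anecdotal', 3--10 `substantial' and 10--30 `strong' evidence, while a Bayes Factor below 1 is read as evidence for \(H_0\) by taking its reciprocal (for example \(1/0.1622 \approx 6.2\), substantial evidence for \(H_0\)). The exact wording and cut-offs used by \texttt{simplebf} should be checked in its documentation.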
\begin{roundup}
\begin{itemize}
\tightlist
\item
Bayes Factors do not need multiple hypothesis corrections
\item
\texttt{simplebf} is a package for automating the comparison of all groups in a single variable in a tidy dataframe
\end{itemize}
\end{roundup}
\hypertarget{bayes-factor-on-contingency-tables-and-proportions}{%
\chapter{Bayes Factor on Contingency Tables and Proportions}\label{bayes-factor-on-contingency-tables-and-proportions}}
\hypertarget{about-this-chapter-4}{%
\section{About this chapter}\label{about-this-chapter-4}}
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
Questions
\begin{itemize}
\tightlist
\item
How can I do a categorical count-based \(\chi^2\) or proportion test with Bayes Factors?
\end{itemize}
\item
Objectives
\begin{itemize}
\tightlist
\item
Perform \(\chi^2\) on contingency tables of any size
\end{itemize}
\item
Keypoints
\begin{itemize}
\tightlist
\item
\texttt{BayesFactor} and \texttt{simplebf} provide functions and automations for categorical count or frequency data
\item
These are useful for HR scoring data
\end{itemize}
\end{enumerate}
The \texttt{BayesFactor} package has some functions for performing other types of tests and returning a Bayes Factor. In this section we will briefly look at these.
\hypertarget{bayes-factor-chi2}{%
\section{\texorpdfstring{Bayes Factor \(\chi^2\)}{Bayes Factor \textbackslash chi\^{}2}}\label{bayes-factor-chi2}}
A common question is whether the proportions of counted things, or frequencies, differ between samples. The example we typically learn first as biologists is Mendel's pea data that led to his genetic insights, like this 2x2 table for flower colour (purple or white). Note that we have the counts of flower colours that were observed, and the expected counts that would come from a 3:1 Mendelian segregating cross.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{mendel_data}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## P W
## observed 459 141
## expected 450 150
\end{verbatim}
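As a quick check on where the expected counts come from: there are \(459 + 141 = 600\) plants in total, and a 3:1 segregation predicts
\[
600 \times \frac{3}{4} = 450 \quad \mbox{purple} \qquad \mbox{and} \qquad 600 \times \frac{1}{4} = 150 \quad \mbox{white}.
\]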
The \(\chi^2\) test is the classical frequentist test performed to determine differences in proportions in a contingency table, and there is an equivalent Bayesian method in \texttt{BayesFactor}. We can run our data through the function \texttt{contingencyTableBF()} very easily, but it does need the data to be an R matrix object, not the more typical dataframe. We can change that easily with \texttt{as.matrix()}, then run the function.
The arguments are important: \texttt{fixedMargin} describes whether the variable of interest is in the rows or columns of the table (here it is in the columns, so we use \texttt{cols}); \texttt{sampleType} describes what the function should do in the Bayesian sampling process as it runs. This is highly technical and out of scope for what we want to discuss, so I'm going to gloss over it. The function documentation has more information if you want it (\texttt{?contingencyTableBF}); the option used here, \texttt{indepMulti}, is a good one to start with.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{mendel_matrix <-}\StringTok{ }\KeywordTok{as.matrix}\NormalTok{(mendel_data)}
\KeywordTok{library}\NormalTok{(BayesFactor)}
\KeywordTok{contingencyTableBF}\NormalTok{(mendel_matrix, }\DataTypeTok{sampleType =} \StringTok{"indepMulti"}\NormalTok{, }\DataTypeTok{fixedMargin=}\StringTok{'cols'}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Bayes factor analysis
## --------------
## [1] Non-indep. (a=1) : 0.1011097 ±0%
##
## Against denominator:
## Null, independence, a = 1
## ---
## Bayes factor type: BFcontingencyTable, independent multinomial
\end{verbatim}
The hypotheses that are tested in this example are fixed and simple ones. Strictly, \(H_0\) is that the proportions in the table are equal and \(H_1\) is that the proportions are not equal. So in effect the whole table is tested to see whether the observed counts differ from the expected counts. Here we see that the odds are 1:0.101 \emph{against} \(H_1\), so the conclusion is that the proportions are equal, that is, our observed flower colour proportions match the expected ones.
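To put a number on that: the reported value is \(\mathrm{BF}_{10} \approx 0.101\), so the evidence for the null is its reciprocal,
\[
\mathrm{BF}_{01} = \frac{1}{\mathrm{BF}_{10}} \approx \frac{1}{0.101} \approx 9.9,
\]
that is, the observed counts are about ten times more likely under the `proportions are equal' hypothesis than under the alternative.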
There isn't a way to use different \(H_1\)'s in the way that we did with the Bayes Factor \(t\)-test, so we can't test the explicit hypothesis that one proportion is bigger (or smaller) than the other.
\hypertarget{converting-a-dataframe-to-a-contingency-table}{%
\subsection{Converting a dataframe to a contingency table}\label{converting-a-dataframe-to-a-contingency-table}}
In most of our work we've used tidy data (or case-based data) in dataframes. The function we just learned uses a contingency table in a matrix, not a dataframe. Sometimes, too, we will want to make a contingency table just to see it. We can make a contingency table from a dataframe with the \texttt{table()} function; we just have to select the columns we want using the \texttt{\$} notation.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{hr_df}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## # A tibble: 9 x 3
## strain replicate score
## <chr> <dbl> <dbl>
## 1 control 1 1
## 2 mild 1 3
## 3 deadly 1 4
## 4 control 2 2
## 5 mild 2 3
## 6 deadly 2 4
## 7 control 3 1
## 8 mild 3 3
## 9 deadly 3 3
\end{verbatim}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{hr_cont_table <-}\StringTok{ }\KeywordTok{table}\NormalTok{(hr_df}\OperatorTok{$}\NormalTok{score,hr_df}\OperatorTok{$}\NormalTok{strain) }
\end{Highlighting}
\end{Shaded}
\hypertarget{bigger-contingency-tables}{%
\subsection{Bigger contingency tables}\label{bigger-contingency-tables}}
Sometimes we'll have a contingency table of counts that is larger than 2 x 2, i.e.\ we have more than two samples and more than two levels of a variable. For example, we might have this HR scoring table.
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{hr_table}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
##
## control deadly mild
## 1 2 0 0
## 2 1 0 0
## 3 0 1 3
## 4 0 2 0
\end{verbatim}
As we can see, it shows the HR score in the rows and the different strains in the columns. The numbers represent the count of times each score was seen across three replicated experiments. Because it's a contingency table, the replicates are merged together. It is therefore important that the same amount of sampling was done for each strain.
Here we want to compare the two basic hypotheses: that the proportions of observed scores are the same across the strains, or that they are not. Let's go ahead and do that with \texttt{contingencyTableBF()}:
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{contingencyTableBF}\NormalTok{(hr_table, }\DataTypeTok{sampleType =} \StringTok{"indepMulti"}\NormalTok{, }\DataTypeTok{fixedMargin =} \StringTok{"cols"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{verbatim}
## Bayes factor analysis
## --------------
## [1] Non-indep. (a=1) : 11.55 ±0%
##
## Against denominator:
## Null, independence, a = 1
## ---
## Bayes factor type: BFcontingencyTable, independent multinomial
\end{verbatim}
We get a clear answer: the Bayes Factor strongly favours the hypothesis that the proportions of scores across the strains are not equal. This is nice, but it doesn't go far enough: it doesn't tell us which proportions are bigger than others, or whether the conclusion applies to all the possible pairings of strains. This is the same problem we had with the Bayes Factor \(t\)-test, and the solution is the same: we can just pull out each pair of strains and compare them one pair at a time. All we need is a book-keeping method to do this. The library \texttt{simplebf} contains one, so let's use that.
We can use the \texttt{allpairs\_proportionbf()} function to get a data frame of Bayes Factors. If you pass this function a dataframe it will make the contingency table for you. You must specify which columns to use for the group and the counts. For easy reading we'll send the output to the \texttt{knitr::kable()} function.
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{library}\NormalTok{(simplebf)}
\KeywordTok{allpairs_proportionbf}\NormalTok{(hr_df, }
\DataTypeTok{group_col =} \StringTok{"strain"}\NormalTok{, }\DataTypeTok{count_col =} \StringTok{"score"}\NormalTok{, }
\DataTypeTok{sample_type =} \StringTok{"indepMulti"}\NormalTok{) }\OperatorTok{%>%}\StringTok{ }
\StringTok{ }\NormalTok{knitr}\OperatorTok{::}\KeywordTok{kable}\NormalTok{()}
\end{Highlighting}
\end{Shaded}
\begin{tabular}{l|l|l|l|r|l|l}
\hline
control\_group & test\_group & h\_0 & h\_1 & BayesFactor & odds\_h\_1 & summary\\
\hline
control & mild & mild proportions equal to control proportions & mild proportions not equal to control proportions & 5.6000 & 1:5.6 & Substantial evidence for H\_1 compared to H\_0\\
\hline
control & deadly & deadly proportions equal to control proportions & deadly proportions not equal to control proportions & 4.2000 & 1:4.2 & Substantial evidence for H\_1 compared to H\_0\\
\hline
mild & deadly & deadly proportions equal to mild proportions & deadly proportions not equal to mild proportions & 2.1875 & 1:2.1875 & Anecdotal evidence for H\_1 compared to H\_0\\
\hline
\end{tabular}
So we get a nice set of Bayesian hypothesis tests for proportion or contingency table data from our HR experiment.
\begin{roundup}
\begin{itemize}
\tightlist
\item
Bayes Factors can be used for proportion tests like the \(\chi^2\)
\item
The \texttt{BayesFactor} and \texttt{simplebf} packages are useful tools that implement these tests
\end{itemize}
\end{roundup}
\bibliography{book.bib,packages.bib}
\end{document}
\section{[No Reference] Buffer Overflow Functions}
This analysis detects possible buffer overflows due to the usage of
`unsafe' function calls. The results need to be either inspected by the user
or, if applicable, unsafe function calls can be exchanged for their
safe counterparts.
\subsection{Non-Compliant Code Examples}
\code{
\noindent
\ttfamily
\hlstd{}\hlline{\ \ \ \ 1\ }\hlstd{}\hldir{\#include\ $<$stdio.h$>$\\
}\hlline{\ \ \ \ 2\ }\hldir{}\hlstd{}\hldir{\#include\ $<$string.h$>$\\
}\hlline{\ \ \ \ 3\ }\hldir{}\hlstd{\\
}\hlline{\ \ \ \ 4\ }\hlstd{}\hlkey{using\ namespace\ }\hlstd{std;\\
}\hlline{\ \ \ \ 5\ }\hlstd{\\
}\hlline{\ \ \ \ 6\ }\hlstd{}\hltyp{void\ }\hlstd{fail}\hlsym{()\ \{\\
}\hlline{\ \ \ \ 7\ }\hlsym{\hlstd{\ \ }}\hlstd{}\hltyp{char\ }\hlstd{string}\hlsym{[}\hlstd{}\hlnum{50}\hlstd{}\hlsym{]}\hlstd{;\\
}\hlline{\ \ \ \ 8\ }\hlstd{\hlstd{\ \ }}\hltyp{int\ }\hlstd{file\textunderscore number\ =\ }\hlnum{0}\hlstd{;\\
}\hlline{\ \ \ \ 9\ }\hlstd{\hlstd{\ \ }sprintf}\hlsym{(\ }\hlstd{string,\ }\hlstr{"file.\%d"}\hlstd{,\ file\textunderscore number\ }\hlsym{)}\hlstd{;\\
}\hlline{\ \ \ 10\ }\hlstd{\\
}\hlline{\ \ \ 11\ }\hlstd{\hlstd{\ \ }}\hltyp{char\ }\hlstd{result}\hlsym{[}\hlstd{}\hlnum{100}\hlstd{}\hlsym{]}\hlstd{;\\
}\hlline{\ \ \ 12\ }\hlstd{\hlstd{\ \ }}\hltyp{float\ }\hlstd{fnum\ =\ }\hlnum{3.14159}\hlstd{;\\
}\hlline{\ \ \ 13\ }\hlstd{\hlstd{\ \ }sprintf}\hlsym{(\ }\hlstd{result,\ }\hlstr{"\%f"}\hlstd{,\ fnum\ }\hlsym{)}\hlstd{;\\
}\hlline{\ \ \ 14\ }\hlstd{\\
}\hlline{\ \ \ 15\ }\hlstd{\\
}\hlline{\ \ \ 16\ }\hlstd{\hlstd{\ \ }}\hltyp{char\ }\hlstd{str1}\hlsym{[]}\hlstd{=}\hlstr{"Sample\ string"}\hlstd{;\\
}\hlline{\ \ \ 17\ }\hlstd{\hlstd{\ \ }}\hltyp{char\ }\hlstd{str2}\hlsym{[}\hlstd{}\hlnum{40}\hlstd{}\hlsym{]}\hlstd{;\\
}\hlline{\ \ \ 18\ }\hlstd{\hlstd{\ \ }}\hltyp{char\ }\hlstd{str3}\hlsym{[}\hlstd{}\hlnum{40}\hlstd{}\hlsym{]}\hlstd{;\\
}\hlline{\ \ \ 19\ }\hlstd{\hlstd{\ \ }memcpy\ }\hlsym{(}\hlstd{str2,str1,strlen}\hlsym{(}\hlstd{str1}\hlsym{)}\hlstd{+}\hlnum{1}\hlstd{}\hlsym{)}\hlstd{;\\
}\hlline{\ \ \ 20\ }\hlstd{\hlstd{\ \ }memcpy\ }\hlsym{(}\hlstd{str3,}\hlstr{"copy\ successful"}\hlstd{,}\hlnum{16}\hlstd{}\hlsym{)}\hlstd{;\\
}\hlline{\ \ \ 21\ }\hlstd{\hlstd{\ \ }printf\ }\hlsym{(}\hlstd{}\hlstr{"str1:\ \%s}\hlesc{$\backslash$n}\hlstr{str2:\ \%s}\hlesc{$\backslash$n}\hlstr{str3:\ \%s}\hlesc{$\backslash$n}\hlstr{"}\hlstd{,str1,str2,str3}\hlsym{)}\hlstd{;\\
}\hlline{\ \ \ 22\ }\hlstd{\\
}\hlline{\ \ \ 23\ }\hlstd{}\hlsym{\}}\hlstd{}\\
\mbox{}\\
\normalfont
}
\subsection{Compliant Solution}
Example as above; use snprintf instead of sprintf.
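A minimal sketch of such a rewrite is shown below for the \texttt{sprintf} calls in the example (the function name \texttt{pass} is illustrative; bounded calls should still be reviewed for silent truncation):
\begin{verbatim}
#include <stdio.h>

void pass() {
  char string[50];
  int file_number = 0;
  /* snprintf() writes at most sizeof(string) bytes,
     including the terminating null byte */
  snprintf(string, sizeof(string), "file.%d", file_number);

  char result[100];
  float fnum = 3.14159;
  snprintf(result, sizeof(result), "%f", fnum);
}
\end{verbatim}
For the \texttt{memcpy} and \texttt{strcpy} family, the analogous fix is to bound the copy by the size of the destination buffer rather than by the length of the source.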
\subsection{Parameter Requirements}
None.
\subsection{Implementation}
The following functions are checked for:
\begin{itemize}
\item sprintf
\item scanf
\item sscanf
\item gets
\item strcpy
\item \_mbscpy
\item lstrcat
\item memcpy
\item strcat
\end{itemize}
\subsection{References}
\htmladdnormallink{Foster}{}, ``James C. Foster, Vitaly Osipov, Nish Bhalla, Niels Heinen, Buffer Overflow Attacks, ISBN 1-932266-67-4, p. 211''
\section{Introduction}
\label{sec:intro}
One fundamental task in \emph{Bayesian networks} (BNs) \cite{pear88} is inference.
Given that some variables have been observed, the task is to compute posterior probabilities of other variables.
\cite{koll09} introduce readers to exact inference in discrete BNs with the \emph{Variable Elimination} (VE) \cite{zhan94} algorithm.
The VE algorithm uses its own terminology such as elimination orderings to modify the \emph{conditional probability tables} (CPTs) of the BN to answer queries.
Another fundamental task in BNs is modeling, by which we mean testing which conditional independence relations hold in a given BN.
More specifically, we want to know whether two sets $X$ and $Z$ of variables are conditionally independent given a third set $Y$ of variables.
\cite{pearl86,pear88} introduced the \emph{directed separation} (d-separation) algorithm for this task.
d-Separation uses its own specialized terminology, such as closed convergent valves in the \emph{directed acyclic graph} (DAG), to determine whether or not an independence holds.
In this paper, we aim to establish computation that is common to both inference and modeling.
We organize the common computation as an algorithm, called \emph{Simple Propagation} (SP).
SP takes the factorization of the BN CPTs and two sets $X$ and $Y$ of variables in the BN.
SP modifies the factorization by removing all variables relevant to $X$ and $Y$.
The output of SP can now be used for both inference and modeling.
Surprisingly, one salient feature of SP is that it performs the bulk of the work leaving only a few steps to be executed for inference and for modeling.
Another advantage of SP is that it brings unified terminology.
Thereby, the work here provides a deeper understanding of BNs.
This paper is organized as follows.
In Section \ref{sec:back}, background is given.
Unifying inference and modeling is done in Section \ref{sec:new}.
Section \ref{sec:adv} outlines the advantages of the new method.
Conclusions are shown in Section \ref{sec:conc}.
\documentclass{../../llncs2e/llncs}
\begin{document}
%-------------------------------------
% RELATED WORK
%-------------------------------------
\section*{Related Work}
\label{sec:Related Work}
Cloud computing and the Internet of Things are emerging computing paradigms that feature distinctly different computing resources
and system architectures. As their popularity has been growing across academia and industry, researchers and developers
are spending a lot of effort investigating how to integrate these technologies in order to take advantage of the benefits
provided by both of them.\\
IoT applications often encapsulate several relatively complex protocols and involve different software components. Moreover,
they require a significant investment in infrastructure; in addition, system administrators and users spend time on large
client and server installations, setups, or software updates. Since in Cloud computing most of the computing resources are allocated on servers
on the Internet, integrating these paradigms in a Cloud-based model results in a solution with more implementation flexibility,
high scalability and high availability, and a reduced upfront investment.\\
In RFID-based IoT applications, Dominique et al. \cite{guinard2011cloud} point out that the deployment of RFID applications is cost-intensive, mostly
because it involves the deployment of often rather large and heterogeneous distributed systems. As a consequence, these systems are often only
suitable for big corporations and large implementations and do not fit the limited resources of small to mid-size businesses and small-scale
applications, both in terms of the required skill-set and costs. To address this problem, Dominique et al. propose a Cloud-based solution that
integrates virtualization technologies with the architecture of the Web and its services. They applied the Utility Computing blueprint to the
software stack required by RFID-based IoT applications using the AWS platform and the EC2 service. The Elastic Compute Cloud (EC2) service
allows the creation and management of virtual machines (Amazon Machine Images, or AMIs) that can then be deployed on
demand onto a pool of machines hosted, managed and configured by Amazon. A benefit of this approach is that the server-side hardware
maintenance is delegated to the cloud provider, which is often more cost-efficient for smaller businesses. Furthermore,
it also offers better scaling capabilities, as the company using the EPC Cloud AMI can deploy additional and more powerful instances according
to the amount of requests.\\
Distefano et al. \cite{distefano2012enabling} proposed a high-level modular architecture to implement the Cloud of Things. According to Distefano et al., things
can not only be discovered and aggregated, but also provided as a service, dynamically, applying the Cloud provisioning model to satisfy the agreed
user requirements and therefore establishing Things as a Service providers. The \textit{Things as a Service} (TaaS) paradigm envisages new scenarios
and innovative, pervasive, value-added applications, opening the Cloud of Things world to customers and providers alike, thus enabling an open
marketplace of ``things''. To address these issues, an ad-hoc infrastructure is required to deal with the management of sensing and actuation, with mashed-up
resources provided by heterogeneous Clouds, and with things, by exploiting well-known ontologies and semantic approaches shared and adopted by users,
customers and providers to detect, identify, map and transform mashed-up resources. The proposed architecture provides building blocks to deal with all the
related issues, while aiming to provide things according to a service-oriented paradigm.\\
CloudThings \cite{zhou2013cloudthings} is an architecture that uses a common approach to integrate the Internet of Things and Cloud Computing. The proposed architecture
is an online platform which accommodates IaaS, PaaS, and SaaS and allows system integrators and solution providers to leverage a complete IoT application
infrastructure for developing, operating and composing IoT applications and services. The application consists of three major modules. The CloudThings
service platform is a set of Cloud services (IaaS) allowing users to run any application on Cloud hardware. This service platform dramatically
simplifies application development, eliminates the need for infrastructure development, shortens time to market, and reduces management and maintenance
costs. The CloudThings Developer Suite is a set of Cloud service tools (PaaS) for application development, such as Web service APIs, which provide complete
development and deployment capabilities to developers. The CloudThings Operating Portal is a set of Cloud services (SaaS) that support deployment and handle
or support specialized processing services.\\
The effort put into research on integrating the paradigms of Cloud Computing and the Internet of Things has resulted in essential
contributions, but there are several issues regarding the integration between Cloud Computing and the Internet of Things that must still be addressed.
In particular, due to the heterogeneity of IoT application environments, it is hard for solution providers to efficiently deploy and configure applications for
a large number of users. Thus, automation of the management tasks required by IoT applications is a key issue to be explored.\\
TOSCA (Topology and Orchestration Specification for Cloud Applications) \cite{li2013towards} has been proposed in order to improve the reusability of service management
processes and to automate IoT application deployment in heterogeneous environments. TOSCA is a new cloud standard to formally describe the internal topology of
application components and the deployment process of IoT applications. The structure and management of IT services is specified by a meta-model, which consists
of a \textit{Topology Template}, which describes the structure of a service; \textit{Artifacts}, which describe the files, scripts and
software components that need to be deployed in order to run the application; and \textit{Plans}, which define the management processes of creating, deploying and
terminating a service. The correct topology and management procedure can be inferred by a TOSCA environment just by interpreting the topology template; this is known
as the ``declarative'' approach. Plans realize an ``imperative'' approach that explicitly specifies how each management process should be done. The topology templates, plans
and artifacts of an application are packaged in a Cloud Service Archive (.csar file) and deployed in a TOSCA environment, which is able to interpret the models and perform
the specified management operations. These .csar files are portable across different cloud providers, which is a great benefit in terms of deployment flexibility.
As a newly established standard to counter growing complexity and isolation in cloud application environments, TOSCA is gaining momentum in industrial adoption as well as academic interest.\\
Breitenb\"{u}cher et al. \cite{breitenbucher2014combining} proposed to combine the two flavors of management supported by TOSCA, \textit{declarative processing} and
\textit{imperative processing}, in order to create a standards-based approach to generating provisioning plans from TOSCA topology models. The
combination of both flavors enables application developers to benefit from automatically generated provisioning logic based on declarative processing and
from the individual customization opportunities provided by adapting imperative plans. These provisioning plans are workflows that can be executed fully automatically and
may be customized by application developers after generation. The approach draws on the strengths of both flavors, which leads to economic advantages
when developing applications with TOSCA.\\
Recently, a growing number of organizations have been developing orchestrators, design tools and cloud managers based on TOSCA. Juju is an open-source TOSCA orchestrator that
can deploy workloads across public and private clouds, and directly onto bare metal. HP Cloud Service Automation is a cloud management solution that supports declarative
service designs aligned with TOSCA modeling principles. IBM Cloud Orchestrator provides integrated tooling to create TOSCA applications, deploy them with custom
policies, and monitor and scale them in cloud deployments.
\bibliographystyle{../../llncs2e/splncs}
\bibliography{../../bibliography/references}
\end{document}
\newcommand{\Incep}[1]{\textsc{\textbf{Incep}}(#1)}
\newcommand{\Cont}[1]{\textsc{\textbf{Cont}}(#1)}
\newcommand{\Fin}[1]{\textsc{\textbf{Fin}}(#1)}
\newcommand{\Caus}[1]{\textsc{\textbf{Caus}}(#1)}
\newcommand{\Liqu}[1]{\textsc{\textbf{Liqu}}(#1)}
\newcommand{\Perm}[1]{\textsc{\textbf{Perm}}(#1)}
\newcommand{\F}[1]{\textsc{\textbf{#1}}}
\newcommand{\fsimp}[2]{\F{#1}(\E{#2})}
\newcommand{\fmod}[3]{\F{#1}$_{#2}$(\E{#3})}
These functions are a condensed notation to represent relationships in
lexical collocation, though some will correspond to valency changes,
derivations, etc.
\subsection{Structure}
These are used for narrative and pragmatic purposes, textual cohesion,
etc., and are not simply empty. They allow one to shift focus,
saliency, topic. Some of this will be handled by valency tricks in
some languages.
\begin{multicols}{2}
\fmod{Func}{0}{} = \I{lvc} meaning \E{happen to take place} which has
the keyword lexicalized as subject: \E{the possibility exists, time
flies, the day passes by, the rain falls}.
\fmod{Func}{1}{} = \I{lvc} meaning \E{originate from,} connects
keyword as subject with agent as \I{do}: \E{responsibility lies (with
sb), the blow comes (from sb), support comes (from sb).}
\fmod{Func}{1}{blow} = \E{comes from sb}.
\fmod{Func}{2}{} = \I{lvc} meaning \E{concern, apply to,}
connects keyword as subject with the object: \fmod{Func}{2}{blow} =
\E{falls upon sb.}
\fmod{Oper}{1}{} = ``carry out, perform, act, do'' \I{lvc} which
connects the subject and the action as \I{do}: \E{take a bath,
vacation; have a look, bath, shower; give sb/sth a smile, laugh,
shout}. Very common. \fmod{Oper}{1}{attention} = \E{pay}.
\fmod{Oper}{2}{} = ``undergo, meet'' \I{lvc} which connects patient,
recipient, experiencer as subject to action as \I{do}: \E{get a
benefit, have an attack (of a disease), take advice, undergo
inspection}.
\fmod{Oper}{2}{attention} = \E{draw}.
\fmod{Labor}{ij} = \I{lvc} which connects \E{i}th element as subject to
\E{j}th element as \I{do}, with keyword as secondary
object. \fmod{Labor}{12}{interrogation} = \E{to subject sb.\ to an
interrogation;} similar, \E{treat someone with respect}.
\end{multicols}
\subsection{Fulfillment}
All these produce verbs or \I{lvc}s that satisfy ``to fulfill the
requirement of, to do with X what you are required to, X fulfills its
requirement, designed to.'' The fulfillment may not be seen as such by
someone undergoing it: the fulfullment of some disease is death. May
be different terms for different types of fulfillment (psychological,
physical, etc.).
\begin{multicols}{2}
\fmod{Fact}{n}{} = syntactic actant \E{n} fulfills its own requirement:
\fmod{Fact}{0}{doubt} = \E{be corroborated,}
\fmod{Fact}{0}{knife} = \E{cut,}
\fmod{Fact}{1}{turn} = \E{be someone's turn,}
\fmod{Fact}{2}{ship} = \E{transport people or cargo}.
\fmod{AntiFact}{0}{accusation} = \E{is fabricated,}
\fmod{AntiFact}{1}{accusation} = \E{withdraw,}
\fmod{AntiFact}{2}{accusation} = \E{denies charges.}
\fmod{Real}{1}{} = \I{lvc} for ``act accordingly to the situation, use as
forseen:'' \E{exercise authority, use a telephone, speak a language,
keep a promise}.
\fmod{Real}{2}{} = \I{lvc} for ``react according to the situation,''
\E{respond to an objection, satisfy a requirement, give in to
persuasion, get a joke, confirm a hypothesis.}
\fmod{AntiReal}{} = \E{fail an exam, reject advice, turn down
application}.
\fmod{Labreal}{ij}{} = \I{lvc} corrosponding to \fsimp{Labor}{} above.
\fmod{Labreal}{12}{gallows} = \E{string someone up}. Others: \E{cut
something with a saw, hold something in reserve.}
\fmod{Labreal}{13}{} = \E{burn with shame, waste one's health}.
\end{multicols}
\subsection{Verbal}
Usually from nouns.
\begin{multicols}{2}
\fsimp{Copul}{} = copula: \E{work as a teacher, serve as an example,}
\fsimp{IncepCopul}{ill} = \E{fall ill}.
\fsimp{Involv}{} = involve, affect a non-participant: \E{light floods
the room, snowstorm catches/hits, smell filled the room}.
\fsimp{Manif}{} = manifest, become apparent in someone/something:
\E{joy bursts, scorn drips}.
\fsimp{Prox}{} = be about to, be on the verge of: \E{on the edge of
despair, on the brink of disaster, verge of tears, thunderstorm
brews}. Usually as \fmod{ProxOper}{1}{despair}.
\fsimp{Prepar}{} = prepare X for, get X ready for normal use. Usually
as \fmod{PreparFact}{0}{car} = \E{fill up the car}.
\fsimp{Obstr}{} = function with difficulty: \E{eyes blur, economy
stagnates, short of breath}. \fsimp{CausObstr}{} common: \E{gun
jams, rope tangles, traffic snarls}.
\fsimp{Son}{} = emit characteristic sound: \E{whip cracks, bell chimes,
cane swishes, leaf rustles}.
\fsimp{Stop}{} = stop functioning: \E{lose one's breath, voice breaks,
heart stops (or breaks)}.
\fsimp{Excess}{} = function in an abnormally excessive way: \E{heart
has palpitations, engine races, sweat rolls down, teeth grind}.
\fsimp{Sympt}{} = represents bodily reaction to X. Joined in complex
relationships with the rest:
\fsimp{Obstr}{speech} + \fsimp{Sympt}{anger} = \E{sputters with anger;}
\fsimp{Obstr}{breath} + \fsimp{Sympt}{anger} = \E{chokes with anger}.
\end{multicols}
\subsection{Nominal}
~
\begin{multicols}{2}
\fmod{S}{n}{} = the \E{n}th participant (agent noun, object noun,
etc.).
\fmod{S}{1}{teach} = \E{teacher,}
\fmod{S}{2}{teach} = \E{student,}
\fmod{S}{3}{teach} = \E{subject matter.}
More interesting when modified, e.g., \fmod{(AntiBon)S}{1}{}.
In addition are \fmod{S}{\textrm{loc}}{},
\fmod{S}{\textrm{instr}}{},
\fmod{S}{\textrm{mod}}{} (manner, \E{way of life}),
\fmod{S}{\textrm{res}}{} (result, \fmod{S}{\textrm{res}}{split} = \E{crack}),
\fmod{S}{\textrm{med}}{} (means).
\fsimp{Cap}{} = the head of: \E{pope, captain, emperor,}
\fsimp{Cap}{university} = \E{president}.
\fsimp{Equip}{} = staff, crew of: \E{crew, company, personnel}.
\fsimp{Mult}{} = collection of: \E{bouquet, group}.
\fsimp{Sing}{} = unit of entity: \E{rain drop, snowflake, act of violence}.
\fsimp{Pel}{} = covering: \fsimp{Pel}{bean} = \E{pod,}
\fsimp{Pel}{tree} = \E{bark,}
\fsimp{Pel}{book} = \E{cover; binding.}
\fsimp{LiquPel}{bean} = \E{to shell,} \fsimp{LiquPel}{rabbit} = \E{to skin}.
\fsimp{Fas}{} = ``face,'' front: \E{front of house, bow or prow of
ship, nose of plane.} \fsimp{AntiFas}{} = \E{ship stern, tail of
plane}.
\end{multicols}
\subsection{Adjectival}
~
\begin{multicols}{2}
\fmod{A}{n}{} = determining property of \E{n}th participant from the
viewpoint of its role in the situation; quite like participles with verbs.
\fmod{A}{0}{brother} = \E{fraternal,}
\fmod{A}{0}{city} = \E{urban,}
\fmod{A}{1}{delight} = \E{delightful,}
\fmod{A}{1}{anger} = \E{in anger, angry,}
\fmod{A}{2}{shoot} = \E{under fire}.
\fmod{A}{2}{analyze} = \E{under analysis}.
Often more useful modified.
\fmod{Able}{n}{} = can easily, prone to:
\fmod{Able}{1}{cry} = \E{tearful,}
\fmod{Able}{2}{trust} = \E{trustworthy}.
\fmod{Qual}{i}{} = predisposed, of \E{i}th probable argument:
\fmod{Qual}{1}{cry} = \E{sad,}
\fmod{Qual}{1}{laugh} = \E{cheerful,}
\fmod{Qual}{2}{laugh} = \E{awkward, absurd}.
\end{multicols}
\subsection{Evaluation}
May be combined with \F{Anti}: \fsimp{Magn}{temperature} = \E{high},
\fsimp{AntiMagn}{temperature} = \E{low}. May be quite different
depending on word class, and take multiple forms: \fsimp{Magn}{smoker}
= \E{heavy, chain-smoker;} \fsimp{Magn}{to smoke} = \E{like a
chimney}.
\begin{multicols}{2}
\fsimp{Bon}{} = good, generally held praise: \E{neatly cut, heroic
struggle, fruitful analysis}.
\fsimp{Centr}{} = center, culmination: \E{height of the crisis, summit
of glory, prime of life}.
\fsimp{Degrad}{} = degraded, lowered: \E{discipline decays, house becomes
dilapidated, patience wears thin, temper frays, teeth decay}.
\fsimp{Magn}{} = immensely, very: \E{shave close/clean, condemn
strongly, infinite patience}. Might be quantitative or temporal
(speed).
\fsimp{Ver}{} = real, genuine, as it should be, meeting intended
requirements: \E{genuine surprise, walk steadily, loyal citizen,
legitimate demand, precise instrument, well-deserved punishment,
restful sleep}.
\fsimp{AntiVer}{} might have ``too much'' or ``too little'' options.
\end{multicols}
\subsection{Other}
~
\begin{multicols}{2}
\fsimp{Result}{} = the expected result of;
\fsimp{Result}{buy} = \E{own,}
\fsimp{Result}{to have learnt} =\E{know}.
\fmod{Conv}{ijk}{} = converse, reorders arguments.
\fmod{Conv}{21}{include} = \E{belong,}
\fmod{Conv}{21}{precede} = \E{follow}.
\fsimp{Figur}{} = figurative, standard received metaphors: \E{curtain
of rain, pangs of remorse, flames of passion}.
\end{multicols}
\subsection{Modification}
These are only used in combination with others.
\fmod{IncepFunc}{1}{anger} = \E{anger rises}.
\fmod{Cont\-Oper}{1}{power} = \E{retain one's power}.
\fmod{PermFunc}{0}{aggression} = \E{condone aggression}.
\begin{multicols}{2}
\Incep{begin, start} (= \fsimp{AntiFin}{})
\Cont{continue, maintain, retain}
\Fin{cease, stop}
\Caus{causative}
\Liqu{liquidate, stop, divert} (= \fsimp{AntiCaus}{})
\Perm{permit, allow, condone}
\F{Plus}(more)
\F{Minus}(less)
\F{Anti}(negates)
\end{multicols}
\noindent Apparently common blends:
\begin{multicols}{2}
\F{AntiMagn}
\fmod{ContFact}{0}{} = \E{luck holds}.
\fmod{CausFunc}{0}{} = \E{find an answer, conduct a campaign, produce
an effect}. Fairly common.
\fmod{CausFunc}{1}{} = \E{open the way, cause damage, give an
answer}. Fairly common. Can be modified:
\fmod{CausPlusFunc}{1}{risk} = \E{increase, raise,}
\fmod{CausMinusFunc}{1}{consumption} = \E{reduce}
\fmod{IncepOper}{1}{} = \E{take an attitude, start a session, obtain a
position}.
\fmod{ContOper}{1}{} = \E{keep silence, follow an example, keep one's
balance, lead a busy life}.
\end{multicols}
\noindent A fuller example: \\
\fmod{IncepOper}{1}{habit} = \E{acquire, form, take to}.\\
\fmod{FinOper}{1}{habit} = \E{drop, get out/rid of}.\\
\fmod{LiquOper}{1}{habit} = \E{break, wean from}.\\
\fmod{Liqu$_{1}$Oper}{1}{habit} = \E{kick, shake off}.\\
\fmod{CausFunc}{1}{habit} = \E{instill into, inculcate}.
\bigskip
These may also produce simultaneous functions:
\F{[Magn + Oper$_{1}$]}(doubt) = \E{be plagued by doubt}.
\F{[Ver + Oper$_{1}$]}(health) = \E{have a clean bill of health}.
\section{Steps}
\label{sec:steps}
The core of the RAVEN calculation flow is the \textbf{Step} system.
%
The \textbf{Step} is in charge of assembling different entities in RAVEN (e.g.
Samplers, Models, Databases, etc.) in order to perform a task defined by the
kind of step being used.
%
A sequence of different \textbf{Steps} represents the calculation flow.
%
Before analyzing each \textbf{Step} type, it is worth 1)
explaining how a general \textbf{Step} entity is organized, and 2) introducing the concept of a step
``role''.
%
A general example of a \textbf{Step} is shown below:
\begin{lstlisting}[style=XML,morekeywords={class}]
<Simulation>
...
<Steps>
...
<WhatEverStepType name='aName'>
<Role1 class='aMainClassType' type='aSubType'>userDefinedName1</Role1>
<Role2 class='aMainClassType' type='aSubType'>userDefinedName2</Role2>
<Role3 class='aMainClassType' type='aSubType'>userDefinedName3</Role3>
<Role4 class='aMainClassType' type='aSubType'>userDefinedName4</Role4>
</WhatEverStepType>
...
</Steps>
...
</Simulation>
\end{lstlisting}
As shown above, each \textbf{Step} consists of
a list of entities organized into ``Roles.''
%
Each role represents a behavior the entity (object) will assume during the
evaluation of the \textbf{Step}.
%
In RAVEN, several different roles are available:
\begin{itemize}
\item \textbf{Input} represents the input of the \textbf{Step}.
The allowable input objects depend on the type of \textbf{Model} in the
\textbf{Step}.
\item \textbf{Output} defines where to collect the results of an action
performed by the \textbf{Model}.
It is generally one of the following types: \textbf{DataObjects}, \textbf{Databases},
or \textbf{OutStreams}.
\item \textbf{Model} represents a physical or mathematical system or behavior.
The object used in this role defines the allowable types of
\textbf{Inputs} and \textbf{Outputs} usable in this step.
\item \textbf{Sampler} defines the sampling strategy to be used to probe the model.
\\ It is worth mentioning that, when a sampling strategy is employed, the ``variables'' defined in the \xmlNode{variable} blocks are going to be
directly placed in the \textbf{Output} objects of type \textbf{DataObjects} and \textbf{Databases}.
\item \textbf{Function} is an extremely important role. It introduces the capability to
perform pre- or post-processing of Model \textbf{Inputs} and \textbf{Outputs}. Its specific
behavior depends on the \textbf{Step} that is using it.
%\item \textbf{Function.} The Function role is extremely important, for example, when performing Adaptive Sampling to represent the metric of the transition regions. This role is the role used, for example, to collapse information coming from a Model.
\item \textbf{ROM} defines an acceleration Reduced Order Model to use for a
\textbf{Step}.
\item \textbf{SolutionExport} the DataObject to store solutions from Optimizer or Sampler execution
in a Step. For example, a LimitSurfaceSearch Sampler outputs the coordinates of the limit surface;
similarly, an Optimizer outputs the convergence history and optimal points.
See specific Samplers and Optimizers for details.
\end{itemize}
Depending on the \textbf{Step} type, different combinations of these roles can
be used.
For this reason, it is important to analyze each \textbf{Step} type in details.
The available steps are the following:
\begin{itemize}
\item SingleRun (see Section~\ref{subsec:stepSingleRun})
\item MultiRun(see Section~\ref{subsec:stepMultiRun})
\item IOStep(see Section~\ref{subsec:stepIOStep})
\item RomTrainer(see Section~\ref{subsec:stepRomTrainer})
\item PostProcess(see Section~\ref{subsec:stepPostProcess})
\end{itemize}
%%%%%%%%%%%%%%%%%%%%
%%%%% SINGLERUN %%%%%
%%%%%%%%%%%%%%%%%%%%
\subsection{SingleRun}
\label{subsec:stepSingleRun}
The \textbf{SingleRun} is the simplest step the user can use to assemble a
calculation flow: perform a single action of a \textbf{Model}.
%
For example, it can be used to run a single job (Code Model) and collect the
outcome(s) in a ``\textbf{DataObjects}'' object of type \textbf{Point} or
\textbf{History} (see Section~\ref{sec:DataObjects} for more details on available data
representations).
The specifications of this Step must be defined within a \xmlNode{SingleRun} XML
block.
%
This XML node has the following definable attributes:
\vspace{-5mm}
\begin{itemize}
\itemsep0em
\item \xmlAttr{name}, \xmlDesc{required string attribute}, user-defined name of
this \textbf{Step}. \nb This name is used to reference this specific entity
in the \xmlNode{RunInfo} block, under the \xmlNode{Sequence} node. If the name
of this \textbf{Step} is not listed in the \xmlNode{Sequence} block, its action is not
going to be performed.
\item \xmlAttr{repeatFailureRuns}, \xmlDesc{optional integer attribute}, this optional
attribute could be used to set a certain number of repetitions that need to be performed
when a realization (i.e. run) fails (e.g. \xmlAttr{repeatFailureRuns} = ``3'', 3 tries).
\item \xmlAttr{pauseAtEnd}, \xmlDesc{optional boolean/string attribute (case insensitive)}, if True
(True values = True, yes, y, t), the code will pause at the end of
the step, waiting for a user signal to continue. This is used in case one or
more of the \textbf{Outputs} are of type \textbf{OutStreams}.
For example, it can be used when an \textbf{OutStreams} of type
\textbf{Plot} is output to the screen. Thus, allowing the user to interact with
the \textbf{Plot} (e.g. rotate the figure, change the scale, etc.).
\default{False}.
\item \xmlAttr{clearRunDir}, \xmlDesc{optional boolean attribute}, indicates whether the run
directory should be cleared (removed) before beginning
the Step calculation. The run directory has the same \xmlAttr{name} as the \xmlNode{Step} and is
located within the \xmlNode{WorkingDir}. Note this directory is only used for a \xmlNode{Step}
with certain \xmlNode{Model} types, such as \xmlNode{Code}.
\end{itemize}
In the \xmlNode{SingleRun} input block, the user needs to specify the objects
needed for the different allowable roles.
%
This step accepts the following roles:
\begin{itemize}
\item \xmlNode{Input}, \xmlDesc{string, required parameter}, names an entity
(defined elsewhere in the RAVEN input) that will be used as input for the model
specified in this step.
This XML node accepts the following attributes:
\begin{itemize}
\item \xmlAttr{class}, \xmlDesc{required string attribute}, main object class
type.
This string corresponds to the tag of the main object's type used in the
input.
For example, \xmlString{Files}, \xmlString{DataObjects}, \xmlString{Databases},
etc.
\item \xmlAttr{type}, \xmlDesc{required string attribute}, the actual entity
type.
This attribute needs to specify the object type within the main object
class.
For example, if the \xmlAttr{class} attribute is \xmlString{DataObjects}, the
\xmlAttr{type} attribute might be \xmlString{PointSet}.
\nb The class \xmlString{Files} has no type (i.e.
\xmlAttr{type}\textbf{\texttt{=''}}).
\end{itemize}
\nb The \xmlAttr{class} and, consequently, the \xmlAttr{type} usable for this
role depends on the particular \xmlNode{Model} being used.
%
In addition, the user can specify as many \xmlNode{Input} nodes as needed.
\item \xmlNode{Model}, \xmlDesc{string, required parameter}, names an entity
defined elsewhere in the input file to be used as a model for this step.
This XML node accepts the following attributes:
\begin{itemize}
\item \xmlAttr{class}, \xmlDesc{required string attribute}, main object class
type.
For this role, only \xmlString{Models} can be used.
\item \xmlAttr{type}, \xmlDesc{required string attribute}, the actual entity
type.
This attribute needs to specify the object type within the \texttt{Models}
object class.
For example, the \xmlAttr{type} attribute might be \xmlString{Code},
\xmlString{ROM}, etc.
\end{itemize}
\item \xmlNode{Output}, \xmlDesc{string, required parameter} names an entity
defined elsewhere in the input to use as the output for the \textbf{Model}.
This XML node recognizes the following attributes:
\begin{itemize}
\item \xmlAttr{class}, \xmlDesc{required string attribute}, main object class
type.
For this role, only \xmlString{DataObjects}, \xmlString{Databases}, and
\xmlString{OutStreams} can be used.
\item \xmlAttr{type}, \xmlDesc{required string attribute}, the actual entity
type.
This attribute needs to specify the object type within the main object
class.
For example, if the \xmlAttr{class} attribute is \xmlString{DataObjects}, the
\xmlAttr{type} attribute might be \xmlString{PointSet}.
\end{itemize}
\nb The number of \xmlNode{Output} nodes is unlimited.
\end{itemize}
Example:
\begin{lstlisting}[style=XML,morekeywords={class,pauseAtEnd}]
<Steps>
...
<SingleRun name='StepName' pauseAtEnd='false'>
<Input class='Files' type=''>anInputFile.i</Input>
<Input class='Files' type=''>aFile</Input>
<Model class='Models' type='Code'>aCode</Model>
<Output class='Databases' type='HDF5'>aDatabase</Output>
<Output class='DataObjects' type='History'>aData</Output>
</SingleRun>
...
</Steps>
\end{lstlisting}
%%%%%%%%%%%%%%%%%%%
%%%%% MULTIRUN %%%%%
%%%%%%%%%%%%%%%%%%%
\subsection{MultiRun}
\label{subsec:stepMultiRun}
The \textbf{MultiRun} step allows the user to assemble the calculation flow of
an analysis that requires multiple ``runs'' of the same model.
%
This step is used, for example, when the input (space) of the model needs to be
perturbed by a particular sampling strategy.
%
The specifications of this type of step must be defined within a
\xmlNode{MultiRun} XML block.
%
This XML node recognizes the following list of attributes:
\vspace{-5mm}
\begin{itemize}
\itemsep0em
\item \xmlAttr{name}, \xmlDesc{required string attribute}, user-defined name of
this Step.
\nb As with other objects, this name is used to reference this specific entity
in the \xmlNode{RunInfo} block, under the \xmlNode{Sequence} node. If the name
of this \textbf{Step} is not listed in the \xmlNode{Sequence} block, its action is not
going to be performed.
\item \xmlAttr{re-seeding}, \xmlDesc{optional integer/string attribute}, this optional
attribute could be used to control the seeding of the random number generator (RNG).
If specified, the RNG can be re-seeded. The value of this attribute
can be either 1) an integer value with the seed to be used (e.g. \xmlAttr{re-seeding} =
``20021986''), or 2) the string value ``continue'', in which case the RNG is not re-initialized.
\item \xmlAttr{repeatFailureRuns}, \xmlDesc{optional integer attribute}, this optional
attribute could be used to set a certain number of repetitions that need to be performed
when a realization (i.e. run) fails (e.g. \xmlAttr{repeatFailureRuns} = ``3'', 3 tries).
\item \xmlAttr{pauseAtEnd}, \xmlDesc{optional boolean/string attribute}, if True
(True values = True, yes, y, t), the code will pause at the end of
the step, waiting for a user signal to continue. This is used in case one or
more of the \textbf{Outputs} are of type \textbf{OutStreams}.
For example, it can be used when an \textbf{OutStreams} of type
\textbf{Plot} is output to the screen. Thus, allowing the user to interact with
the \textbf{Plot} (e.g. rotate the figure, change the scale, etc.).
\item \xmlAttr{sleepTime}, \xmlDesc{optional float attribute}, in this attribute
the user can specify the waiting time (seconds) between two subsequent inquiries
of the status of the submitted job (i.e. check if a run has finished).
\default{0.05}.
\end{itemize}
\vspace{-5mm}
In the \xmlNode{MultiRun} input block, the user needs to specify the objects
that need to be used for the different allowable roles.
%
This step accepts the following roles:
\vspace{-5mm}
\begin{itemize}
\item \xmlNode{Input}, \xmlDesc{string, required parameter}, names an entity to
be used as input for the model specified in this step.
This XML node accepts the following attributes:
\begin{itemize}
\item \xmlAttr{class}, \xmlDesc{required string attribute}, main object class
type.
This string corresponds to the tag of the main object's type used in the
input.
For example, \xmlString{Files}, \xmlString{DataObjects}, \xmlString{Databases},
etc.
\item \xmlAttr{type}, \xmlDesc{required string attribute}, the actual entity
type.
This attribute specifies the object type within the main object class.
For example, if the \xmlAttr{class} attribute is \xmlString{DataObjects}, the
\xmlAttr{type} attribute might be \xmlString{PointSet}.
\nb The class \xmlString{Files} has no type (i.e.
\xmlAttr{type}\textbf{\texttt{=''}}).
\end{itemize}
\nb The \xmlAttr{class} and, consequently, the \xmlAttr{type} usable for this
role depend on the particular \xmlNode{Model} being used.
The user can specify as many \xmlNode{Input} nodes as needed.
\item \xmlNode{Model}, \xmlDesc{string, required parameter} names an entity
defined elsewhere in the input that will be used as the model for this step.
This XML node recognizes the following attributes:
\begin{itemize}
\item \xmlAttr{class}, \xmlDesc{required string attribute}, main object class
type.
For this role, only \xmlString{Models} can be used.
\item \xmlAttr{type}, \xmlDesc{required string attribute}, the actual entity
type.
This attribute needs to specify the object type within the \texttt{Models}
object class.
For example, the \xmlAttr{type} attribute might be \xmlString{Code},
\xmlString{ROM}, etc.
\end{itemize}
\item \xmlNode{Sampler}, \xmlDesc{string, optional parameter} names an entity
defined elsewhere in the input file to be used as a sampler.
As mentioned in Section \ref{sec:Samplers}, the \textbf{Sampler} is in charge of
defining the strategy to characterize the input space.
This XML node recognizes the following attributes:
\begin{itemize}
\item \xmlAttr{class}, \xmlDesc{required string attribute}, main object class
type.
This string corresponds to the tag of the main object's type used.
Only \xmlString{Samplers} can be used for this role.
\item \xmlAttr{type}, \xmlDesc{required string attribute}, the actual entity
type.
This attribute needs to specify the object type within the \texttt{Samplers}
object class.
For example, the \xmlAttr{type} attribute might be \xmlString{MonteCarlo},
\xmlString{Adaptive}, \xmlString{AdaptiveDET}, etc.
See Section \ref{sec:Samplers} for all the different types currently
supported.
\end{itemize}
\item \xmlNode{Optimizer}, \xmlDesc{string, optional parameter} names an entity
defined elsewhere in the input file to be used as an optimizer.
As mentioned in Section \ref{sec:Optimizers}, the \textbf{Optimizer} is in charge of
defining the strategy to optimize a user-specified variable.
This XML node recognizes the following attributes:
\begin{itemize}
\item \xmlAttr{class}, \xmlDesc{required string attribute}, main object class
type.
This string corresponds to the tag of the main object's type used.
Only \xmlString{Optimizers} can be used for this role.
\item \xmlAttr{type}, \xmlDesc{required string attribute}, the actual entity
type.
This attribute needs to specify the object type within the \texttt{Optimizers}
object class.
For example, the \xmlAttr{type} attribute might be \xmlString{SPSA}, etc.
See Section \ref{sec:Optimizers} for all the different types currently
supported.
\end{itemize}
\nb For Multi-Run, either one \xmlNode{Sampler} or one \xmlNode{Optimizer} is required.
\item \xmlNode{SolutionExport}, \xmlDesc{string, optional (Sampler) or required (Optimizer) parameter} identifies
an entity to be used for exporting key information coming from the
\textbf{Sampler} or \textbf{Optimizer} object during the simulation. This node is \textbf{Required}
when an \textbf{Optimizer} is used.
This XML node accepts the following attributes:
\begin{itemize}
\item \xmlAttr{class}, \xmlDesc{required string attribute}, main object class
type.
This string corresponds to the tag of the main object's type used in the
input.
For this role, only \xmlString{DataObjects} can be used.
\item \xmlAttr{type}, \xmlDesc{required string attribute}, the actual entity
type.
This attribute needs to specify the object type within the \texttt{DataObjects}
object class.
For example, the \xmlAttr{type} attribute might be \xmlString{PointSet},
\xmlString{HistorySet}, etc. \\
\nb Whether or not it is possible to export the \textbf{Sampler} solution
depends on the \xmlAttr{type}.
Currently, only the Samplers in the \xmlString{Adaptive} category and all Optimizers will
export their solution into a \xmlNode{SolutionExport} entity. For Samplers, the \xmlNode{Outputs} node
in the \texttt{DataObjects} needs to contain the goal \xmlNode{Function} name.
For example, if \xmlNode{Sampler} is of type \xmlString{Adaptive}, the
\xmlNode{SolutionExport} needs to be of type \xmlString{PointSet} and
it will contain the coordinates, in the input space, that belong to the
``Limit Surface''. For Optimizers, the
\xmlNode{SolutionExport} needs to be of type \xmlString{HistorySet} and it will contain all the optimization trajectories, each stored as a history that records how the variables are updated along that trajectory.
\end{itemize}
\item \xmlNode{Output}, \xmlDesc{string, required parameter} identifies an
entity to be used as output for this step.
This XML node recognizes the following attributes:
\begin{itemize}
\item \xmlAttr{class}, \xmlDesc{required string attribute}, main object class
type.
This string corresponds to the tag of the main object's type used in the
input.
For this role, only \xmlString{DataObjects}, \xmlString{Databases}, and
\xmlString{OutStreams} may be used.
\item \xmlAttr{type}, \xmlDesc{required string attribute}, the actual entity
type.
This attribute specifies the object type within the main object class.
For example, if the \xmlAttr{class} attribute is \xmlString{DataObjects}, the
\xmlAttr{type} attribute might be \xmlString{PointSet}.
\end{itemize}
\nb The number of \xmlNode{Output} nodes is unlimited.
\end{itemize}
Example:
\begin{lstlisting}[style=XML,morekeywords={pauseAtEnd,sleepTime,class}]
<Steps>
...
<MultiRun name='StepName1' pauseAtEnd='False' sleepTime='0.01'>
<Input class='Files' type=''>anInputFile.i</Input>
<Input class='Files' type=''>aFile</Input>
<Sampler class='Samplers' type = 'Grid'>aGridName</Sampler>
<Model class='Models' type='Code'>aCode</Model>
<Output class='Databases' type='HDF5'>aDatabase</Output>
      <Output class='DataObjects' type='HistorySet'>aData</Output>
</MultiRun >
<MultiRun name='StepName2' pauseAtEnd='True' sleepTime='0.02'>
<Input class='Files' type=''>anInputFile.i</Input>
<Input class='Files' type=''>aFile</Input>
<Sampler class='Samplers' type='Adaptive'>anAS</Sampler>
<Model class='Models' type='Code'>aCode</Model>
<Output class='Databases' type='HDF5'>aDatabase</Output>
      <Output class='DataObjects' type='HistorySet'>aData</Output>
<SolutionExport class='DataObjects' type='PointSet'>
aTPS
</SolutionExport>
</MultiRun>
...
</Steps>
\end{lstlisting}
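An Optimizer-driven Multi-Run follows the same structure, with the \xmlNode{Sampler} node
replaced by an \xmlNode{Optimizer} node and a \xmlNode{SolutionExport} of type
\xmlString{HistorySet} (required in this case). The following is only an illustrative sketch
(entity names are hypothetical, not taken from an actual input file):
\begin{lstlisting}[style=XML,morekeywords={class}]
<Steps>
  ...
  <MultiRun name='StepNameOptimization'>
    <Input class='Files' type=''>anInputFile.i</Input>
    <Model class='Models' type='Code'>aCode</Model>
    <Optimizer class='Optimizers' type='SPSA'>anOptimizer</Optimizer>
    <SolutionExport class='DataObjects' type='HistorySet'>
      optTrajectories
    </SolutionExport>
    <Output class='DataObjects' type='PointSet'>aData</Output>
  </MultiRun>
  ...
</Steps>
\end{lstlisting}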
%%%%%%%%%%%%%%%%%%%%
%%%%% IOStep %%%%%
%%%%%%%%%%%%%%%%%%%%
\subsection{IOStep}
\label{subsec:stepIOStep}
As the name suggests, the \textbf{IOStep} is the step where the user can perform
input/output operations among the different I/O entities available in RAVEN.
%
This step type is used to:
\begin{itemize}
\item construct/update a \textit{Database} from a \textit{DataObjects} object, and
vice versa;
\item construct/update a \textit{DataObject} from a
\textit{CSV} file contained in a directory;
\item construct/update a \textit{Database} or a \textit{DataObjects} object from
\textit{CSV} files contained in a directory;
\item stream the content of a \textit{Database} or a \textit{DataObjects} out through
an \textbf{OutStream} object (see section \ref{sec:outstream});
\item store/retrieve a \textit{ROM} or \textit{ExternalModel} to/from an external \textit{File} using the Python
 \textit{Pickle} module. This capability can be used to create and store an ExternalModel or a fast-running
 mathematical model (ROM) trained to predict a response of interest of a physical system. These models can be
 recovered in other simulations or used to evaluate the response of a physical system
 in a Python program through the Pickle module.
\item export a \textit{ROM} or \textit{ExternalModel} to an external \textit{FMI/FMU} \textit{File} using the RAVEN native
 \textit{FMI/FMU} exporting capability. Note that \textit{ExternalModels} must implement a \texttt{runStep} function for calculating the step when running as an \textit{FMU}. This export is selected by specifying \verb|type="FMU"| for the FMU file in the Files section of the input file. See also the notes at \ref{subsubsec:FMUnotes}.
\end{itemize}
%
The specifications of this type of step must be defined within an
\xmlNode{IOStep} XML block.
%
This XML node can accept the following attributes:
\vspace{-5mm}
\begin{itemize}
\itemsep0em
\item \xmlAttr{name}, \xmlDesc{required string attribute}, user-defined name of
this Step.
\nb As for the other objects, this is the name that can be used to refer to
this specific entity in the \xmlNode{RunInfo} block, under the
\xmlNode{Sequence} node.
\item \xmlAttr{pauseAtEnd}, \xmlDesc{optional boolean/string attribute (case insensitive)}, if True
(True values = True, yes, y, t), the code will pause at the end of
the step, waiting for a user signal to continue. This is used in case one or
more of the \textbf{Outputs} are of type \textbf{OutStreams}.
For example, it can be used when an \textbf{OutStreams} of type
\textbf{Plot} is output to the screen, thus allowing the user to interact
with the \textbf{Plot} (e.g. rotate the figure, change the scale, etc.).
\default{False}.
\item \xmlAttr{fromDirectory}, \xmlDesc{optional string attribute}, the directory
  where the input files can be found when loading data from a file or series of
  files directly into a DataObject.
\end{itemize}
\vspace{-5mm}
In the \xmlNode{IOStep} input block, the user specifies the objects that need to
be used for the different allowable roles.
This step accepts the following roles:
\begin{itemize}
\item \xmlNode{Input}, \xmlDesc{string, required parameter}, names an entity
that is going to be used as a source (input) from which the information needs
to be extracted.
This XML node recognizes the following attributes:
\begin{itemize}
\item \xmlAttr{class}, \xmlDesc{required string attribute}, main object class
type.
This string corresponds to the tag of the main object's type used in the
input.
As already mentioned, the allowable main classes are \xmlString{DataObjects},
\xmlString{Databases}, \xmlString{Models} and \xmlString{Files}.
\item \xmlAttr{type}, \xmlDesc{required string attribute}, the actual entity
type.
This attribute needs to specify the object type within the main object
class.
For example, if the \xmlAttr{class} attribute is \xmlString{DataObjects}, the
    \xmlAttr{type} attribute might be \xmlString{PointSet}. If the \xmlAttr{class} attribute is \xmlString{Models}, the
    \xmlAttr{type} attribute must be \xmlString{ROM} or \xmlString{ExternalModel}; if the \xmlAttr{class} attribute is \xmlString{Files}, the
    \xmlAttr{type} attribute is generally left empty (i.e. \xmlAttr{type}\textbf{\texttt{=''}}).
\end{itemize}
\item \xmlNode{Output}, \xmlDesc{string, required parameter} names an entity to
be used as the target (output) where the information extracted in the input
will be stored.
This XML node needs to contain the following attributes:
\begin{itemize}
\item \xmlAttr{class}, \xmlDesc{required string attribute}, main object class
type.
This string corresponds to the tag of the main object's type used in the
input.
The allowable main classes are \xmlString{DataObjects}, \xmlString{Databases}, \xmlString{OutStreams}, \xmlString{Models} and \xmlString{Files}.
\item \xmlAttr{type}, \xmlDesc{required string attribute}, the actual entity
type.
This attribute specifies the object type within the main object class.
For example, if the \xmlAttr{class} attribute is
\xmlString{OutStreams}, the \xmlAttr{type} attribute might be
\xmlString{Plot}.
\end{itemize}
\end{itemize}
This step acts as a ``transfer network'' among the different RAVEN storing
(or streaming) objects.
%
The number of \xmlNode{Input} and \xmlNode{Output} nodes is unlimited, but
should match.
%
This step assumes a 1-to-1 mapping (e.g. first \xmlNode{Input} is going to be
used for the first \xmlNode{Output}, etc.).
\\
\nb This 1-to-1 mapping is not present when \xmlNode{Output} nodes are of
\xmlAttr{class}\\\xmlString{OutStreams}, since \textbf{OutStreams}
objects are already linked to a Data object in the relative RAVEN input block.
%
In this case, the user needs to provide all of the
``DataObjects'' objects linked to the OutStreams objects (see the example
below) in the \xmlNode{Input} nodes.
\begin{lstlisting}[style=XML,morekeywords={class}]
<Steps>
...
<IOStep name='OutStreamStep'>
<Input class='DataObjects' type='HistorySet'>aHistorySet</Input>
<Input class='DataObjects' type='PointSet'>aTPS</Input>
<Output class='OutStreams' type='Plot'>plot_hist
</Output>
<Output class='OutStreams' type='Print'>print_hist
</Output>
<Output class='OutStreams' type='Print'>print_tps
</Output>
<Output class='OutStreams' type='Print'>print_tp
</Output>
</IOStep>
...
<IOStep name='PushDataObjectsIntoDatabase'>
<Input class='DataObjects' type='HistorySet'>aHistorySet</Input>
<Input class='DataObjects' type='PointSet'>aTPS</Input>
<Output class='Databases' type='NetCDF'>aDatabase</Output>
<Output class='Databases' type='HDF5'>aDatabase</Output>
</IOStep>
...
</Steps>
\end{lstlisting}
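Similarly, the \xmlAttr{fromDirectory} attribute can be used when loading \textit{CSV} data
directly into a \textit{DataObjects} entity. A purely illustrative sketch (file, directory, and
entity names are hypothetical) is:
\begin{lstlisting}[style=XML,morekeywords={class,fromDirectory}]
<Steps>
  ...
  <IOStep name='LoadCSVData' fromDirectory='aDirectoryContainingCSVs'>
    <Input class='Files' type=''>aCSVFile</Input>
    <Output class='DataObjects' type='PointSet'>aPointSet</Output>
  </IOStep>
  ...
</Steps>
\end{lstlisting}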
%
A summary of the objects that can go from/to other objects is shown in Table \ref{tab:IOSTEP}:
\begin{table}[h!]
\centering
\begin{tabular}{l|l|l}
\xmlNode{Input} & \xmlNode{Output} & Resulting behavior \\ \hline
DataObject & Database & Store to On-Disk Database \\
& OutStream & Print or Plot Data \\ \hline
Database & DataObject & Load from On-Disk Database \\ \hline
File & DataObject & Load from On-Disk CSV \\
& ExternalModel & Load On-Disk Serialized ExternalModel \\
& ROM & Load On-Disk Serialized ROM \\ \hline
ROM & DataObject & Print ROM Metadata to CSV, XML \\
& File & Serialize ROM to Disk \\
& File & If \emph{type} is \texttt{fmu}, Serialize ROM to FMU\\ \hline
ExternalModel & File & Serialize ExternalModel to Disk \\
& File & If \emph{type} is \texttt{fmu}, Serialize ExternalModel to FMU
\end{tabular}
\caption{Object options for \xmlNode{IOStep} operations}
\label{tab:IOSTEP}
\end{table}
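For instance, following the ``ROM $\rightarrow$ File'' row of Table \ref{tab:IOSTEP}, an
\xmlNode{IOStep} that serializes a trained ROM into a binary file could be sketched as follows
(entity names are illustrative):
\begin{lstlisting}[style=XML,morekeywords={class}]
<Steps>
  ...
  <IOStep name='SerializeROM'>
    <Input class='Models' type='ROM'>aTrainedROM</Input>
    <Output class='Files' type=''>aPickledROMFile</Output>
  </IOStep>
  ...
</Steps>
\end{lstlisting}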
As already mentioned, the \xmlNode{IOStep} can be used to export (serialize) a ROM or ExternalModel
in a binary file. To use the exported ROM or ExternalModel in an external Python (or
Python-compatible) code, the RAVEN framework must be present on the end-user machine.
The main reason for this is that the \textit{Pickle} module uses the class definitions to template
the reconstruction of the serialized object in memory.
\\ In order to facilitate the usage of the serialized ROM or ExternalModel in an external Python code, the RAVEN
team provides a utility class contained in:
\begin{lstlisting}[language=bash]
./raven/scripts/externalROMloader.py
\end{lstlisting}
An example of how to use this utility class to load and use a serialized ROM (already trained) or ExternalModel is reported below:
%
Example Python Function:
\begin{lstlisting}[language=python]
from externalROMloader import ravenROMexternal
import numpy as np
# load the serialized ROM, providing the path to the RAVEN framework
rom = ravenROMexternal("path_to_pickled_rom/ROM.pk",
                       "path_to_RAVEN_framework")
# build the evaluation request (one array-like value per input variable)
request = {"x1": np.atleast_1d(Value1), "x2": np.atleast_1d(Value2)}
eval = rom.evaluate(request)
print(str(eval))
\end{lstlisting}
The module above can also be used to evaluate a ROM or ExternalModel from an input file:
\begin{lstlisting}[language=bash]
python ./raven/scripts/externalROMloader.py input_file.xml
\end{lstlisting}
The input file has the following format:
\begin{lstlisting}[style=XML,morekeywords={class}]
<?xml version="1.0" ?>
<external_rom>
<RAVENdir>path_to_RAVEN_framework</RAVENdir>
  <ROMfile>path_to_pickled_rom/ROM.pk</ROMfile>
<evaluate>
<x1>0. 1. 0.5</x1>
<x2>0. 0.4 2.1</x2>
</evaluate>
<inspect>true</inspect>
<outputFile>output_file_name</outputFile>
</external_rom>
\end{lstlisting}
The output of the above command would look as follows:
\begin{lstlisting}[style=XML,morekeywords={class}]
<?xml version="1.0" ?>
<UROM>
<settings>
<Target>ans</Target>
<name>UROM</name>
<IndexSet>TensorProduct</IndexSet>
<Features>[u'x1' u'x2']</Features>
<PolynomialOrder>2</PolynomialOrder>
</settings>
<evaluations>
<evaluation realization="1">
<x2>0.0</x2>
<x1>0.0</x1>
<ans>-3.1696867353e-14</ans>
</evaluation>
<evaluation realization="2">
<x2>0.4</x2>
<x1>1.0</x1>
<ans>1.4</ans>
</evaluation>
<evaluation realization="3">
<x2>2.1</x2>
<x1>0.5</x1>
<ans>2.6</ans>
</evaluation>
</evaluations>
</UROM>
\end{lstlisting}
\subsubsection{FMU Notes}
\label{subsubsec:FMUnotes}
The FMU exporter is currently experimental. In order to install it, the
optional libraries need to be installed with \verb|--optional| added
as a parameter (and \verb|./build_raven| will need to be run).
Example:
\begin{lstlisting}[language=bash]
./scripts/establish_conda_env.sh --install --optional
./build_raven
\end{lstlisting}
In addition, in order to use an FMU that is generated by RAVEN, it needs
to be run in a RAVEN environment. The way RAVEN-generated FMUs have been
tested is by using:
\begin{lstlisting}[language=bash]
source ./scripts/establish_conda_env.sh --load
python load_and_run_fmu.py
\end{lstlisting}
where \verb|load_and_run_fmu.py| is a Python program that uses \texttt{fmpy} to
load and run the generated FMU.
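A minimal sketch of what such a script might contain is shown below; it assumes the generated
FMU is named \texttt{Model.fmu} and exposes input variables \texttt{x1} and \texttt{x2}
(file and variable names are purely illustrative):
\begin{lstlisting}[language=python]
# illustrative example of loading and running a RAVEN-generated FMU with fmpy
from fmpy import dump, simulate_fmu

fmu_path = "Model.fmu"   # hypothetical path to the generated FMU
dump(fmu_path)           # print the model description of the FMU

# simulate the FMU, assigning values to the (hypothetical) input variables
results = simulate_fmu(fmu_path,
                       start_values={"x1": 0.5, "x2": 1.0},
                       stop_time=1.0)
print(results)           # structured array with the recorded outputs
\end{lstlisting}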
%%%%%%%%%%%%%%%%%%%%
%%%%% ROM %%%%%
%%%%%%%%%%%%%%%%%%%%
\subsection{RomTrainer}
\label{subsec:stepRomTrainer}
The \textbf{RomTrainer} step type performs the training of a Reduced Order
Model (aka Surrogate Model).
%
The specifications of this step must be defined within a \xmlNode{RomTrainer}
block.
%
This XML node accepts the following attributes:
\vspace{-5mm}
\begin{itemize}
\itemsep0em
\item \xmlAttr{name}, \xmlDesc{required string attribute}, user-defined name of
this step.
\nb As for the other objects, this is the name that can be used to refer to this
specific entity in the \xmlNode{RunInfo} block under \xmlNode{Sequence}.
\end{itemize}
\vspace{-5mm}
In the \xmlNode{RomTrainer} input block, the user will specify the objects
needed for the different allowable roles.
%
This step accepts the following roles:
\begin{itemize}
\item \xmlNode{Input}, \xmlDesc{string, required parameter} names an entity to
be used as a source (input) from which the information needs to be extracted.
This XML node accepts the following attributes:
\begin{itemize}
\item \xmlAttr{class}, \xmlDesc{required string attribute}, main object class
type.
This string corresponds to the tag of the main object's type used in the
input.
The only allowable main class is \xmlString{DataObjects}.
\item \xmlAttr{type}, \xmlDesc{required string attribute}, the actual entity
type.
This attribute specifies the object type within the main object class.
For example, the \xmlAttr{type} attribute might be \xmlString{PointSet}.
\nb Depending on which type of \xmlString{DataObjects} is used, the ROM
will be a Static or Dynamic (i.e. time-dependent) model. This implies that
both \xmlString{PointSet} and \xmlString{HistorySet} are allowed (but not
\xmlString{DataSet} yet).
\end{itemize}
\item \xmlNode{Output}, \xmlDesc{string, required parameter}, names a ROM entity
that is going to be trained.
This XML node recognizes the following attributes:
\begin{itemize}
\item \xmlAttr{class}, \xmlDesc{required string attribute}, main object class
type.
    This string corresponds to the tag of the main object's type used in the
input.
The only allowable main class is \xmlString{Models}.
\item \xmlAttr{type}, \xmlDesc{required string attribute}, the actual entity
type.
This attribute needs to specify the object type within the main object
class.
The only type accepted here is, currently, \xmlString{ROM}.
\end{itemize}
\end{itemize}
Example:
\begin{lstlisting}[style=XML,morekeywords={class}]
<Steps>
...
<RomTrainer name='aStepNameStaticROM'>
<Input class='DataObjects' type='PointSet'>aPS</Input>
<Output class='Models' type='ROM' >aROM</Output>
</RomTrainer>
<RomTrainer name='aStepNameTimeDependentROM'>
<Input class='DataObjects' type='HistorySet'>aHS</Input>
<Output class='Models' type='ROM' >aTimeDepROM</Output>
</RomTrainer>
...
</Steps>
\end{lstlisting}
%%%%%%%%%%%%%%%%%%%%
%%%%% PostProcess %%%%%
%%%%%%%%%%%%%%%%%%%%
\subsection{PostProcess}
\label{subsec:stepPostProcess}
The \textbf{PostProcess} step is used to post-process data or manipulate RAVEN
entities.
%
It is aimed at performing a single action that is carried out by a
\textbf{Model} of type \textbf{PostProcessor}.
%
The specifications of this type of step are defined within a
\xmlNode{PostProcess} XML block.
%
This XML node specifies the following attributes:
\vspace{-5mm}
\begin{itemize}
\itemsep0em
\item \xmlAttr{name}, \xmlDesc{required string attribute}, user-defined name of
this Step.
\nb As for the other objects, this is the name that is used to refer to
this specific entity in the \xmlNode{RunInfo} block under the
\xmlNode{Sequence} node.
\item \xmlAttr{pauseAtEnd}, \xmlDesc{optional boolean/string attribute (case insensitive)}, if True
(True values = True, yes, y, t), the code will pause at the end of
the step, waiting for a user signal to continue. This is used in case one or
more of the \textbf{Outputs} are of type \textbf{OutStreams}.
For example, it can be used when an \textbf{OutStreams} of type
\textbf{Plot} is output to the screen, thus allowing the user to interact
with the \textbf{Plot} (e.g. rotate the figure, change the scale, etc.).
\default{False}.
\end{itemize}
\vspace{-5mm}
In the \xmlNode{PostProcess} input block, the user needs to specify the objects
needed for the different allowable roles.
%
This step accepts the following roles:
\begin{itemize}
\item \xmlNode{Input}, \xmlDesc{string, required parameter}, names an entity to
be used as input for the model specified in this step.
This XML node accepts the following attributes:
\begin{itemize}
\item \xmlAttr{class}, \xmlDesc{required string attribute}, main object class
type.
This string corresponds to the tag of the main object's type used in the
input.
For example, \xmlString{Files}, \xmlString{DataObjects}, \xmlString{Databases},
etc.
\item \xmlAttr{type}, \xmlDesc{required string attribute}, the actual entity
type.
This attribute specifies the object type within the main object class.
For example, if the \xmlAttr{class} attribute is \xmlString{DataObjects},
the \xmlAttr{type} attribute might be \xmlString{PointSet}.
\nb The class \xmlString{Files} has no type (i.e.
\xmlAttr{type}\textbf{\texttt{=''}}).
\end{itemize}
\nb The \xmlAttr{class} and, consequently, the \xmlAttr{type} usable for this
role depend on the particular type of \textbf{PostProcessor} being used.
In addition, the user can specify as many \xmlNode{Input} nodes as needed by the
model.
\item \xmlNode{Model}, \xmlDesc{string, required parameter}, names an entity to
be used as a model for this step.
This XML node recognizes the following attributes:
\begin{itemize}
\item \xmlAttr{class}, \xmlDesc{required string attribute}, main object class
type.
This string corresponds to the tag of the main object's type used in the
input.
For this role, only \xmlString{Models} can be used.
\item \xmlAttr{type}, \xmlDesc{required string attribute}, the actual entity
type.
This attribute needs to specify the object type within the
\xmlString{Models} object class.
The only type accepted here is \xmlString{PostProcessor}.
\end{itemize}
\item \xmlNode{Output}, \xmlDesc{string, required/optional parameter}, names an
entity to be used as output for the PostProcessor.
The necessity of this XML block and the types of entities that can be used as
output depend on the type of \textbf{PostProcessor} that has been used as a
\textbf{Model} (see section \ref{sec:models_postProcessor}).
This XML node specifies the following attributes:
\begin{itemize}
\item \xmlAttr{class}, \xmlDesc{required string attribute}, main object class
type.
This string corresponds to the tag of the main object's type used in the
input.
\item \xmlAttr{type}, \xmlDesc{required string attribute}, the actual entity
type.
This attribute specifies the object type within the main object class.
For example, if the \xmlAttr{class} attribute is \xmlString{DataObjects}, the
\xmlAttr{type} attribute might be \xmlString{PointSet}.
\end{itemize}
\nb The number of \xmlNode{Output} nodes is unlimited.
\end{itemize}
Example:
\begin{lstlisting}[style=XML,morekeywords={class}]
<Steps>
...
<PostProcess name='PP1'>
<Input class='DataObjects' type='PointSet' >aData</Input>
<Model class='Models' type='PostProcessor'>aPP</Model>
<Output class='Files' type=''>anOutputFile</Output>
</PostProcess>
...
</Steps>
\end{lstlisting}
\section{Invariant Measures}
\begin{definition}
A measure is a tuple $(\lambda_i)_{i\in I}$ with $\lambda_i\ge 0$ for all $i\in I$.\\
A measure $\lambda$ is invariant (or stationary/equilibrium) if $\lambda P=\lambda$.
\end{definition}
\begin{theorem}
Let $(X_n)_{n\ge 0}\sim\operatorname{Markov}(\lambda,P)$.
Suppose $\lambda$ is invariant.
Then $(X_{n+m})_{n\ge 0}$ is also $\operatorname{Markov}(\lambda,P)$.
\end{theorem}
\begin{proof}
Quite obvious, but let's check.
For any $i$ we have, by definition,
$$\mathbb P[X_m=i]=(\lambda P^m)_i=\lambda_i$$
So the initial distribution of $(X_{n+m})_{n\ge 0}$ is $\lambda$.
Also, conditional on $X_{n+m}=i$, by the Markov property of $(X_n)_{n\ge 0}$, $X_{n+m+1}$ is independent of $X_m,\ldots,X_{n+m-1}$ and has distribution $(p_{ij})_{j\in I}$.
\end{proof}
\begin{theorem}\label{power_inv}
Suppose $I$ is finite.
If there is some $i\in I$ such that $p_{ij}^{(n)}\to\pi_j$ as $n\to\infty$ for any $j\in I$, then $(\pi_j)$ is an invariant distribution.
\end{theorem}
\begin{proof}
It is obviously a distribution as
$$\sum_{j\in I}\pi_j=\sum_{j\in I}\lim_{n\to\infty}p_{ij}^{(n)}=\lim_{n\to\infty}\sum_{j\in I}p_{ij}^{(n)}=1$$
since $I$ is finite.
To see it is invariant,
\begin{align*}
\pi_j&=\lim_{n\to\infty}p_{ij}^{(n)}=\lim_{n\to\infty}p_{ij}^{(n+1)}=\lim_{n\to\infty}\sum_{k\in I}p_{ik}^{(n)}p_{kj}=\sum_{k\in I}p_{kj}\lim_{n\to\infty}p_{ik}^{(n)}\\
&=\sum_{k\in I}p_{kj}\pi_k=(\pi P)_j
\end{align*}
again because $I$ is finite.
\end{proof}
\begin{remark}
The theorem fails in general for infinite $I$.
Take for example the simple symmetric random walk on $\mathbb Z^d$.
We have $p_{ij}^{(n)}\to 0$ as $n\to\infty$ for any $i,j\in\mathbb Z^d$ but $(0,0,0,\ldots)$ is not a distribution (even though it is invariant).
\end{remark}
\begin{example}
Take
$$P=\begin{pmatrix}
1-\alpha&\alpha\\
\beta&1-\beta
\end{pmatrix}$$
We already know that
$$p_{11}^{(n)}=\begin{cases}
\beta/(\alpha+\beta)+\alpha(1-\alpha-\beta)^n/(\alpha+\beta)\text{, if $\alpha+\beta>0$}\\
1\text{, otherwise}
\end{cases}$$
So if $\alpha+\beta\notin\{0,2\}$, we have $p_{11}^{(n)}\to\beta/(\alpha+\beta)$, similarly
$$P^n\to\frac{1}{\alpha+\beta}\begin{pmatrix}
\beta&\alpha\\
\beta&\alpha
\end{pmatrix},n\to\infty$$
Hence $(\beta/(\alpha+\beta),\alpha/(\alpha+\beta))$ is an invariant distribution.
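Indeed, one can check directly that $\pi P=\pi$:
$$\pi P=\frac{1}{\alpha+\beta}\begin{pmatrix}\beta&\alpha\end{pmatrix}\begin{pmatrix}1-\alpha&\alpha\\\beta&1-\beta\end{pmatrix}=\frac{1}{\alpha+\beta}\begin{pmatrix}\beta-\alpha\beta+\alpha\beta&\alpha\beta+\alpha-\alpha\beta\end{pmatrix}=\pi$$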
\end{example}
Usually we cannot easily compute the entries of $P^n$ and take the limit to find an invariant distribution.
However, there is an obvious other way to get one.
\begin{example}
Consider
$$P=\begin{pmatrix}
0&1&0\\
0&1/2&1/2\\
1/2&0&1/2
\end{pmatrix}$$
So if we want $\pi P=\pi$, it gives the set of linear equations
$$\begin{cases}
\pi_1=\pi_3/2\\
\pi_2=\pi_1+\pi_2/2\\
\pi_3=\pi_2/2+\pi_3/2
\end{cases}$$
which, together with the normalisation $\pi_1+\pi_2+\pi_3=1$, gives $\pi_1=1/5,\pi_2=\pi_3=2/5$, which is indeed an invariant distribution.
\end{example}
\begin{definition}
For each state $k\in I$, let $\gamma_i^k$ be the expected time spent in the state $i$ between two visits to $k$, so
$$\gamma_i^k=\mathbb E_k\sum_{n=0}^{T_k-1}1_{X_n=i}=\mathbb E_k\sum_{n=1}^{T_k}1_{X_n=i}$$
\end{definition}
\begin{theorem}
Let $P$ be irreducible and recurrent, then:\\
(a) $\gamma_k^k=1$.\\
(b) $\gamma^k=(\gamma_i^k)_{i\in I}$ is an invariant measure.\\
(c) $0<\gamma_i^k<\infty$ for all $i\in I$.
\end{theorem}
\begin{proof}
(a) is obvious.
For (b), since $P$ is recurrent, we know $\mathbb P_k[T_k<\infty]=1$, so for any $j\in I$,
\begin{align*}
\gamma_j^k&=\mathbb E_k\sum_{n=1}^{T_k}1_{X_n=j}=\mathbb E_k\sum_{n=1}^\infty 1_{X_n=j,n\le T_k}\\
&=\sum_{n=1}^\infty\mathbb P_k[X_n=j,n\le T_k]\\
        &=\sum_{i\in I}\sum_{n=1}^\infty\mathbb P_k[X_{n-1}=i,X_n=j,n\le T_k]\\
        &=\sum_{i\in I}\sum_{n=1}^\infty\mathbb P_k[X_{n-1}=i,n\le T_k]\mathbb P[X_n=j|X_{n-1}=i]\\
        &=\sum_{i\in I}p_{ij}\sum_{n=1}^\infty\mathbb P_k[X_{n-1}=i,n\le T_k]\\
        &=\sum_{i\in I}p_{ij}\sum_{n=1}^\infty\mathbb E_k[1_{X_{n-1}=i,n\le T_k}]\\
        &=\sum_{i\in I}p_{ij}\mathbb E_k\sum_{n=0}^{T_k-1}1_{X_n=i}\\
&=\sum_{i\in I}p_{ij}\gamma_i^k=(\gamma^kP)_j
\end{align*}
For (c), as $P$ is irreducible, there are $n,m\ge 0$ such that $p_{ik}^{(n)}>0,p_{ki}^{(m)}>0$.
So by (b) and (a),
$$\gamma_i^k\ge\gamma_k^kp_{ki}^{(m)}=p_{ki}^{(m)}>0,1=\gamma_k^k\ge\gamma_i^kp_{ik}^{(n)}\implies \gamma_i^k\le\frac{1}{p_{ik}^{(n)}}<\infty$$
As desired.
\end{proof}
This theorem has a partial inverse.
\begin{theorem}\label{inv_measure_exp}
Let $P$ be irreducible and $\lambda$ be an invariant measure with $\lambda_k=1$ for some $k$.
Then $\lambda_i\ge\gamma_i^k$ for every $i$.\\
If in addition $P$ is recurrent, then $\lambda=\gamma^k$.
\end{theorem}
\begin{proof}
Since $\lambda$ is invariant, for $j\neq k$,
\begin{align*}
\lambda_j&=\sum_{i_1\in I}\lambda_{i_1}p_{i_1j}\\
&=\sum_{i_1\neq k}\lambda_{i_1}p_{i_1j}+p_{kj}\\
&=\sum_{i_1\neq k}\left( \sum_{i_2\neq k}\lambda_{i_2}p_{i_2i_1}+p_{ki_1} \right)p_{i_1j} +p_{kj}\\
&=\cdots\\
&=\sum_{i_1,\ldots,i_n\neq k}\lambda_{i_n}p_{i_ni_{n-1}}\cdots p_{i_1j}\\
        &\quad+\left( p_{kj}+\sum_{i_1\neq k}p_{ki_1}p_{i_1j}+\cdots+\sum_{i_1,\ldots,i_{n-1}\neq k}p_{ki_{n-1}}\cdots p_{i_2i_1}p_{i_1j} \right)\\
        &\ge p_{kj}+\sum_{i_1\neq k}p_{ki_1}p_{i_1j}+\cdots+\sum_{i_1,\ldots,i_{n-1}\neq k}p_{ki_{n-1}}\cdots p_{i_2i_1}p_{i_1j}\\
&=\mathbb P_k[X_1=j,T_k\ge 1]+\mathbb P_k[X_2=j,T_k\ge 2]+\cdots+\mathbb P_k[X_n=j,T_k\ge n]\\
&=\mathbb E_k\left[ \sum_{m=1}^{\min\{n,T_k\}}1_{X_m=j} \right]\\
&=\mathbb E_k\left[ \sum_{m=0}^{\min\{n,T_k-1\}}1_{X_m=j} \right]\\
&\to \gamma_j^k,n\to\infty
\end{align*}
which proves the first part of the theorem.
Now assume in addition that $P$ is recurrent, so that $\gamma^k$ is an invariant measure, and define $\mu=\lambda-\gamma^k$, which is non-negative by the first part and is also invariant.
As $P$ is irreducible, for any $i$, there is some $n$ such that $p_{ik}^{(n)}>0$, therefore
$$0=\mu_k=\sum_{j\in I}\mu_jp_{jk}^{(n)}\ge\mu_ip_{ik}^{(n)}\implies \mu_i=0$$
Hence $\mu=0$ which shows the second part.
\end{proof}
\begin{example}
1. The simple symmetric random walk on $\mathbb Z$ is clearly irreducible and recurrent.
The measure $\pi_i=1$ for all $i\in\mathbb Z$ is invariant.
By Theorem \ref{inv_measure_exp}, every invariant measure is of the form $\pi_i=a$ for all $i\in\mathbb Z$, for some fixed $a\ge 0$.
Consequently, since any such measure has infinite total mass unless $a=0$, there is no invariant distribution for this Markov chain.\\
2. (non-example) The simple symmetric random walk on $\mathbb Z^3$ has an invariant measure, but is not recurrent.
\end{example}
Note that a recurrent state $i\in I$ does not necessarily have finite expected return time $m_i=\mathbb E_i[T_i]$.
\begin{definition}
A recurrent state $i\in I$ is positive recurrent if $m_i<\infty$, and is null recurrent otherwise.
\end{definition}
\begin{theorem}
Let $P$ be irreducible, then the following are equivalent:\\
(a) Every state is positive recurrent.\\
(b) Some state is positive recurrent.\\
(c) $P$ admits an invariant distribution $\pi$.\\
Moreover, when (c) holds, then $m_i=1/\pi_i$.
\end{theorem}
\begin{proof}
(a) clearly implies (b).
Assume (b) and choose a positive recurrent $i\in I$.
Now $\gamma^i$ is an invariant measure and
$$\sum_{j\in I}\gamma_j^i=m_i<\infty$$
Therefore $\pi_j=\gamma_j^i/m_i$ defines an invariant distribution.\\
Assuming (c): for any $k\in I$, pick $i\in I$ with $\pi_i>0$ and, by irreducibility of $P$, some $n$ with $p_{ik}^{(n)}>0$; then $\pi_k=\sum_{j\in I}\pi_jp_{jk}^{(n)}\ge\pi_ip_{ik}^{(n)}>0$.
Fix any $k$ and set $\lambda_i=\pi_i/\pi_k$, then $\lambda$ is an invariant measure with $\lambda_k=1$, therefore $\lambda\ge\gamma^k$ by Theorem \ref{inv_measure_exp}.
So
$$m_k=\sum_{i\in I}\gamma_i^k\le\sum_{i\in I}\frac{\pi_i}{\pi_k}=\frac{1}{\pi_k}<\infty$$
which means $k$ is positive recurrent; since $k$ was arbitrary, (c) implies (a).
Also, when (c) holds, $P$ is recurrent, so by Theorem \ref{inv_measure_exp} the inequality above is in fact an equality, that is $m_k=1/\pi_k$.
\end{proof}
\begin{example}
There exist Markov chains with more than one linearly independent invariant measure.
Consider the general random walk on $\mathbb Z$ with $p_{i,i+1}=p,p_{i,i-1}=q=1-p$ where $p\notin \{0,1/2,1\}$.
Then the constant and $\pi_i=(p/q)^i$ are both invariant measures but they are linearly independent.
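Indeed, invariance of $\pi_i=(p/q)^i$ can be checked directly: for every $j\in\mathbb Z$,
$$(\pi P)_j=\pi_{j-1}p_{j-1,j}+\pi_{j+1}p_{j+1,j}=\left(\frac pq\right)^{j-1}p+\left(\frac pq\right)^{j+1}q=\left(\frac pq\right)^{j}(q+p)=\pi_j$$
and the same computation with the constant measure gives $(\pi P)_j=p+q=1$.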
\end{example}
%!TEX root = ../thesis.tex
\chapter{Introduction}
\ifpdf
\graphicspath{{Chapters/Figs/Raster/}{Chapters/Figs/PDF/}{Chapters/Figs/}}
\else
\graphicspath{{Chapters/Figs/Vector/}{Chapters/Figs/}}
\fi
%********************************** % Section **************************************
\section{Embedded Systems}
Most results in information processing and computer science apply to the domain of general computing (Personal Computers and desktop applications included). However, according to several forecasts, the future of information and communication technologies (ICT) is characterized by terms such as \emph{ubiquitous computing}, \emph{pervasive computing}, \emph{ambient intelligence}, and the \emph{post-PC era}. These terms reflect the fact that computing (and communication) will be everywhere, and information will be available anytime and anywhere. The technology leading this future is embedded systems technology.%The main technology needed for next-generation ICT systems are the \textbf{embedded systems}.
\par Embedded systems, which are part of the broader area of Cyber-physical systems (CPS), are special-purpose computer systems designed to control or support the operation of a larger technical system, see \cite{BerkleyCPS} for a conceptual map. Unlike the general-purpose computer, they only perform a few specific and more or less complex pre-defined tasks. The typical use cases of CPSs are medical devices, aerospace, autonomous systems (like robots, autonomous cars or Unmanned Aerial Vehicles - UAVs), process, factory and environmental control, and intelligent buildings. CPS interact with the physical world and must operate dependably, safely, securely, efficiently, and in real-time.
\par In the simplest cases, the software consists of a single program running in a loop, started at power-on and responding to certain internal or external events. In more complex cases (robotics or aerospace), operating systems are employed, providing features like multitasking, synchronization, and resource management, among others.
\paragraph{}There are almost no areas of modern technology in which we could do without embedded systems. Rajkumar et al. \cite{Raj10} described CPS as \emph{the next computing revolution}. CPSs are starting to pervade areas such as wearable electronics and domotic applications. As they are becoming ubiquitous, we do not notice them anymore. Contemporary cars, for example, contain around 60 embedded computers\footnote{according to a 2014 report from the Alliance of Automobile Manufacturers}. The driver is not aware of them but uses their functionality.
\paragraph{}By definition, embedded systems operate in real-time, which means that their temporal behavior is as important as their functional behavior. The verification of functional properties by formal methods is today a reality, applied to hardware and software designs. However, the verification of temporal behavior (especially for hybrid systems, in which a continuous-time dynamic needs to be accounted for) is much more difficult and typically provides limited results. In this case, the verification is usually based on testing, and the quality and coverage of these tests depend mostly on the requirements of the certification process (when applicable) or simply the experience and intuition of the developers.
%********************************** % Section **************************************
\section{Mixed-Criticality}
%As a consequence of the increasing complexity of control algorithms,
As a consequence of the evolution of hardware systems, powerful and cheap computing platforms (especially multicore) are now available. These platform could support the execution of several functions, possibly with different criticality levels in an integrated fashion, with better flexibility, lower costs and power consumption. Criticality can include all forms of dependability (availability, integrity, etc.) \cite{dependability}, but it usually refers to functional safety, i.e., the absence of catastrophic consequences on the user and the environment. %The concept of criticality depends on the application, such
%and for economic reasons, embedded systems are being used into more safety-critical areas.
%Generally the integrity of the whole system depends on them and any failure could have severe consequences: for example endanger human safety.
This means that multiple functionalities (tasks) with different safety-criticality levels, such as flight-critical and mission-critical tasks, could be integrated on a single, shared hardware device. However, the platform integration must be performed with the guarantee that the functions at different criticality levels are free from any type of mutual interference.
% Example of critical something
\paragraph{}A more formal definition of criticality can be obtained with reference to the safety standards (see \cite{MCSmisconception} for a more detailed discussion) that define the design and development processes for safety-critical embedded systems (hardware and software). There are a variety of domain-specific safety standards, such as ISO 26262 for road vehicles. This work is focused on aerospace, where it is usual to refer to DO-178C \cite{do178c} for avionic software and ARINC 653 \cite{arinc653} for avionics real-time operating systems. %DO254 for avionic hardware
\begin{description}
\item[DO-178C] (aka EUROCAE ED-12C) was drafted by a co-operation of the European Organization for Civil Aviation Equipment (EUROCAE) and its US counterpart. The standard considers the entire software life-cycle and provides a basis for avionic systems certification. It defines five levels of criticality, from A (components whose failure would cause a catastrophic failure of the aircraft) to E (components whose failure has no effect on the aircraft or pilot workload) as in table \ref{tab:DAL}. This is the primary document by which the certification authorities approve all commercial software-based aerospace systems.
\item[ARINC-653] is a software specification for space and time partitioning in safety-critical avionics real-time operating systems. It allows the hosting of multiple applications of different software assurance levels on the same hardware in the context of an Integrated Modular Avionics (IMA) architecture. Each software component is inside a partition and has its own memory space and dedicated time slot. The current work includes the enhancement of ARINC-653 for multi-core processor architectures.
\end{description}
All these documents are used by the certification authorities that must establish system safety. In this process, developers need to convince official entities that all relevant hazards have been identified and dealt with. In general, mixed-criticality approaches are hardly practical in today's processes. The reason is that demonstrating functional and time isolation for most multi-core platforms is quite challenging, given the complex set of interactions occurring at the hardware level (in the memory hierarchy or the inter-core interconnects).
%. The reason lies mainly in a lack of confidence in such complex systems, and in the considerable effort needed for their verification. %Validation answer the question "Am I building the right system?", verification answer "Am I building the system right?" (both are needed in reality)
\begin{table}
\begin{center}
\begin{tabular}{cll}
\toprule
Level & Failure Condition & Failure Rate \\
\midrule
A & Catastrophic & $10^{-9}$/h \\
B & Hazardous & $10^{-7}$/h \\
C & Major & $10^{-5}$/h \\
D & Minor & $10^{-3}$/h \\
E & No Effect & N/A \\
\bottomrule
\end{tabular}
\caption {Failure rate per DO-178C criticality level}
\label{tab:DAL}
\end{center}
\end{table}
\paragraph{} All standards define different levels of concern; they are called Safety Integrity Levels (SIL) in IEC 61508 and ARINC 653, or Design Assurance Levels (DAL) in DO-178C. The levels indicate the severity and frequency of a function failure, and assign requirements on failure probability, architecture, and design processes to each of the levels. They also regulate the combination of functions with different levels; so, they provide a basis for Mixed-Criticality systems design.
\par Because larger systems, such as vehicles or aircraft, include a few safety-relevant applications and many non-critical ones, such as air conditioning or infotainment, mixed criticality is a well-known problem for both research and industrial actors. Safety standards strongly regulate mixed-critical design and integration. The generic standard IEC 61508 requires that \emph{sufficient independence} is demonstrated between functions of different criticalities. Designing the full system as high-critical is clearly impractical: applying the strict safety-critical development rules to non-critical functions, which often include third-party (e.g., infotainment) subsystems, is far too costly, and demonstrating \emph{sufficient independence} is the only viable option.
%\paragraph{} The determination of the criticality is, in general, the result of the evaluation of the possible consequences of a failure (severity or hazard) on the occurrence of a failure.
\subsection{Robust Partitioning}
The concept of \emph{Robust partitioning} is defined differently by different standards, without an officially agreed or common definition \cite{robustpartitioning}. Rushby \cite{goldenrule} defines the \emph{Gold Standard for Partitioning} as \emph{"A robustly partitioned system ensures a fault containment level equivalent to its functionally equivalent federated system."}. Federated architecture is the traditional design for avionic architecture where each application is implemented in self-contained units. Wilding et al.\cite{goldenruleinvariant} define the \emph{Alternative Gold Standard for Partitioning} as \emph{"The behavior and performance of software in one partition must be unaffected by the software in other partitions"}, which is a stronger property and a sufficient condition to establish robust partitioning. In any case, robust partitioning consists of the following concepts:
\begin{itemize}
\item \emph{Fault Containment}. Functions should be separated in such a way that no failure in one application can cause another function to fail. Low criticality tasks should not affect high criticality tasks.
\item \emph{Space Partitioning}. No function may access the memory space of other functions (unless explicitly configured).
	\item \emph{Temporal Partitioning}. A function's access to a set of hardware resources during a period of time is guaranteed and cannot be affected by other functions.
\end{itemize}
\paragraph{} ARINC-653 contains its interpretation of robust partitioning: \emph{"The objective of Robust Partitioning is to provide the same level of functional isolation as a federated implementation."}. This space partitioning concept can be implemented on multi-core systems with the help of the Real-Time Operating Systems.
\subsection{Operating systems for Mixed-Criticality applications}
The core concept of demonstrating sufficient independence among different function can be approached using Kernels and schedulers that guarantee resource management to provide independence in the functional and time domain; separation kernels are the most notable example.
\par A separation kernel is an operating system-level resource manager that enforces \emph{spatial and temporal separation} among functionalities or partitions that are managed by it. The concept was first proposed by John Rushby in 1981 \cite{separationkernel}, \emph{"the task of a separation kernel is to create an environment which is indistinguishable from that provided by a physically distributed system: it must appear as if each regime is a separate, isolated machine and that information can only flow from one machine to another along known external communication lines. One of the properties we must prove of a separation kernel, therefore, is that there are no channels for information flow between regimes other than those explicitly provided."}. In other words, a single board platform that is indistinguishable by a federated system.
\par A typical structure of a separation kernel is depicted in figure \ref{fig:separationkernel}.
\begin{figure}[htbp]
\centering
\includegraphics[width=0.7\textwidth]{SeparationKernel}
\caption{Typical Separation Kernel Architecture}
\label{fig:separationkernel}
\end{figure}
A partition is a logical unit maintained by the separation kernel, and each of them is separated from the others. For each partition, the separation kernel provides resources such as physical memory space, I/O memory space, CPUs, and so on (spatial separation). Moreover, separation kernels are typically implemented using time-triggered schedulers and assigning each partition a dedicated time slot in a cycle to provide time separation. Usually two types of partitions are supported: \emph{User Partitions} and \emph{System Partitions}. The partitions are identified and configured by the system designer, who is a trusted person. The content of a user partition does not need to be approved by the designer and can be arbitrary, even malicious \cite{mils}, whereas a system partition contains applications and data supplied and approved by the designer. All partitions use the API (Application Program Interface) provided by the separation kernel to interact with it. Since partitions are spatially isolated, they cannot communicate directly with each other. They can only interact through the kernel API, and communicate under the supervision of the kernel. This communication occurs via objects that are statically configured by the designer. The separation kernel also includes the \emph{Hardware Abstraction Level}, which includes, in part or completely, the \emph{Board Support Package} (BSP). The BSP contains a set of drivers for the hardware components, providing an abstraction of the underlying hardware; a board support package can be exchanged without changing the content of any partition.
\paragraph{} Separation kernels are gaining importance thanks to the increasing adoption of multi-core embedded systems.
%********************************** % Section **************************************
\section{Multi-core embedded systems}
The traditional approach to provide increased processing power to software (including embedded) applications is to increase the CPU clock frequency, to increase the instruction-level parallelism through instruction pipelines, to increase the cache size and the number of cache levels, and so on. With today's technology, this approach is no longer sustainable. Increasing the CPU frequency causes excessive power consumption and thermal dissipation loss, and raises more problems for the chip design because of the need for smaller sized features. Parallelization has become a key solution for this problem. For this reason multi-core platforms have the potential to meet modern computing requirements by offering greater computational capabilities and advantages in size, weight, and power (SWaP).
\paragraph{} However, in a multi-core system different cores share hardware resources such as caches and central memory; these were developed with a focus on maximizing average performance but, when placed in a safety-critical context, can introduce challenges to predictability.
\par Multi-core CPS are still not fully embraced by the industry for safety-critical applications. For example, aerospace systems are subject to costly and time-consuming certification processes, which require a predictable behavior under fault-free and certain hazardous conditions, hard to prove on multi-core platforms. Despite these problems (and the certification challenges), the industry is moving towards a higher exploitation of commercial-off-the-shelf (COTS) devices to reduce development costs \cite{mulcors}, as well as exploiting the low SWaP characteristics of multi-core.
%, despite the fact that the use of such boards is challenging for certification.
\paragraph{} The introduction of COTS multi-core processors is motivated by several aspects:
\begin{itemize}
\item Provide a long-term answer to the increasing demand of processing power.
\begin{itemize}
\item Increased performance: better exploitation of the thread parallelism
\item Increased integration: Less equipment to perform the same functionality or same equipment to
host more functionality
		\item Reduce environmental footprint: less embedded equipment, less power consumption, less dissipation compared to
		the single-core equivalent
\end{itemize}
\item Anticipate mass market obsolescence of single-core processors.
	\item Be able to ``simplify'' the overall system architecture (for example a partitioned architecture can avoid Ethernet communication).
\end{itemize}
\paragraph{} The barrier to the adoption of the multi-core technology is the complexity of its certification. For the certification process it is important to ensure the \emph{Execution Integrity} of the software components: the software will be correctly executed in a nominal situation, and the system state will be predictable in non-nominal situations (internal faults). Moreover, it must be possible to perform a \emph{WCET analysis} (Worst Case Execution Time) of the embedded software. Timing information is tightly coupled with both the software and hardware architecture, since they introduce \emph{interferences} between parallel applications that are reflected in timing delays. In COTS this analysis becomes even more difficult because of the lack of documentation on the system design.
\subsection{Hardware interference channels}
Applications running on different cores of a multi-core processor are not executing independently from each other. Even if there is no explicit data or control flow between these applications, a coupling exists at platform level since they are implicitly sharing resources. A platform feature which may cause interference between independent applications is called a hardware interference channel. The analysis of hardware interference channels requires a deep understanding of the platform architecture including the CPU internals.
\par In this work, we consider only commercial multi-core platforms. Therefore we assume that the platform implements the Unified Memory Model, which means that all cores share the same physical address space. Figure \ref{fig:unifiedmemorymodel} depicts a typical such architecture.
\begin{figure}[htbp]
\centering
\includegraphics[width=1.0\textwidth]{UnifiedMemoryModel}
\caption{Unified Memory Model Architecture}
\label{fig:unifiedmemorymodel}
\end{figure}
\subsubsection{Caches}
While two different cores can execute independently as long as they are not using shared resources, caches may introduce cross-CPU interference through \emph{Cache Coherency} and \emph{Cache Sharing}. The L1 cache is typically divided into data and instruction cache while all other levels store data as well as instructions. Most multi-core processors have a dedicated L1 data and instruction cache for each core, while other levels might be shared or not depending on the architecture. Shared caches are a major cause of interference in a multi-core processor. If the data used by the tasks are small enough to fit inside the private cache of the processor, no performance loss occurs. If this is not the case, data are replaced according to some \emph{cache replacement algorithm} on demand of the tasks executing on the cores. Therefore, tasks may experience long delays due to the recurring contentions when accessing the memory.
%\subsubsection{Cache Coherency}
\paragraph{} Another important aspect related to the use of caches is the consistency of local caches connected to a shared resource. Cache coherency is crucial in multi-core systems because, if one of the local caches of a core contains a reference to a physical resource, and the cached value is more recent than the value stored in the physical resource itself, then any read access from any core must return the cached value. In multi-core processors, cache coherency is resolved by utilizing the core communication bus. This mechanism therefore determines how the memory system transfers data between processors, caches, and memory.
\subsubsection{Interconnect}
Multi-core processors are often part of a System on Chip (SoC) where the cores are packed together with peripherals such as external memory, serial I/O and Ethernet. To handle all requests to the shared peripherals, an \emph{interconnect} is implemented to arbitrate the requests. It is the key point where all the accesses are performed and arbitrated. Indeed, the interconnect has been built to sustain a higher bandwidth to serve all cores efficiently. Usually, a significant source of performance degradation is the concurrent access to the shared bus and shared I/O devices such as the graphical device, the GPIO (General Purpose Input Output) or the network interface. If a device can handle one request at a time, it may block the second request for hundreds of microseconds, or even worse for milliseconds. Moreover, shared devices can also raise interrupts. On multi-core platforms, a hardware interrupt is typically routed to one core. If multiple devices are attached to one interrupt line and the devices are not served by the same core, the core which receives the interrupt must pass this interrupt to the other core(s). All these factors worsen the determinism of the system and the WCET analysis. Indeed, the execution time of software on one core depends on the software executed on the other cores because of potential inter-core conflicts. The internal workings of the interconnect and how it prioritizes the requests are often part of the manufacturer's intellectual property, and they heavily impact the number and the duration of interferences. It may be difficult to determine an upper bound on their impact, whatever the concurrent software, even with full information on the design.
\par Characterizing the behavior of the interconnect in every possible situation in multi-core COTS is technically difficult. To overcome this problem the \emph{Interconnect Usage Domain} can be defined as a set of constraints restricting the accesses to the interconnect. The usage domains give the possibility to treat the interconnect as a black box. The objective is to reach an acceptable characterization of the interconnect behavior to enable further analyses even with poor documentation of that behavior.
\paragraph{} How these interference channels impact the concurrent applications also depends on the software architecture.
%\subsection{Software Interference}
%Obviously tasks interfere with each other due to access to shared I/O devices. However, can be noticed that the Operating Systems can be source of interference too. It must provide an execution environment for the hosted applications which on the one hand hides the platform characteristics from the hosted applications and on the other hand strictly controls the use of platform resources for all applications. Usually the operating system uses some additional software components called Board Support Package (BSP), it is the implementation of specific support code for a given board that conforms to a given operating system.
%\par The diversity of Multi-Core System on Chip (MPSoC) architecture leads to additional effort (and issues) during certification since the operating systems, the BSP and the code on top of them, must be certified by authorities.
\subsection{Parallelism basic concepts}
There are several types of parallelism used in multi-core programming; we can first distinguish between \emph{Task Level Parallelism}, \emph{Data Level Parallelism}, and \emph{Instruction Level Parallelism} \cite{computerarchitecture}. Instruction level parallelism refers to overlapping the execution of instructions to improve performance. There are two largely separable approaches to exploiting this form of parallelism: \begin{enumerate*} \item an approach that relies on hardware to discover and exploit the parallelism dynamically, and \item an approach that relies on software technology to find parallelism statically at compile time.\end{enumerate*} Data level parallelism focuses on distributing the data across different cores, which operate on the data in parallel. It is commonly applied on regular data structures like arrays and matrices by working on each element in parallel. Finally, Task level parallelism focuses on distributing tasks (functional units) across cores. While Instruction and Data Level Parallelism are highly exploitable in FPGAs and GPUs, in this work we focus on Task Level Parallelism.
\paragraph{} At system level, Task Level Parallelism can be further partitioned into two classes: \emph{Asymmetric Multiprocessing} (AMP) and \emph{Symmetric Multiprocessing} (SMP).
\subsubsection{Asymmetric Multiprocessing}
In this approach each core runs its own single-core aware application (a partition) as in figure \ref{fig:AMP}. Scheduling inside a partition is sequential. The advantages of using this programming approach are:
\begin{itemize}
\item applications do not need to be multi-core aware; this simplifies the design.
\item reduced need for mutual exclusion.
\item interferences are mainly caused by shared caches, memory, I/O buses and concurrent access to shared devices.
\end{itemize}
The disadvantages are:
\begin{itemize}
\item all applications must be certified to the highest assurance level since they have full access to the processor;
\item it can be hard to identify different single-core aware, independent applications; this can limit the use of the available cores to only a few of them;
\item synchronization among applications running on different cores is more complex.
\end{itemize}
\begin{figure}[htbp]
\centering
\includegraphics[width=0.7\textwidth]{AMP}
\caption{Example of Asymmetric Multiprocessing}
\label{fig:AMP}
\end{figure}
\subsubsection{Symmetric Multiprocessing}
In this approach each application has access to all cores. Threads inside a partition run concurrently, and the Operating System typically controls cores and platform resources, as shown in figure \ref{fig:SMP}. The advantages of this approach are:
\begin{itemize}
\item More flexibility is allowed and better load balancing can be achieved.
\item There is only one system component responsible for the partitioning.
\item Different safety-levels are allowed in the system (mixed-critical systems).
\item Applications can still be completely isolated from each other, e.g. by disabling concurrent execution.
\item Inter-process conflicts do not impact time and space partitioning, as they occur inside the same partition.
\end{itemize}
\par The disadvantages come from the additional system-level software components:
\begin{itemize}
\item System level components responsible for the partitioning are complex and must be certified to the highest assurance level.
\item Synchronization effort can be higher.
\item Due to the shared system software layer, an implicit coupling of unrelated threads cannot be completely avoided.
\end{itemize}
A careful design can limit the impact of these drawbacks.
\begin{figure}[htbp]
\centering
\includegraphics[width=0.7\textwidth]{SMP}
\caption{Example of Symmetric Multiprocessing}
\label{fig:SMP}
\end{figure}
\subsubsection{Selected approach}
The asymmetrical approach presents some difficulties in the demonstration of robust partitioning. It is interesting for very specialized applications, for example where, in a dual-core platform, one core is completely dedicated to I/O processing and the other core runs the application software. On the other hand, the symmetric approach needs to be considered to make the best use of the platform. All its drawbacks are manageable if the system runs under control of a (trusted) operating system and the design takes into account all the possible issues. Moreover, we assume that future embedded systems will have to host, besides the critical applications, an increasing number of applications with high performance requirements but lower criticality.
\paragraph{} Implementing a mechanism to isolate different applications inside a multi-core platform is crucial. A virtualization layer hosting several virtual machines can provide this service.
%********************************** % Section **************************************
\section{Virtualization in Embedded Systems}
The main concept for the design of a mixed-critical system is, first and foremost, the demonstration of sufficient independence among software components. System virtualization, which is the abstraction and management of system resources, facilitates the integration of mixed-criticality systems \cite{multipartes}. This approach results in independent virtual machines that are fully contained in an execution environment that cannot affect the remaining system.
\subsection{Overview}
Platform virtualization refers to the creation of \emph{Virtual Machines} (VMs), also called guest OS, running on the physical machine and managed by a \emph{hypervisor}. Virtualization technology enables concurrent execution of multiple VMs on the same hardware (single or multi-core) processor. Virtualization technology has been widely applied in the enterprise and cloud computing field, however, in recent years, it has been increasingly deployed in the embedded systems domain. Virtualization for embedded systems must address real-time behavior, safety, and security such that it offers protection against external attacks and unintended
interactions between the critical and non-critical components of the system. The hypervisor provides an isolation mechanism that can encapsulate an entire OS and applications into a Virtual Machine (VM).
\subsection{Hypervisor types}
In 1974 Gerald J. Popek and Robert P. Goldberg \cite{popek1974formal} classified hypervisors in two categories:
\begin{itemize}
\item \emph{Level II hypervisor} as a software layer that runs on top of a General Purpose Operating System (GPOS) \cite{Kleidermacher2013} (fig. \ref{fig:HypervisorL2}). It takes advantage of the underlying operating system services and hardware abstraction to enable the creation of virtual machines. However, the security of a type II hypervisor is only as robust as that of the host GPOS. Therefore, the hypervisor can be subverted through one of the security gaps in the host GPOS, thereby corrupting the entire system. Additionally, the host OS layer increases system complexity and overall code size, which is a major factor for resource-constrained embedded systems. As a result, type II hypervisors are not suited for most embedded systems.
\item \emph{Level I hypervisor} as a software layer that runs directly on the hardware platform (bare metal) (fig. \ref{fig:HypervisorL1}). This approach avoids the complexity and inefficiency of a GPOS, and can achieve a higher level of isolation for safety- and security-critical applications \cite{Kleidermacher2013}.
\end{itemize}
\begin{figure}
\begin{subfigure}{0.5\textwidth}
\centering
\includegraphics[width=.9\textwidth]{HypervisorL1}
\caption{Type I}
\label{fig:HypervisorL1}
\end{subfigure}%
\begin{subfigure}{0.5\textwidth}
\begin{subfigure}{\textwidth}
\centering
\includegraphics[width=.8\textwidth]{HypervisorL2}
\caption{Type II}
\label{fig:HypervisorL2}
\end{subfigure}
\end{subfigure}
\caption{Hypervisor Types}
\label{fig:interactions}
\end{figure}
The selected operating system, PikeOS, is a Level I hypervisor. It provides safety and security services through virtualization.
\subsection{Virtualization Approaches}
Virtualizing an operating system requires placing a virtualization layer under the operating system to create and manage the virtual machines. As clarified below, virtualization is provided mainly in two ways \cite{practicalmicrokernel}:
\begin{itemize}
\item \emph{Full/Native Virtualization}. With this technique, guest operating systems are unmodified and unaware of the virtualization environment. Each virtual machine is provided with all services of the physical system (e.g. virtual BIOS, virtual devices, and virtual memory). Full virtualization usually relies either on binary translation techniques to trap and emulate non-virtualizable and sensitive system instructions, or on hardware assistance \cite{vmwarevirtualization}. However, the computational complexity of this technique results in an unacceptable performance level for embedded systems \cite{Kleidermacher2013}.
\item \emph{Para-virtualization}. Unlike full virtualization, in para-virtualization guest operating systems are modified to improve the performance of the hypervisor. These modifications are applied specifically to the guest OS kernel to replace non-virtualizable instructions and critical kernel operations with hypercalls that request services directly from the hypervisor. These services correspond to operations that are normally part of the OS kernel and execute with the highest privilege level in the system. Consequently, the hypervisor is the only software component executed in privileged mode. Para-virtualization overcomes the issues of full virtualization, and it is the only viable solution for embedded platforms that do not provide any hardware virtualization support \cite{Kleidermacher2013}. A short, purely illustrative sketch of this idea follows the list.
\end{itemize}
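The following purely hypothetical C sketch illustrates the para-virtualization idea mentioned in the list above. The function {\tt hv\_call}, the service identifier and the page-table example are invented for this illustration only and are not the API of PikeOS or of any real hypervisor.
\begin{verbatim}
/* Hypothetical guest-side code. hv_call() stands in for whatever
 * trap mechanism a real hypervisor exposes (e.g. a dedicated
 * hypercall instruction); nothing here is a real API.            */
typedef unsigned long hv_arg_t;

extern long hv_call(int service_id, hv_arg_t arg0, hv_arg_t arg1);

#define HV_SET_PAGE_TABLE 1     /* invented service number */

/* The original, non-virtualizable kernel operation would execute a
 * privileged instruction directly (e.g. writing the page-table base
 * register). The para-virtualized guest asks the hypervisor instead. */
static long set_page_table(hv_arg_t page_table_base)
{
    return hv_call(HV_SET_PAGE_TABLE, page_table_base, 0);
}
\end{verbatim}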
\subsection{Microkernel-Based Hypervisor}
In order to increase the robustness of the hypervisor, its size should be as small as possible. A microkernel-based hypervisor is a thin software layer that runs bare-metal in the highest privileged mode. It can provide strong isolation among the guest operating systems. This approach implements virtualization as a service on top of the trusted microkernel, which is close to the minimum amount of software needed to implement the mechanisms of an operating system. Therefore, each separate instance is as robust as the guest environment itself. Since the code size of the hypervisor is small, it is easier to verify and validate. Authors from Lockheed Martin \cite{LockheedMartinVMIMA} presented a study towards the application of a microkernel-based hypervisor architecture to enable virtualization for a representative set of avionics applications requiring multiple OS environments and a mixed-critical application.
%\subsection{Requirements for mixed-criticality}
%The main concept for the design of a mixed-critical system is, first and foremost, the demonstration of sufficient independence. Common mechanisms are:
%\begin{itemize}
%\item kernels and schedulers that guarantee resource and time isolation; separation kernels are the most notable example.
%\item monitors to detect timing faults (and eventually control them), and the scheduling schemes for guaranteeing controllability in the presence of faults.
%\end{itemize}
%\par A separation kernel is an operating system that enforces spatial and temporal separation among functionalities or partitions that are managed by it. One example that can be found in the state of the art is the commercial Hypervisor/OS called pikeOS from SysGO, it implements the avionic ARINC 653 standard, consisting of a separation microkernel that provides paravirtualization to real-time operating system for running partitions.
%\par Monitors can be used to prevent the propagation of failures and reduce the criticality of but they introduce overhead and further certification issues. PikeOS also provides mechanism for implementing monitors and different scheduling schemes.
\subsection{Available Solutions}
Many hypervisor solutions are available as either open-source or commercial products. For example, the Xen hypervisor has recently been ported to Xilinx Zynq Multi-Processor System-on-Chip (MPSoC) devices \cite{xenZynq}. The Xen Zynq Distribution is released under the GNU General Public License 2 (GPL2), but it is not designed for mixed-critical applications. Several commercial RTOS products that comply with the ARINC 653 standard \cite{embeddedvmstate} are available, e.g. LynuxWorks LynxOS-178 \cite{LynxOS}, Green Hills INTEGRITY-178B \cite{INTEGRITY178B}, Wind River VxWorks 653 \cite{VxWorks}, Real-Time Systems GmbH Hypervisor \cite{RTGmbH}, Tenasys eVM for Windows \cite{eVM}, National Instruments Real-Time Hyper Hypervisor \cite{NIHypervisor}, Open Synergy COQOS \cite{COQOS}, Enea Hypervisor \cite{EneaHypervisor}, etc.
\paragraph{} Although the offer is quite broad, in this thesis we focus on PikeOS \cite{PikeOS} from SysGO AG. It is a microkernel-based Type I hypervisor certified against the most common standards (e.g. DO-178C, ARINC 653, ISO 26262, etc.) and has been designed for functional safety and security requirements, which makes it a suitable choice for our applications.
%********************************** % Section **************************************
\section{Model Based design}
Applications are evolving to cover more and more complex functionalities. The increase in complexity leads to an increase in the required throughput and is becoming a challenge for software developers. Model-Based Design (MBD) appears as an excellent solution to cope with this increase in complexity.
\par Model-Based Design is a model-centric approach to system development that enables system-level simulation, automatic code generation, and continuous test and verification. Rather than relying on physical prototypes (which can be very expensive) and textual specifications, Model-Based Design uses a model throughout development. The model includes every component relevant to system behavior: algorithms, control logic, physical components, and intellectual property (IP). MBD is being adopted in all areas of engineering; moreover, model-based certification has matured considerably in the last decade, attracting considerable interest from companies. Recent studies \cite{mbdaerospaceverification} have shown that the application of model-based certification and formal verification can be a practical and cost-effective way to address certification requirements.
\par Examples of available commercial tools are Simulink\textregistered \cite{Simulink}, SCADE Suite\textregistered \cite{Scade}, LabVIEW\textregistered \cite{Labview} and SystemModeler\textregistered \cite{Modeler}. Open source and research tools include Scicos \cite{Scicos} and Ptolemy\cite{Ptolemy}. In this work we use Simulink which is the \emph{de-facto standard} and provides mature tools for design, simulation, and code generation.
\begin{figure}[htbp]
\centering
\includegraphics[width=0.7\textwidth]{MBDflowchart}
\caption{Model-Based Design Flow Chart}
\label{fig:mbdflowchart}
\end{figure}
\paragraph{} This makes MBD a strong candidate for a complete work-flow covering system development, verification, validation, and deployment. A typical design flow in MBD is shown in figure \ref{fig:mbdflowchart}. The traditional embedded system development process follows the standard \emph{V-shaped} lifecycle, which splits the product development process into a design phase and an integration phase. The code generation step is the turning point of the process, and this work focuses on that step.
%********************************** % Section **************************************
\section{EMC\textsuperscript{2}}
This work fits inside the European EMC\textsuperscript{2} - "Embedded Multi-Core Systems for Mixed Criticality applications in dynamic and changeable real-time environments" project \cite{emc2artemis}. The objective of the project is to foster changes through an innovative and sustainable service-oriented architecture approach for mixed-criticality applications; the project bundles the power of 98 partners from 19 European countries and a budget of about 100 million euros.
\paragraph{} Within the EMC\textsuperscript{2} project, the objective was to demonstrate the possibility of using a Model-Based Design approach to assist the design and implementation of mixed-critical applications running on multi-core platforms. For that purpose, we selected a typical aerospace use case: motor drive control. This is a widely known application, used for example in the control of the actuators for the primary and secondary flight controls. The platform selected to implement this use case was the Xilinx ZedBoard\texttrademark \cite{zedboard}, based on the Xilinx Zynq\textregistered-7000 All Programmable System-on-Chip (SoC). The board is equipped with a dual-core ARM\textregistered Cortex-A9 processor and an Artix-7 FPGA, which adds both flexibility and complexity to the design. For the motor control we used the \emph{FMCMOTCON2} \cite{FMCMOTCON2} evaluation kit from Analog Devices (figure \ref{fig:fmcmotcon}), which provides a complete motor drive system including a stepper motor, a control board, a low-voltage drive board and a dynamometer drive system.
\begin{figure}[htbp]
\centering
\includegraphics[width=0.9\textwidth]{FMCMOTCON2}
\caption{Analog Devices FMCMOTCON2 Evaluation Kit}
\label{fig:fmcmotcon}
\end{figure}
%********************************** % Section **************************************
%\section{Goal}
%This work presents a model-based approach for automatic code generation for mixed-criticality, multicore, embedded application using an Hypervisor operative system; mainly focusing on aerospace use-cases.
%Our work aims to exploit the multicore platform advantages as much as possible while ensuring an acceptable level of safety among the different mixed critical tasks composing the systems, mainly from aerospace use-cases.
| {
"alphanum_fraction": 0.8064054563,
"avg_line_length": 147.1333333333,
"ext": "tex",
"hexsha": "0b21746249be319ad5b44c981d6b187b0a2221c4",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "47cf8aff592557c1ca990404dc7c079e09307262",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "pantonante/EMC2-thesis",
"max_forks_repo_path": "Chapters/introduction.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "47cf8aff592557c1ca990404dc7c079e09307262",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "pantonante/EMC2-thesis",
"max_issues_repo_path": "Chapters/introduction.tex",
"max_line_length": 1696,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "47cf8aff592557c1ca990404dc7c079e09307262",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "pantonante/EMC2-thesis",
"max_stars_repo_path": "Chapters/introduction.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 8430,
"size": 41933
} |
\documentclass{memoir}
\usepackage{notestemplate}
\usetikzlibrary{matrix,arrows.meta}
%\logo{~/School-Work/Auxiliary-Files/resources/png/logo.png}
%\institute{Rice University}
%\faculty{Faculty of Whatever Sciences}
%\department{Department of Mathematics}
%\title{Class Notes}
%\subtitle{Based on MATH xxx}
%\author{\textit{Author}\\Gabriel \textsc{Gress}}
%\supervisor{Linus \textsc{Torvalds}}
%\context{Well, I was bored...}
%\date{\today}
%\makeindex
\begin{document}
% \maketitle
% Notes taken on
The idea of a tensor product of modules \(M,N\) is to form another module in which we can take products \(mn\) of elements \(m \in M\) and \(n \in N\).
\begin{defn}[Ring Extension]
Let \(R\leq S\). If \(\prescript{}{S}N\) is a left \(S\)-module, then \(N\) can also be regarded as a left \(R\)-module, since the elements of \(R\) act on \(N\) by assumption.\\
More generally, if \(f:R\to S\) is a ring homomorphism from \(R\) into \(S\) with \(f(1_R) = 1_S\), then \(N\) can be considered as an \(R\)-module with \(rn = f(r)n\) for \(r \in R\) and \(n \in N\). We consider \(S\) a \textbf{ring extension of \(R\)} and the resulting \(R\)-module is said to be obtained from \(N\) by \textbf{restriction of scalars} from \(S\) to \(R\).
\end{defn}
One might wonder if the reverse can be done-- taking a ring \(R\) and attempting to extend the scalars to a larger ring. This cannot be done in general-- for example, one can check that \(\Z\) cannot be made into a \(\Q\)-module.\\
However, while \(\Z\) cannot be made into a \(\Q\)-module, it is contained in a \(\Q\)-module (\(\prescript{}{\Q}\Q\)). That is, there is an embedding of the \(\Z\)-module \(\prescript{}{\Z}\Z\) into the \(\Q\)-module \(\prescript{}{\Q}\Q\). This isn't always the case however-- to see this, consider the \(\Z\)-module \(\prescript{}{\Z}N\) where \(N\) is a finite abelian group. One can check that there are no nonzero homomorphisms into any \(\Q\)-module.\\
Our goal will be to construct a module which is the best candidate for which we can embed into.
\begin{defn}[Tensor Product of Modules]
Let \(\prescript{}{R}N\) be a general \(R\)-module that we wish to embed into some \(S\)-module. First we will try to define a product of the form \(sn\) for \(s \in S\), \(n \in N\).\\
We start by considering the free \(\Z\)-module on the set \(S\times N\), that is, the collection of all finite formal sums of elements of the form \((s_i,n_i)\), with no relations imposed. The desired \(S\)-module structure requires the relations
\begin{align*}
r_1:& \quad(s_1+s_2)n = s_1n + s_2n \\
r_2:& \quad s(n_1+n_2) = sn_1 + sn_2\\
r_3:& \quad (sr)n = s(rn)
\end{align*}
for \(s_1,s_2,s \in S\), \(r \in R\), and \(n,n_1,n_2 \in N\). We let \(H\) be the subgroup of this free \(\Z\)-module generated by all elements of the form \((s_1+s_2,n)-(s_1,n)-(s_2,n)\), \((s,n_1+n_2)-(s,n_1)-(s,n_2)\), and \((sr,n)-(s,rn)\), written \(H := \langle r_1,r_2,r_3 \rangle \), and consider the quotient of the free \(\Z\)-module by \(H\). We denote this quotient group by \(S \otimes_R N\) and call it the \textbf{tensor module product of \(S\) and \(N\) over \(R\)}. We denote by \(s \otimes n\) the coset containing \((s,n)\) in \(S \otimes_R N\), and so we have
\begin{align*}
(s_1+s_2) \otimes n = s_1\otimes n + s_2 \otimes n\\
s \otimes (n_1+n_2) = s\otimes n_1 + s \otimes n_2\\
sr \otimes n = s \otimes rn.
\end{align*}
The elements of \(S \otimes_R N\) are called \textbf{tensors of modules} and can be written as finite sums of \textbf{simple tensors of modules} of the form \(s \otimes n\) with \(s \in S, n \in N\).\\
Now we give the tensor module product \(S \otimes_R N\) an \(S\)-module action by
\begin{align*}
s\left( \sum_{\textrm{finite}} s_i \otimes n_i \right) = \sum_{\textrm{finite}} (ss_i) \otimes n_i.
\end{align*}
We call this module \(\prescript{}{S}(S \otimes_R N)\) the \textbf{\(S\)-module obtained by extension of scalars from the \(R\)-module \(N\)}.
\end{defn}
The natural map \(\iota: N \to S \otimes_R N\) is defined by \(n\mapsto 1 \otimes n\). Because \(1 \otimes rn = r(1 \otimes n)\), it follows that \(\iota\) is an \(R\)-module homomorphism from \(N\) to \(S \otimes_R N\). It is not injective in general, and so \(S \otimes_R N\) need not contain an isomorphic copy of \(N\).\\
Because the relations imposed were the minimal ones necessary, one would expect that \(S \otimes_R N\) is the best possible \(S\)-module target for an \(R\)-module homomorphism from \(N\).
\begin{thm}[Universal Property for Tensor Modules]
Let \(R\leq S\), let \(\prescript{}{R}N\) be a left \(R\)-module and let \(\iota: N \to S \otimes_R N\) be the \(R\)-module homomorphism defined by \(\iota(n) = 1 \otimes n\). Suppose that \(\prescript{}{S}L\) is an arbitrary left \(S\)-module and \(\varphi :N \to L\) is an \(R\)-module homomorphism from \(N\) to \(L\). Then there is a unique \(S\)-module homomorphism \(\Phi :S \otimes_R N \to L\) such that \(\varphi = \Phi \circ \iota \) and the diagram commutes:
\begin{center}
\begin{tikzpicture}
\matrix (m)
[
matrix of math nodes,
row sep = 3em,
column sep = 4em
]
{
N & S \otimes_R N \\
& L \\
};
\path
(m-1-2) edge [->] node [right] {\(\Phi \)} (m-2-2)
(m-1-1) edge [->] node [above] {\(\iota\)} (m-1-2)
(m-1-1) edge [->] node [below] {$\varphi$} (m-2-2);
\end{tikzpicture}
\end{center}
Conversely, if \(\Phi :S \otimes_R N \to L\) is an \(S\)-module homomorphism then \(\varphi = \Phi \circ \iota\) is an \(R\)-module homomorphism from \(N\) to \(L\).
\end{thm}
\begin{cor}
Let \(\iota:N \to S \otimes_R N\) be the \(R\)-module homomorphism \(\iota(n) = 1 \otimes n\) defined above. Then \(N / \textrm{Ker}\iota\) is the unique largest quotient of \(N\) that can be embedded in any \(S\)-module.\\
In particular, \(N\) can be embedded as an \(R\)-submodule of some left \(S\)-module if and only if \(\iota\) is injective.
\end{cor}
\begin{exmp}
Take \(R = \Z\), \(S = \Q\) and \(N = \Z/2\Z\). For any \(q \in \Q\) and \(n \in \Z/2\Z\) we have
\begin{align*}
q \otimes n = \left( \frac{q}{2}\cdot 2 \right) \otimes n = \frac{q}{2} \otimes 2n = \frac{q}{2} \otimes 0 = 0,
\end{align*}
so \(\Q \otimes_\Z \Z/2\Z = 0\). Here \(\iota\) is the zero map, and \(\Z/2\Z\) cannot be embedded in any \(\Q\)-module, in line with the earlier discussion of finite abelian groups.
\end{exmp}
\subsection{General Tensor Product}
\label{sub:general_tensor_product}
Notice that forming \(S\otimes_R N\) as an abelian group only required \(S_R\) to be a right \(R\)-module and \(\prescript{}{R}N\) a left \(R\)-module. We can construct an abelian group \(M \otimes_R N\) for any right \(R\)-module \(M_R\) and any left \(R\)-module \(\prescript{}{R}N\).\\
The \(S\)-module structure on \(\prescript{}{S}{(S \otimes_R N)}\) required only a left \(S\)-module structure on \(\prescript{}{S}S\) and the compatibility relation
\begin{align*}
s'(sr) = (s's)r.
\end{align*}
\begin{defn}[Tensor]
Let \(\prescript{}{R}N\) be a left \(R\)-module and \(M_R\) a right \(R\)-module. We obtain an abelian group by quotient of the free \(\Z\)-module on \(M\times N\) by the subgroup \(H = \langle r_1,r_2,r_3 \rangle \), where
\begin{align*}
r_1:& \quad (m_1+m_2,n) = (m_1,n) + (m_2,n)\\
r_2:& \quad (m, n_1+n_2) = (m,n_1) + (m,n_2)\\
r_3:& \quad (mr,n) = (m,rn)
\end{align*}
for \(m,m_1,m_2 \in M\), \(n,n_1,n_2 \in N\), and \(r \in R\). We denote this by
\begin{align*}
M \otimes_R N := \prescript{}{\Z}(M \times N) / \langle r_1,r_2,r_3 \rangle
\end{align*}
and call it the \textbf{tensor product of \(M\) and \(N\) over \(R\)}. The elements of \(M \otimes_R N\) are called \textbf{tensors}, and the coset \(m \otimes n \in M \otimes_R N\) is called a \textbf{simple tensor}. Every tensor can be written (non-uniquely) as a finite sum of simple tensors.
\end{defn}
Keep in mind that the elements \(m \otimes n\) are cosets and not ordered pairs directly-- so caution must be used when defining maps on tensor products. That is, one needs to check that a map is well-defined on the entirety of a coset.\\
Furthermore, caution must be exercised when comparing tensor products. For example, if \(M \leq M'\), we can have \(m \otimes n = 0\) in \(M' \otimes_R N\) but \(m \otimes n \neq 0\) in \(M \otimes_R N\). This essentially captures the notion that when more elements are included, the cosets will change. Hence there is no reason to expect \(M \otimes_R N \leq M' \otimes_R N\).
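A standard illustration of this point: take \(M = 2\Z\), \(M' = \Z\) and \(N = \Z/2\Z\). In \(\Z \otimes_\Z \Z/2\Z\) we have
\begin{align*}
2 \otimes 1 = (1\cdot 2) \otimes 1 = 1 \otimes (2\cdot 1) = 1 \otimes 0 = 0,
\end{align*}
whereas in \(2\Z \otimes_\Z \Z/2\Z\) the element \(2 \otimes 1\) is nonzero: the \(\Z\)-module isomorphism \(2\Z \cong \Z\) sends \(2\) to \(1\), so \(2\Z \otimes_\Z \Z/2\Z \cong \Z \otimes_\Z \Z/2\Z \cong \Z/2\Z\) with \(2 \otimes 1\) corresponding to a generator.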
\begin{defn}
Let \(M_R\) be a right \(R\)-module, \(\prescript{}{R}N\) a left \(R\)-module, and \(L\) an abelian group. A map \(\varphi :M\times N \to L\) is called \textbf{\(R\)-balanced} or \textbf{middle linear with respect to \(R\)} if
\begin{align*}
\varphi (m_1+m_2,n) &= \varphi(m_1,n) + \varphi (m_2,n)\\
\varphi (m,n_1+n_2) &= \varphi (m,n_1) + \varphi (m,n_2)\\
\varphi (m,rn) &= \varphi (mr,n)
\end{align*}
for all \(m,m_1,m_2 \in M\), \(n,n_1,n_2 \in N\) and \(r \in R\).
\end{defn}
We can define a map \(\iota:M\times N \to M \otimes_R N\) with \(\iota(m,n) = m \otimes n\). This map is not a group homomorphism but it is in fact \(R\)-balanced.
\begin{thm}[Universal Property of Tensor Products]
Let \(R\) be a ring, \(M_R\) a right \(R\)-module, and \(\prescript{}{R}N\) a left \(R\)-module. Let \(M \otimes_R N\) be the tensor product of \(M\) and \(N\) over \(R\) and let \(\iota: M \times N \to M \otimes_R N\) be the \(R\)-balanced map defined by \(\iota(m,n) = m \otimes n\). Then for any group homomorphism \(\Phi :M \otimes_R N \to L\) to an abelian group \(L\), the composite map
\begin{align*}
\varphi = \Phi \circ \iota
\end{align*}
is an \(R\)-balanced map from \(M \times N \to L\). Conversely, if \(L\) is an abelian group and \(\varphi :M \times N \to L\) is any \(R\)-balanced map, then there is a unique group homomorphism \(\Phi: M \otimes_R N \to L\) such that \(\varphi = \Phi \circ \iota\).\\
In other words, there is a correspondence between \(\varphi \) and \(\Phi \) by the commutative diagram:
\begin{center}
\begin{tikzpicture}
\matrix (m)
[
matrix of math nodes,
row sep = 3em,
column sep = 4em
]
{
M \times N & M \otimes_R N \\
& L \\
};
\path
(m-1-2) edge [->] node [right] {\(\Phi \)} (m-2-2)
(m-1-1) edge [->] node [above] {\(\iota\)} (m-1-2)
(m-1-1) edge [->] node [below] {$\varphi$} (m-2-2);
\end{tikzpicture}
\end{center}
and this correspondence establishes a bijection between \(R\)-balanced maps and group homomorphisms, by the bijection between \(\varphi \) and \(\Phi \).
\end{thm}
\begin{cor}
Suppose \(D\) is an abelian group and \(\iota':M \times N \to D\) is an \(R\)-balanced map such that
\begin{itemize}
\item \(D = \langle \textrm{Im}(\iota')\rangle \)
\item every \(R\)-balanced map defined on \(M\times N\) factors through \(\iota'\)
\end{itemize}
Then there is an isomorphism \(f: M \otimes_R N \cong D\) of abelian groups with \(\iota' = f \circ \iota\)
\end{cor}
Now we'd like to give this abelian group a module structure. We simply need to impose a compatibility structure on \(M\) to obtain this.
\begin{defn}
Let \(R,S\) be rings. An abelian group \(M\) is called an \textbf{\((S,R)\)-bimodule} if \(M\) is a left \(S\)-module and a right \(R\)-module, and \(s(mr) = (sm)r\) for all \(s \in S, r \in R, m \in M\).
\end{defn}
\begin{exmp}
Let \(R\) be a commutative ring. A left \(R\)-module \(\prescript{}{R}M\) can always be given the structure of a right \(R\)-module by simply defining \(mr = rm\), and hence \(M\) becomes an \((R,R)\)-bimodule. We call this the \textbf{standard} \(R\)-module structure on \(M\).
\end{exmp}
Notice that if \(N\) has a left \(R\)-module structure and \(M\) a \((S,R)\)-bimodule structure, then we have once again
\begin{align*}
s \left( \sum_{\textrm{finite}} m_i \otimes n_i \right) = \sum_{\textrm{finite}} (sm_i) \otimes n_i
\end{align*}
and hence there is a well-defined action of \(S\) so that \(M \otimes_R N\) can be considered a left \(S\)-module. It follows that for fixed \(s\), \((m,n)\mapsto sm \otimes n\) is an \(R\)-balanced map, and hence there is a well-defined group homomorphism \(\lambda_s:M\otimes_R N \to M \otimes_R N\) that satisfies \(\lambda_s(m \otimes n) = sm \otimes n\).\\
A special case we might encounter is when \(M,N\) are left modules over a commutative ring \(R\), and \(S = R\). Then the standard \(R\)-module structure on \(M\) gives \(M\) the structure of an \((R,R)\)-bimodule and hence \(M \otimes_R N\) always has the structure of a left \(R\)-module.
\begin{defn}
Let \(R\) be a commutative ring and let \(M,N,L\) be left \(R\)-modules. The map \(\varphi : M \times N \to L\) is called \textbf{\(R\)-bilinear} if it is \(R\)-linear in every factor:
\begin{align*}
\varphi (r_1m_1+r_2m_2,n) &= r_1 \varphi (m_1,n) + r_2 \varphi (m_2,n)\\
\varphi (m,r_1n_1 + r_2n_2) &= r_1 \varphi (m,n_1) + r_2 \varphi (m,n_2)
\end{align*}
for all \(m, m_1, m_2 \in M\), \(n,n_1,n_2 \in N\), and \(r_1,r_2 \in R\).
\end{defn}
\begin{cor}
Suppose \(R\) is a commutative ring, and \(M,N\) two left \(R\)-modules. Let \(M \otimes_R N\) be the tensor product of \(M\) and \(N\) over \(R\), where \(M\) is given the standard \(R\)-module structure. Then \(M \otimes_R N\) is a left \(R\)-module with
\begin{align*}
r(m \otimes n) = (rm) \otimes n = (mr) \otimes n = m \otimes (rn)
\end{align*}
and the map \(\iota:M \times N \to M \otimes_R N\) with \(\iota(m,n) = m \otimes n\) is an \(R\)-bilinear map. If \(L\) is any left \(R\)-module then there is a bijection between \(R\)-bilinear maps and \(R\)-module homomorphisms induced by the bijection between \(\varphi: M\times N \to L \) and \(\Phi: M \otimes_R N \to L \) via the commutative diagram:
\begin{center}
\begin{tikzpicture}
\matrix (m)
[
matrix of math nodes,
row sep = 3em,
column sep = 4em
]
{
M \times N & M \otimes_R N \\
& L \\
};
\path
(m-1-2) edge [->] node [right] {\(\Phi \)} (m-2-2)
(m-1-1) edge [->] node [above] {\(\iota\)} (m-1-2)
(m-1-1) edge [->] node [below] {$\varphi$} (m-2-2);
\end{tikzpicture}
\end{center}
\end{cor}
\begin{exmp}
\(\Z/2\Z \otimes_\Z \Z/3\Z = 0\). Indeed, for any simple tensor \(a \otimes b\),
\begin{align*}
a \otimes b = 3(a \otimes b) - 2(a \otimes b) = a \otimes 3b - 2a \otimes b = a \otimes 0 - 0 \otimes b = 0.
\end{align*}
More generally, \(\Z/m\Z \otimes_\Z \Z/n\Z \cong \Z/d\Z\) where \(d = \gcd(m,n)\).
\end{exmp}
\begin{thm}[Tensor Product of Homomorphisms]
Let \(M, M'\) be right \(R\)-modules, let \(N,N'\) be left \(R\)-modules. Suppose \(\varphi :M \to M'\) and \(\psi : N \to N'\) are \(R\)-module homomorphisms. Then there is a unique group homomorphism denoted by \(\varphi \otimes \psi \) given by
\begin{align*}
\varphi \otimes \psi : M \otimes_R N \to M' \otimes_R N'\\
(\varphi \otimes \psi )(m \otimes n) = \varphi (m) \otimes \psi (n)
\end{align*}
for all \(m \in M\) and \(n \in N\).\\
If \(M,M'\) are \((S,R)\)-bimodules for some ring \(S\), and \(\varphi \) is also an \(S\)-module homomorphism, then \(\varphi \otimes \psi \) is a homomorphism of left \(S\)-modules. Hence if \(R\) is commutative then \(\varphi \otimes \psi \) is always an \(R\)-module homomorphism for the standard \(R\)-module structures.
\end{thm}
Notice that the uniqueness conditions tells us that if \(\lambda :M' \to M''\) and \(\mu : N' \to N''\) are \(R\)-module homomorphisms, then
\begin{align*}
(\lambda \otimes \mu ) \circ (\varphi \otimes \psi ) = (\lambda \circ \varphi ) \otimes (\mu \circ \psi ).
\end{align*}
In fact, we can use this idea to extend the tensor product into an \(n\)-fold tensor product.
\begin{thm}
Suppose \(M\) is a right \(R\)-module, \(N\) is an \((R,T)\)-bimodule, and \(L\) is a left \(T\)-module. Then there is a unique isomorphism
\begin{align*}
(M \otimes_R N) \otimes_T L \cong M \otimes_R (N \otimes_T L)
\end{align*}
of abelian groups such that
\begin{align*}
(m \otimes_R n) \otimes_T l \mapsto m \otimes_R (n \otimes_T l).
\end{align*}
If \(M\) is an \((S,R)\)-bimodule, then this is an isomorphism of \(S\)-modules.
\end{thm}
\begin{cor}
Let \(R\) be a commutative ring and \(M,N,L\) form left \(R\)-modules. Then
\begin{align*}
(M \otimes_R N) \otimes_R L \cong M \otimes_R(N \otimes_R L)
\end{align*}
\end{cor}
Of course, it will be useful to use the natural extension of a bilinear map.
\begin{defn}
Let \(R\) be a commutative ring and let \(M_1,M_2,\ldots,M_n\) and \(L\) form \(R\)-modules with the standard \(R\)-module structures. A map \(\varphi : M_1 \times \ldots \times M_n \to L\) is called \textbf{\(n\)-multilinear over \(R\)} if it is an \(R\)-module homomorphism in each component:
\begin{align*}
\varphi (m_1,\ldots,m_{i-1},rm_i + r'm'_i, m_{i+1},\ldots,m_n) = r \varphi(m_1,\ldots,m_i,\ldots,m_n) + r' \varphi (m_1,\ldots,m_i', \ldots, m_n)
\end{align*}
\end{defn}
Hence we can define an \(n\)-fold tensor product by iterating the tensor product of pairs of modules.
\begin{cor}
Let \(R\) be a commutative ring and let \(M_1,\ldots,M_n,L\) be \(R\)-modules. Let \(M_1 \otimes_R M_{2} \otimes_R \ldots \otimes_R M_n\) be the sequence of tensor products of pairs of these modules and let
\begin{align*}
\iota:M_1\times \ldots \times M_n \to M_1 \otimes_R \ldots \otimes_R M_n\\
\iota(m_1,\ldots,m_n) = m_1 \otimes_R \ldots \otimes_R m_n.
\end{align*}
Then for every \(R\)-module homomorphism \(\Phi : M_1 \otimes_R \ldots \otimes_R M_n \to L\) the map \(\varphi = \Phi \circ \iota\) is \(n\)-multilinear from \(M_1\times \ldots\times M_n\to L\).\\
If \(\varphi :M_1\times \ldots\times M_n \to L\) is an \(n\)-multilinear map then there is a unique \(R\)-module homomorphism \(\Phi :M_1 \otimes_R \ldots \otimes_R M_n \to L\) such that \(\varphi = \Phi \circ \iota\). This bijection induces a bijection between \(n\)-multilinear maps and \(R\)-module homomorphisms for which the following diagram commutes:
\begin{center}
\begin{tikzpicture}
\matrix (m)
[
matrix of math nodes,
row sep = 3em,
column sep = 4em
]
{
M_1 \times \ldots \times M_n & M_1 \otimes_R \ldots \otimes_R M_n \\
& L \\
};
\path
(m-1-2) edge [->] node [right] {\(\Phi \)} (m-2-2)
(m-1-1) edge [->] node [above] {\(\iota\)} (m-1-2)
(m-1-1) edge [->] node [below] {$\varphi$} (m-2-2);
\end{tikzpicture}
\end{center}
\end{cor}
Tensor products also behave well with respect to direct sums.
\begin{thm}[Tensor Products of Direct Sums]
Let \(M, M'\) be right \(R\)-modules and let \(N, N'\) be left \(R\)-modules. Then there are unique group isomorphisms
\begin{align*}
(M \oplus M') \otimes_R N \cong (M \otimes_R N) \oplus (M' \otimes_R N)\\
M \otimes_R (N \oplus N') \cong (M \otimes_R N) \oplus (M \otimes_R N')\\
(m,m') \otimes n \mapsto (m \otimes n, m' \otimes n)\\
m \otimes(n,n') \mapsto (m \otimes n, m \otimes n').
\end{align*}
If \(M,M'\) are also \((S,R)\)-bimodules, then these are isomorphisms of left \(S\)-modules. In particular, if \(R\) is commutative, these are isomorphisms of \(R\)-modules.
\end{thm}
Of course, this theorem extends inductively to any finite direct sum of \(R\)-modules (in fact, to arbitrary direct sums). In essence, tensor products commute with direct sums.
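As a quick worked instance (using the standard facts \(M \otimes_\Z \Z \cong M\) and \(\Z/2\Z \otimes_\Z \Z/3\Z = 0\)):
\begin{align*}
\Z/2\Z \otimes_\Z (\Z \oplus \Z/3\Z) \cong (\Z/2\Z \otimes_\Z \Z) \oplus (\Z/2\Z \otimes_\Z \Z/3\Z) \cong \Z/2\Z \oplus 0 \cong \Z/2\Z.
\end{align*}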
\begin{cor}
The module obtained from the free \(R\)-module \(N \cong \prescript{}{R}R^{n}\) by extension of scalars from \(R\) to \(S\) is the free \(S\)-module \(\prescript{}{S}S^{n}\):
\begin{align*}
S \otimes_R R^{n} \cong S^{n}
\end{align*}
as left \(S\)-modules.
\end{cor}
\begin{cor}
Let \(R\) be a commutative ring and let \(M \cong R^{s}\), \(N \cong R^{t}\) form free \(R\)-modules with bases \(m_1,\ldots,m_s\) and \(n_1,\ldots,n_t\) respectively. Then \(M \otimes_R N\) forms a free \(R\)-module of rank \(st\) with basis \(m_i \otimes n_j\), \(1\leq i\leq s\) and \(1\leq j\leq t\), so that:
\begin{align*}
R^{s} \otimes_R R^{t} \cong R^{st}.
\end{align*}
\end{cor}
\begin{rmrk}
The tensor product of two free modules of arbitrary rank over a commutative ring is free.
\end{rmrk}
\begin{prop}
Suppose \(R\) is a commutative ring and \(M,N\) form left \(R\)-modules via the standard \(R\)-module structures. Then there is a unique \(R\)-module isomorphism
\begin{align*}
M \otimes_R N \cong N \otimes_R M\\
m \otimes n \mapsto n \otimes m
\end{align*}
\end{prop}
One might think that if \(M=N\) then \(a \otimes b = b \otimes a\) for all \(a,b \in M\), but in general this is not the case. Tensors that do have this property are referred to as symmetric tensors, to be studied later.
\begin{prop}
Let \(R\) be a commutative ring and let \(A,B\) be \(R\)-algebras. Then the multiplication
\begin{align*}
(a \otimes b) (a' \otimes b') = a a' \otimes b b'
\end{align*} is well-defined and makes \(A\otimes_R B\) into an \(R\)-algebra.
\end{prop}
\begin{exmp}
Let \(R\) be a commutative ring. Then \(R[x] \otimes_R R[y] \cong R[x,y]\) as \(R\)-algebras, with the simple tensor \(p(x) \otimes q(y)\) mapping to the product \(p(x)q(y)\).
\end{exmp}
% \printindex
\end{document}
| {
"alphanum_fraction": 0.6368454946,
"avg_line_length": 56.3882681564,
"ext": "tex",
"hexsha": "d9f4de8d93b7bddf832b44d997986605d8714e02",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "d9f1bfd9e6ea62a9d56292f7890f99c450b54c9b",
"max_forks_repo_licenses": [
"CC0-1.0"
],
"max_forks_repo_name": "gjgress/LibreMath",
"max_forks_repo_path": "Module Theory/Notes/source/TensorProductsModules.tex",
"max_issues_count": 12,
"max_issues_repo_head_hexsha": "d9f1bfd9e6ea62a9d56292f7890f99c450b54c9b",
"max_issues_repo_issues_event_max_datetime": "2021-05-20T23:23:22.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-05-20T22:09:37.000Z",
"max_issues_repo_licenses": [
"CC0-1.0"
],
"max_issues_repo_name": "gjgress/Libera-Mentis",
"max_issues_repo_path": "Module Theory/Notes/source/TensorProductsModules.tex",
"max_line_length": 469,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "d9f1bfd9e6ea62a9d56292f7890f99c450b54c9b",
"max_stars_repo_licenses": [
"CC0-1.0"
],
"max_stars_repo_name": "gjgress/Libera-Mentis",
"max_stars_repo_path": "Module Theory/Notes/source/TensorProductsModules.tex",
"max_stars_repo_stars_event_max_datetime": "2021-07-16T23:18:15.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-07-16T23:18:15.000Z",
"num_tokens": 7321,
"size": 20187
} |
\section{Yearly Average Temperatures in Falsterbo, 1967-2017}
With the release of the Intergovernmental Panel on Climate Change (IPCC)'s Sixth Assessment Report\cite{IPCC} in August 2021 having further highlighted the global increase in average temperatures, the topic has once again returned to the forefront of public discourse. The global increase in average temperatures is hardly a novel line of inquiry; however, the analysis of local temperature trends may still be of interest (particularly to those local to the area in question). The data analysed in this section was measured at a measuring station located in Falsterbo in southwestern Skåne, at a height of 1.541 m above sea level, at coordinates (in decimal degrees) 55.3837 latitude, 12.8167 longitude.
The data was provided in CSV format as part of an SMHI dataset, analysed using a custom C++ function and plotted using ROOT. The file is read line by line and each line is sliced into strings, separating the date and the temperature; from the date a substring consisting only of the year is extracted. This substring is converted to an integer and used to check whether the entry belongs to the relevant range of years. The full date string is then compared against the previous one to see whether a new day is being parsed: if not, the temperature is added to a running sum and a counter of the number of entries for that day is increased by one; if so, the same actions are performed after first taking the average of the sum, pushing it to a vector containing the daily average temperatures of the year, and zeroing the sum and the entry counter. A similar process is used whenever a new year is encountered, in which case the average of the vector of daily averages is calculated using a small utility function that loops over all elements, after which it is stored both in a separate vector and in a TGraph object. This is repeated for all lines containing years in the specified range; the remaining lines are simply read, and a graph of the data is then plotted from the TGraph object. Worth noting is that the code is unable to account for the quality of the data (indicated in the data set by G for controlled and accepted values, and Y for suspect or aggregated values from roughly checked archive or real-time data), nor can it detect or account for missing data.
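The core of this averaging logic can be sketched in a few lines of C. This is a simplified illustration only: the actual analysis was written in C++ against ROOT as described above, and the assumed line layout (a semicolon-separated date, time, temperature and quality flag) is an assumption of the sketch, not a specification of the SMHI format.
\begin{verbatim}
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

/* Simplified sketch of the daily/yearly averaging described above.
 * Assumed line layout (assumption of this example):
 *   YYYY-MM-DD;HH:MM:SS;temperature;quality                        */
int main(void)
{
    char line[256], cur_day[11] = "";
    double day_sum = 0.0, year_sum = 0.0;
    int day_count = 0, year_days = 0, cur_year = 0;

    while (fgets(line, sizeof line, stdin)) {
        char date[11];
        double temp;
        if (sscanf(line, "%10[^;];%*[^;];%lf", date, &temp) != 2)
            continue;                         /* skip malformed lines  */

        int year = atoi(date);                /* leading YYYY          */
        if (year < 1967 || year > 2017)
            continue;                         /* outside studied range */

        if (strcmp(date, cur_day) != 0) {     /* a new day starts      */
            if (day_count > 0) {
                year_sum += day_sum / day_count;   /* push daily mean  */
                year_days++;
            }
            if (year != cur_year) {           /* a new year starts     */
                if (year_days > 0)
                    printf("%d %.2f\n", cur_year, year_sum / year_days);
                year_sum = 0.0;
                year_days = 0;
                cur_year = year;
            }
            strcpy(cur_day, date);
            day_sum = 0.0;
            day_count = 0;
        }
        day_sum += temp;
        day_count++;
    }
    if (day_count > 0) { year_sum += day_sum / day_count; year_days++; }
    if (year_days > 0) printf("%d %.2f\n", cur_year, year_sum / year_days);
    return 0;
}
\end{verbatim}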
\begin{figure}[H]
\centering
\includegraphics[scale=0.65]{Graph5c.PNG}
\caption{The yearly average temperatures measured at Falsterbo during 1967-2017.}
\label{dipole}
\end{figure}
Although no explicit trendline was plotted, the resulting graph shows a clear upwards trend in temperatures over the period, shifting both high and low peaks towards higher temperatures, as expected given global trends. An abnormally cold period in the latter half of the 1980s (1986-1988) was noted; there is no direct indication that this is due to unaccounted-for missing data or another shortcoming of the code, but the possibility cannot be dismissed.
\section{References}
\begin{thebibliography}{}
\bibitem{IPCC} IPCC, \textit{Climate Change 2021 The Physical Science Basis, Working Group I contribution to the Sixth Assessment Report of the Intergovernmental Panel on Climate Change}
\end{thebibliography} | {
"alphanum_fraction": 0.7966002345,
"avg_line_length": 148.347826087,
"ext": "tex",
"hexsha": "5e5476518944d22010a05128131cff73bffa57e4",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "5341ae81e9bbd36de71c36933c0328c140a4625e",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "NJJLarsson/MNXB01-project",
"max_forks_repo_path": "report/Graph5c.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "5341ae81e9bbd36de71c36933c0328c140a4625e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "NJJLarsson/MNXB01-project",
"max_issues_repo_path": "report/Graph5c.tex",
"max_line_length": 1676,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "5341ae81e9bbd36de71c36933c0328c140a4625e",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "NJJLarsson/MNXB01-project",
"max_stars_repo_path": "report/Graph5c.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 734,
"size": 3412
} |
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Friggeri Resume/CV
% XeLaTeX Template
% Version 1.2 (3/5/15)
%
% This template has been downloaded from:
% http://www.LaTeXTemplates.com
%
% Original author:
% Adrien Friggeri ([email protected])
% https://github.com/afriggeri/CV
%
% License:
% CC BY-NC-SA 3.0 (http://creativecommons.org/licenses/by-nc-sa/3.0/)
%
% Important notes:
% This template needs to be compiled with XeLaTeX and the bibliography, if used,
% needs to be compiled with biber rather than bibtex.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\documentclass[print]{friggeri-cv} % Add 'print' as an option into the square bracket to remove colors from this template for printing
\usepackage{ragged2e}
\hypersetup{colorlinks=true,urlcolor=blue}
\usepackage[none]{hyphenat}
\usepackage{enumitem}
\setlist[itemize]{leftmargin=*}
\begin{document}
\header{Kevin }{Nause}{\textit{Senior Security Consultant}} % Your name and current job title/field
%----------------------------------------------------------------------------------------
% SIDEBAR SECTION
%----------------------------------------------------------------------------------------
\begin{aside} % In the aside, each new line forces a line break
\section{Contact}
~
425.626.7520
%+0 (000) 111 1112
~
{\scriptsize \href{mailto:[email protected]}{[email protected]}}
%\href{http://www.smith.com}{http://www.smith.com}
{\scriptsize \href{https://linkedin.com/in/kevinnause}{linkedin.com/in/kevinnause}}
{\scriptsize \href{https://github.com/Nauscar}{github.com/Nauscar}}
\section{Programming}
C, C++, Java, C\#,
x86 \& ARM Assembly,
Go, Python, JavaScript
\section{Frameworks}
OpenCL, OpenMP,
Hadoop, Thrift,
Qt, ASP .NET
\section{Interests}
{Digital Photography, Performance Vehicles, Homebrewing}
\section{About}
{I enjoy low level programming on platforms such as embedded systems and operating systems.
Working on wearable hacks and obtaining root access on mobile devices are also side interests.
Computer security and logical analysis are key interests of mine.
I have been a Linux enthusiast since I typed "Hello World" for the first time and have adored penguins ever since.
The first thing I do when I sit down at a computer is change the keyboard layout to Dvorak and plug in a keyboard that is older than myself: the IBM Model M.}
\end{aside}
%----------------------------------------------------------------------------------------
% EDUCATION SECTION
%----------------------------------------------------------------------------------------
\section{Education}
\begin{entrylist}
%------------------------------------------------
\entry
{Sep 2011}
{Apr 2016}
{Bachelor of Applied Science (B.A.Sc.)}
{University of Waterloo}
{Computer Engineering}
%------------------------------------------------
\end{entrylist}
%----------------------------------------------------------------------------------------
% WORK EXPERIENCE SECTION
%----------------------------------------------------------------------------------------
\section{Experience}
\begin{entrylist}
\entry
{Nov 2018}
{(to Present)}
{NCC Group}
{Seattle, Washington}
{\emph{Senior Security Consultant}
\begin{itemize}
\item Design audit for Bootloaders, RTOS, Linux, Android, and Windows
\item Scoped and lead multi-week hardware focused client engagements
\item Hardware teardown, flash dumping, bus probing
\item Reverse engineering C, C\#, and Java binaries
\item Vulnerability assessment, code review, network pentesting
\item ARM shell code creation and control highjacking attacks
\item Automotive (CAN SAE J1939) and Robotics (ROS) security experience
\end{itemize}
Languages Used: C, ARM Assembly, Python, Go \\
}
\entry
{Jul 2016}
{(2 yrs 5 mos)}
{Microsoft}
{Redmond, Washington}
{\emph{Firmware Engineer II}
\begin{itemize}
\item Working on ECs for platforms with Intel CPUs and Nvidia GPUs
\item Experience with power sequencing, battery, and thermal subsystems
\item Implementing inter-bus communications via USB, UART, SPI, I2C, SMBus
\item Working with communication protocols such as TCP/IP, HID, RS-232
\item Proficient with oscilloscopes and logic analyzers
\item Experience with schematics, reference manuals, and errata for hardware peripherals
\end{itemize}
Products: Surface Hub, Surface Laptop \\
Hardware: NXP/Freescale K22 ARM Cortex-M4 \\
Languages Used: C, ARM Assembly, C\#, PowerShell \\
}
\entry
{Aug 2015}
{(5 months)}
{Pebble Technology}
{Kitchener, Ontario}
{\emph{Embedded Firmware Engineer}
\begin{itemize}
\item Implemented device drivers, recovery firmware, and system applications on the Pebble OS (based on FreeRTOS)
\item Primary focus was porting the current firmware to an older device with significantly less flash storage and a black and white screen
\item Optimized anti-aliasing on 8-bit displays, and dithering on 1-bit displays
\end{itemize}
Products: Pebble, Pebble Time, Pebble Time Round \\
Hardware: STM32F4 ARM Cortex-M4, STM32F2 ARM Cortex-M3, TI CC2564
Languages Used: C, ARM Assembly, Python \\
}
\entry
{Jan 2015}
{(4 months)}
{Motorola}
{Kitchener, Ontario}
{\emph{Security Engineer}
\begin{itemize}
\item Discovered and patched vulnerabilities, resource leaks, and concurrency problems in Android OS, Motorola's MSM kernel, and Moto X sensor hub
\item Used static analysis to assist in discovering security vulnerabilities
\item Traced execution flow to isolate false positives or potential exploits
\end{itemize}
Products: Moto E/G/X, Moto 360 \\
Hardware: TI OMAP 3, Qualcomm PM8921 PMIC, NXP 44701 NFC \\
Languages Used: C, C++, Java
}
\end{entrylist}
\goodbreak
\newgeometry{left=2cm, right=2cm}
\renewcommand{\entry}[5]{%
\parbox[t]{1.4cm}{\footnotesize \textbf{#1} \\ \scriptsize\addfontfeature{Color=lightgray} #2}&\parbox[t]{15.8cm}{%
\textbf{#3}%
\hfill%
{\footnotesize\addfontfeature{Color=lightgray} #4}%
\justify #5\vspace{\parsep}%
}\\}
\begin{entrylist}
\entry
{Sep 2014}
{(8 months)}
{Computer Aided Reasoning Group}
{Waterloo, Ontario}
{\emph{Undergraduate Research Assistant, University of Waterloo}
\begin{itemize}
\item Reported to Professor Vijay Ganesh
\item Researched the topic of SAT solvers and their underlying heuristics
\item Primary focus involved the relevance of backdoor variables and community structure for the VSIDS decision heuristic
\item Experience with static analysis, symbolic execution, and Return Oriented Programming (ROP)
\end{itemize}
Languages Used: C, C++, x86 Assembly, Java \\
}
\entry
{May 2014}
{(4 months)}
{ON Semiconductor}
{Waterloo, Ontario}
{\emph{Embedded Tools Developer}
\begin{itemize}
\item Designed Bluetooth Low Energy GATT services for functions such as data streaming, audio streaming, and status updates
\item Embedded programming with BLE enabled medical devices such as hearing aids, insulin monitors, and heart rate monitors
\item Interfaced with Windows and Android client devices
\end{itemize}
Hardware: Nordic nRF51822 Bluetooth Low Energy Controller, ARM Cortex-M0 \\
Languages Used: C, C++, Java, ARM Assembly \\
}
\entry
{Sep 2013}
{(4 months)}
{eSolutionsGroup}
{Waterloo, Ontario}
{\emph{Mobile Developer}
\begin{itemize}
\item Designed a real-time transit prediction system using GTFS data and protocol buffers
\item Database design, MVC server communications, and mobile application development
\end{itemize}
Languages Used: C\# (ASP .NET), SQL, JavaScript \\
}
\entry
{May 2012}
{(16 months)}
{Regional Municipality of York}
{Richmond Hill, Ontario}
{\emph{Transit Management Systems}
\begin{itemize}
\item Worked with GTFS data and real-time prediction feeds and contributed to the OneBusAway project
\item Hands on work with transit embedded systems and fare management systems
\end{itemize}
Languages Used: C\#, Java \\
}
\end{entrylist}
\section{Projects}
\begin{entrylist}
\entry
{Sep 2015}
{}
{Automated Home Brewery System}
{Brew It Yourself}
{The objective of this project is to combine homebrewing experience with engineering design, and construct a single vessel brewing system. By maintaining a strict control of key parameters, the brewing process is regulated using a combination of fluid mechanics, heat transfer, digital controls, power systems, embedded robotics and mobile development. For more information please see the \href{https://github.com/BrewItYourself/Documentation/blob/master/Final\%20Report/finalreportpdf/final-report.pdf}{Design Overview} on GitHub.}\\
\entry
{Jan 2014}
{}
{Myo DSLR Control}
{Thalmic Labs}
{After being accepted into Thalmic Lab's alpha test program, this project focused on creating an interface between the Myo armband and an Arduino to control the shutter of a DSLR via the remote trigger pin-out and an IR sensor. This concept was than expanded to utilize TCP/IP communications in order to control the camera's shutter at even greater distances and remote locations.}
\end{entrylist}
%
%\section{Awards}
%\begin{entrylist}
%\entry
%{Aug 2011}
%{}
%{Municipal Engineering Award}
%{Municipal Engineers Association}
%{Awarded to the top essay entered in the topic of public interest related to municipal engineering. This sparked a personal interest in the effects of technology on society and their resulting communication networks.}
%------------------------------------------------
%\end{entrylist}
%----------------------------------------------------------------------------------------
% PUBLICATIONS SECTION
%----------------------------------------------------------------------------------------
%
%\section{publications}
%
%\printbibsection{article}{article in peer-reviewed journal} % Print all articles from the bibliography
%
%\printbibsection{book}{books} % Print all books from the bibliography
%
%\begin{refsection} % This is a custom heading for those references marked as "inproceedings" but not containing "keyword=france"
%\nocite{*}
%\printbibliography[sorting=chronological, type=inproceedings, title={international peer-reviewed conferences/proceedings}, notkeyword={france}, heading=bibheading]
%\end{refsection}
%
%\begin{refsection} % This is a custom heading for those references marked as "inproceedings" and containing "keyword=france"
%\nocite{*}
%\printbibliography[sorting=chronological, type=inproceedings, title={local peer-reviewed conferences/proceedings}, keyword={france}, heading=bibheading]
%\end{refsection}
%
%\printbibsection{misc}{other publications} % Print all miscellaneous entries from the bibliography
%
%\printbibsection{report}{research reports} % Print all research reports from the bibliography
%
%----------------------------------------------------------------------------------------
\end{document} | {
"alphanum_fraction": 0.6975152368,
"avg_line_length": 36.9031141869,
"ext": "tex",
"hexsha": "2d492ed2b815c8ae1a0b02061817371aeb4de088",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "feb15b5a409ff0f1b7fed173b30208a4e5ed1dec",
"max_forks_repo_licenses": [
"Unlicense"
],
"max_forks_repo_name": "Nauscar/resume",
"max_forks_repo_path": "kevin-nause-resume.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "feb15b5a409ff0f1b7fed173b30208a4e5ed1dec",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Unlicense"
],
"max_issues_repo_name": "Nauscar/resume",
"max_issues_repo_path": "kevin-nause-resume.tex",
"max_line_length": 535,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "feb15b5a409ff0f1b7fed173b30208a4e5ed1dec",
"max_stars_repo_licenses": [
"Unlicense"
],
"max_stars_repo_name": "Nauscar/resume",
"max_stars_repo_path": "kevin-nause-resume.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2550,
"size": 10665
} |
\section{Introduction}
The {\tt{}mex{\char95}handle} module keeps track of C data structures by numeric
handles, so that Matlab users can refer to C objects by providing a
handle.
\section{Interface}
\nwfilename{mex-handle.nw}\nwbegincode{1}\sublabel{NWmexD-mexC-1}\nwmargintag{{\nwtagstyle{}\subpageref{NWmexD-mexC-1}}}\moddef{mex-handle.h~{\nwtagstyle{}\subpageref{NWmexD-mexC-1}}}\endmoddef
#ifndef MEX_HANDLE_H
#define MEX_HANDLE_H
\LA{}exported functions~{\nwtagstyle{}\subpageref{NWmexD-expI-1}}\RA{}
#endif /* MEX_HANDLE_H */
\nwnotused{mex-handle.h}\nwendcode{}\nwbegindocs{2}\nwdocspar
The handle manager object is a statically-allocated singleton.
It is initialized by {\tt{}create} and destroyed by {\tt{}destroy}.
\nwenddocs{}\nwbegincode{3}\sublabel{NWmexD-expI-1}\nwmargintag{{\nwtagstyle{}\subpageref{NWmexD-expI-1}}}\moddef{exported functions~{\nwtagstyle{}\subpageref{NWmexD-expI-1}}}\endmoddef
void mex_handle_create ();
void mex_handle_destroy();
\nwalsodefined{\\{NWmexD-expI-2}\\{NWmexD-expI-3}}\nwused{\\{NWmexD-mexC-1}}\nwendcode{}\nwbegindocs{4}\nwdocspar
Each object stored by the handle manager has a data field
and a numeric type tag. New tags can be allocated using
the {\tt{}new{\char95}tag} routine. The zero tag is reserved for empty
handle slots.
\nwenddocs{}\nwbegincode{5}\sublabel{NWmexD-expI-2}\nwmargintag{{\nwtagstyle{}\subpageref{NWmexD-expI-2}}}\moddef{exported functions~{\nwtagstyle{}\subpageref{NWmexD-expI-1}}}\plusendmoddef
int mex_handle_new_tag();
\nwendcode{}\nwbegindocs{6}\nwdocspar
The {\tt{}add} function allocates a new handle. The {\tt{}find}
function locates a handle that has already been allocated.
Once a handle has been allocated, the associated data and tag fields can
be retrieved with the {\tt{}data} and {\tt{}tag} methods. If
the handle is not valid, {\tt{}data} will return NULL and {\tt{}tag}
will return zero. The {\tt{}remove} function deallocates a handle;
future references to that handle will be invalid.
\nwenddocs{}\nwbegincode{7}\sublabel{NWmexD-expI-3}\nwmargintag{{\nwtagstyle{}\subpageref{NWmexD-expI-3}}}\moddef{exported functions~{\nwtagstyle{}\subpageref{NWmexD-expI-1}}}\plusendmoddef
int mex_handle_add (void* data, int tag);
int mex_handle_find (void* data, int tag);
int mex_handle_tag (int handle_id);
void* mex_handle_data (int handle_id);
void* mex_handle_remove(int handle_id);
\nwendcode{}\nwbegindocs{8}\nwdocspar
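For illustration, client code might wrap a C object type behind handles roughly
as follows (a minimal sketch: the {\tt{}mesh{\char95}t} type and the wrapper
functions are hypothetical and not part of this module).
\begin{verbatim}
#include "mex-handle.h"

static int mesh_tag;    /* tag reserved for mesh objects */

void mesh_handles_init()
{
    mex_handle_create();               /* set up the handle manager */
    mesh_tag = mex_handle_new_tag();   /* get a fresh type tag      */
}

int mesh_to_handle(mesh_t* mesh)
{
    return mex_handle_add(mesh, mesh_tag);   /* id handed to Matlab */
}

mesh_t* handle_to_mesh(int handle_id)
{
    if (mex_handle_tag(handle_id) != mesh_tag)
        return NULL;                         /* wrong type or stale id */
    return (mesh_t*) mex_handle_data(handle_id);
}
\end{verbatim}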
\section{Implementation}
\nwenddocs{}\nwbegincode{9}\sublabel{NWmexD-mexC.2-1}\nwmargintag{{\nwtagstyle{}\subpageref{NWmexD-mexC.2-1}}}\moddef{mex-handle.c~{\nwtagstyle{}\subpageref{NWmexD-mexC.2-1}}}\endmoddef
#include <stdio.h>
#include "mex-handle.h"
#include "dynarray.h"
typedef struct mex_handle_t \{
void* data;
int tag;
int id;
\} mex_handle_t;
static dynarray_t handles;
static int next_tag;
static int next_id;
static int free_list;
\LA{}functions~{\nwtagstyle{}\subpageref{NWmexD-fun9-1}}\RA{}
\nwnotused{mex-handle.c}\nwendcode{}\nwbegindocs{10}\nwdocspar
The handles are stored in the dynamic array {\tt{}handles}. Each handle
has a data pointer, a tag, and a field for the handle identifier.
The {\tt{}next{\char95}tag} and {\tt{}next{\char95}id} variables keep track of the number
of tags and handle identifiers that have been allocated over the lifetime
of the handle manager.
\nwenddocs{}\nwbegincode{11}\sublabel{NWmexD-fun9-1}\nwmargintag{{\nwtagstyle{}\subpageref{NWmexD-fun9-1}}}\moddef{functions~{\nwtagstyle{}\subpageref{NWmexD-fun9-1}}}\endmoddef
void mex_handle_create()
\{
handles = dynarray_create(sizeof(mex_handle_t), 16);
next_tag = 1;
next_id = 1;
free_list = -1;
\}
void mex_handle_destroy()
\{
dynarray_destroy(handles);
\}
\nwalsodefined{\\{NWmexD-fun9-2}\\{NWmexD-fun9-3}\\{NWmexD-fun9-4}\\{NWmexD-fun9-5}\\{NWmexD-fun9-6}\\{NWmexD-fun9-7}}\nwused{\\{NWmexD-mexC.2-1}}\nwendcode{}\nwbegindocs{12}\nwdocspar
Allocating a new tag is just a matter of incrementing the {\tt{}next{\char95}tag} counter.
\nwenddocs{}\nwbegincode{13}\sublabel{NWmexD-fun9-2}\nwmargintag{{\nwtagstyle{}\subpageref{NWmexD-fun9-2}}}\moddef{functions~{\nwtagstyle{}\subpageref{NWmexD-fun9-1}}}\plusendmoddef
int mex_handle_new_tag()
\{
return next_tag++;
\}
\nwendcode{}\nwbegindocs{14}\nwdocspar
At a high level, the {\tt{}add} and {\tt{}remove} operations are simple, too.
To add a handle, we need only to find a space for it, set the fields,
and assign an identifier. To remove a handle, we grab the handle data,
deallocate the handle, and return the data pointer.
\nwenddocs{}\nwbegincode{15}\sublabel{NWmexD-fun9-3}\nwmargintag{{\nwtagstyle{}\subpageref{NWmexD-fun9-3}}}\moddef{functions~{\nwtagstyle{}\subpageref{NWmexD-fun9-1}}}\plusendmoddef
int mex_handle_add(void* data, int tag)
\{
int index;
mex_handle_t* handle;
\LA{}allocate handle~{\nwtagstyle{}\subpageref{NWmexD-allF-1}}\RA{}
handle->data = data;
handle->tag = tag;
handle->id = \LA{}allocate handle identifier~{\nwtagstyle{}\subpageref{NWmexD-allQ-1}}\RA{}
return handle->id;
\}
\nwendcode{}\nwbegindocs{16}\nwdocspar
\nwenddocs{}\nwbegincode{17}\sublabel{NWmexD-fun9-4}\nwmargintag{{\nwtagstyle{}\subpageref{NWmexD-fun9-4}}}\moddef{functions~{\nwtagstyle{}\subpageref{NWmexD-fun9-1}}}\plusendmoddef
void* mex_handle_remove(int handle_id)
\{
int index;
mex_handle_t* handle;
void* data;
\LA{}find handle identifier~{\nwtagstyle{}\subpageref{NWmexD-finM-1}}\RA{}
if (handle == NULL)
return NULL;
else \{
data = handle->data;
\LA{}deallocate handle~{\nwtagstyle{}\subpageref{NWmexD-deaH-1}}\RA{}
return data;
\}
\}
\nwendcode{}\nwbegindocs{18}\nwdocspar
The {\tt{}find} function returns the identifier for a handle that has
already been allocated.
\nwenddocs{}\nwbegincode{19}\sublabel{NWmexD-fun9-5}\nwmargintag{{\nwtagstyle{}\subpageref{NWmexD-fun9-5}}}\moddef{functions~{\nwtagstyle{}\subpageref{NWmexD-fun9-1}}}\plusendmoddef
int mex_handle_find(void* data, int tag)
\{
int i, n;
mex_handle_t* handle_data = dynarray_data(handles);
n = dynarray_count(handles);
for (i = 0; i < n; ++i) \{
if (handle_data[i].tag == tag && handle_data[i].data == data)
return handle_data[i].id;
\}
return 0;
\}
\nwendcode{}\nwbegindocs{20}\nwdocspar
The {\tt{}tag} and {\tt{}data} functions find a handle and return its tag or data
field, respectively (or zero and NULL if the handle is invalid).
\nwenddocs{}\nwbegincode{21}\sublabel{NWmexD-fun9-6}\nwmargintag{{\nwtagstyle{}\subpageref{NWmexD-fun9-6}}}\moddef{functions~{\nwtagstyle{}\subpageref{NWmexD-fun9-1}}}\plusendmoddef
int mex_handle_tag(int handle_id)
\{
int index;
mex_handle_t* handle;
\LA{}find handle identifier~{\nwtagstyle{}\subpageref{NWmexD-finM-1}}\RA{}
return (handle != NULL) ? handle->tag : 0;
\}
\nwendcode{}\nwbegindocs{22}\nwdocspar
\nwenddocs{}\nwbegincode{23}\sublabel{NWmexD-fun9-7}\nwmargintag{{\nwtagstyle{}\subpageref{NWmexD-fun9-7}}}\moddef{functions~{\nwtagstyle{}\subpageref{NWmexD-fun9-1}}}\plusendmoddef
void* mex_handle_data(int handle_id)
\{
int index;
mex_handle_t* handle;
\LA{}find handle identifier~{\nwtagstyle{}\subpageref{NWmexD-finM-1}}\RA{}
return (handle != NULL) ? handle->data : NULL;
\}
\nwendcode{}\nwbegindocs{24}\nwdocspar
We keep a free list of deallocated handle slots for re-use, since
handles are likely to be allocated and deallocated fairly often.
The handle {\tt{}id} field is used to point to the next slot on
the free list, and $-1$ serves the role of a NULL. If we want
a new slot and there are none available from the free list, we
just expand the array to accommodate a new handle.
\nwenddocs{}\nwbegincode{25}\sublabel{NWmexD-allF-1}\nwmargintag{{\nwtagstyle{}\subpageref{NWmexD-allF-1}}}\moddef{allocate handle~{\nwtagstyle{}\subpageref{NWmexD-allF-1}}}\endmoddef
if (free_list >= 0) \{
index = free_list;
handle = dynarray_get(handles, index);
free_list = handle->id;
\} else \{
index = dynarray_count(handles);
handle = dynarray_append(handles, NULL);
\}
\nwused{\\{NWmexD-fun9-3}}\nwendcode{}\nwbegindocs{26}\nwdocspar
\nwenddocs{}\nwbegincode{27}\sublabel{NWmexD-deaH-1}\nwmargintag{{\nwtagstyle{}\subpageref{NWmexD-deaH-1}}}\moddef{deallocate handle~{\nwtagstyle{}\subpageref{NWmexD-deaH-1}}}\endmoddef
handle->tag = 0;
handle->data = NULL;
handle->id = free_list;
free_list = index;
\nwused{\\{NWmexD-fun9-4}}\nwendcode{}\nwbegindocs{28}\nwdocspar
A natural way to pick handle identifiers would be to use the
array index of the handle slot. There is a problem
with this approach, though. We would like small,
common numbers like $0$ and $1$ \emph{not} to be valid handles.
This makes it harder for the user to accidentally pass in
a valid handle that was never returned to him. Similarly, we would
like the handles not to be too close together, so that taking a
valid handle and adding one to it doesn't yield another valid handle.
We can get a more scattered space of handles by putting some unique
identifying information in the high order bits and using the low
order bits for index information. For the moment, we'll use the
lower eight bits for indexing. I doubt we'll ever have more than 256
simultaneously active handles, so the for loop used to retrieve
the handle identifiers will probably never go past the first iteration
unless there is an error.
\nwenddocs{}\nwbegincode{29}\sublabel{NWmexD-allQ-1}\nwmargintag{{\nwtagstyle{}\subpageref{NWmexD-allQ-1}}}\moddef{allocate handle identifier~{\nwtagstyle{}\subpageref{NWmexD-allQ-1}}}\endmoddef
(next_id++ << 8) + (index & 255);
\nwused{\\{NWmexD-fun9-3}}\nwendcode{}\nwbegindocs{30}\nwdocspar
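For example, if the fifth identifier is being issued ({\tt{}next{\char95}id} is 5) and the
handle lands in slot 2, the resulting identifier is $5 \cdot 256 + 2 = 1282$. The lookup
code below recovers the slot by reducing modulo 256 ($1282 \bmod 256 = 2$) and, if that
slot does not hold the matching identifier, probes slots $258, 514, \ldots$ until it
finds a match or runs off the end of the array.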
\nwenddocs{}\nwbegincode{31}\sublabel{NWmexD-finM-1}\nwmargintag{{\nwtagstyle{}\subpageref{NWmexD-finM-1}}}\moddef{find handle identifier~{\nwtagstyle{}\subpageref{NWmexD-finM-1}}}\endmoddef
for (index = (handle_id & 255);
(handle = dynarray_get(handles, index)) != NULL &&
handle->id != handle_id;
index += 256);
\nwused{\\{NWmexD-fun9-4}\\{NWmexD-fun9-6}\\{NWmexD-fun9-7}}\nwendcode{}
\nwixlogsorted{c}{{allocate handle}{NWmexD-allF-1}{\nwixu{NWmexD-fun9-3}\nwixd{NWmexD-allF-1}}}%
\nwixlogsorted{c}{{allocate handle identifier}{NWmexD-allQ-1}{\nwixu{NWmexD-fun9-3}\nwixd{NWmexD-allQ-1}}}%
\nwixlogsorted{c}{{deallocate handle}{NWmexD-deaH-1}{\nwixu{NWmexD-fun9-4}\nwixd{NWmexD-deaH-1}}}%
\nwixlogsorted{c}{{exported functions}{NWmexD-expI-1}{\nwixu{NWmexD-mexC-1}\nwixd{NWmexD-expI-1}\nwixd{NWmexD-expI-2}\nwixd{NWmexD-expI-3}}}%
\nwixlogsorted{c}{{find handle identifier}{NWmexD-finM-1}{\nwixu{NWmexD-fun9-4}\nwixu{NWmexD-fun9-6}\nwixu{NWmexD-fun9-7}\nwixd{NWmexD-finM-1}}}%
\nwixlogsorted{c}{{functions}{NWmexD-fun9-1}{\nwixu{NWmexD-mexC.2-1}\nwixd{NWmexD-fun9-1}\nwixd{NWmexD-fun9-2}\nwixd{NWmexD-fun9-3}\nwixd{NWmexD-fun9-4}\nwixd{NWmexD-fun9-5}\nwixd{NWmexD-fun9-6}\nwixd{NWmexD-fun9-7}}}%
\nwixlogsorted{c}{{mex-handle.c}{NWmexD-mexC.2-1}{\nwixd{NWmexD-mexC.2-1}}}%
\nwixlogsorted{c}{{mex-handle.h}{NWmexD-mexC-1}{\nwixd{NWmexD-mexC-1}}}%
\nwbegindocs{32}\nwdocspar
\nwenddocs{}
| {
"alphanum_fraction": 0.7285752664,
"avg_line_length": 42.1396226415,
"ext": "tex",
"hexsha": "ea4e7aa31789b6b7c2150f46784ec34f30f27354",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "699534852cb37fd2225a8b4b0072ebca96504d23",
"max_forks_repo_licenses": [
"BSD-2-Clause"
],
"max_forks_repo_name": "davidgarmire/sugar",
"max_forks_repo_path": "sugar30/src/tex/mex-handle.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "699534852cb37fd2225a8b4b0072ebca96504d23",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-2-Clause"
],
"max_issues_repo_name": "davidgarmire/sugar",
"max_issues_repo_path": "sugar30/src/tex/mex-handle.tex",
"max_line_length": 218,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "699534852cb37fd2225a8b4b0072ebca96504d23",
"max_stars_repo_licenses": [
"BSD-2-Clause"
],
"max_stars_repo_name": "davidgarmire/sugar",
"max_stars_repo_path": "sugar30/src/tex/mex-handle.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 3975,
"size": 11167
} |
\input{template.tex}
\title{Reminder on ordinal arithmetic}
\newcommand{\cf}{\mathrm{cf}}
\begin{document}
\maketitle
\part{Definitions}
\section{Well-orders}
Ordinals are the mother of all well-orders. A total order is called a \emph{well-order} if one of the following equivalent conditions applies
\begin{itemize}
\item Any nonempty subset has a least element
\item There is no infinite descending sequence of elements
\end{itemize}
Every subset of a well-ordered set is again well-ordered. Every element $a \in P$ that is not maximal has an immediate successor $a^+ = \min \{ b : b > a \}$. Every bounded subset of a well-order has a supremum. For every element $a \in P$, $a$ is either the minimal element, a \emph{successor element} ($a = b^+$) or a so-called \emph{limit element}. In this case, it is the supremum of the elements strictly below it. By the axiom of choice, every set can be well-ordered. \\
\textbf{Proposition} [Trichotomy]
If $P,Q$ are two well-orders, then we have the following trichotomy
\begin{itemize}
\item $P$ is order-isomorphic to a proper initial segment of $Q$
\item $P$ is order-isomorphic to $Q$
\item A proper initial segment of $P$ is order-isomorphic to $Q$.
\end{itemize}
We call two well-orders \emph{of the same order-type} if they are order-isomorphic. The order-types are thus totally ordered by `is isomorphic to an initial segment of'. This will turn out to be a well-order.
\section{Ordinals}
Ordinal numbers are certain well-ordered sets, serving as distinguished representatives for order-types. For every well-ordered set $P$, there is a unique ordinal $\alpha$ such that $\alpha$ and $P$ are order-isomorphic. The proper class of ordinal numbers is well-ordered, and every well-ordered set is order-isomorphic to an initial segment of the ordinal numbers.
We use the von Neumann construction of ordinals, i.e. for every ordinal $\lambda$, we let
\[ \lambda = \{ \alpha : \alpha < \lambda \} = [0,\lambda) \]
as a set. We have for all ordinals $\alpha,\lambda$ that
\[ \alpha < \lambda \Leftrightarrow \alpha \in \lambda \Leftrightarrow \alpha \subset \lambda \Leftrightarrow \alpha \text{ proper initial segment of } \lambda \]
The ordinals are as sets defined recursively via
\[
\begin{cases}
0 = \emptyset \\
\alpha^+ = \alpha \cup \{\alpha\} \\
\lambda = \bigcup_{\alpha < \lambda} \alpha
\end{cases}
\]
For example we have $2 = \{0,1\} = \{0,\{0\}\} = \{\emptyset, \{\emptyset\}\}$ and we get the first transfinite ordinal $\omega$ via
\[ \omega := \bigcup_{n \text{ finite }} n = \{0,1,2,\ldots\} = \mathbb N \]
and its successor
\[ \omega^+ := \mathbb N \cup \{ \mathbb N \} = \{0,1,2,\ldots, \mathbb N \}. \]
Every \emph{set} $\Gamma$ of ordinals has a supremum in the ordinals, and we use the following three notations interchangeably.
\[ \sup \Gamma = \lim_{\gamma \in \Gamma} \gamma := \bigcup_{\gamma \in \Gamma} \gamma \]
\section{Cardinals}
We model cardinals as special ordinals, namely we identify every cardinal $\mathfrak a$ with the least ordinal $\alpha$ that is in bijection with $\mathfrak a$. Note that now the cardinal $\alpha$ is itself the canonical representative of a set of cardinality $\alpha$, as
\[ |\alpha| = |\{ \beta : \beta < \alpha \}| = \alpha. \]
Note that $\omega$ is the first infinite ordinal and thus a cardinal
\[ \omega =: \aleph_0 \]
Cardinals are again well-ordered, leading to their own notion of cardinal successors. We can thus enumerate the infinite cardinals by ordinals
\[
\begin{cases}
\aleph_0 = \omega \\
\aleph_{\alpha^+} = (\aleph_\alpha)^+ \\
\aleph_\lambda = \bigcup_{\alpha < \lambda} \aleph_\alpha
\end{cases}
\]
We let $\omega_{\alpha} = \aleph_{\alpha}$, so e.g. $\omega_1$ is the first uncountable ordinal. \\
\textbf{Nonexample:} Let's appreciate some of the non-intuitiveness of $\omega_1$. It is an uncountable well-ordered set, but every proper initial segment is countable. Unlike $\omega = \sup \{0,1,2,\ldots\}$, $\omega_1$ is not the supremum of any countable subset. Suppose $\omega_1 = \sup \Gamma$ for some countable $\Gamma \subseteq \omega_1$; then
\[ \omega_1 = \bigcup \Gamma, \] so $\omega_1$ is a countable union of countable sets, hence countable; contradiction.
\section{Cofinality}
The phenomenon from above is captured by the notion of cofinality. A subset $N \subseteq P$ of an ordered set is called \emph{cofinal} if
\[ \forall p \in P \exists n \in N : p \leq n. \]
The cofinality $\cf(P)$ is the least cardinality of a cofinal subset of $P$. We have $\cf(P) = 1$ iff $P$ has a maximal element; otherwise $\cf(P) \geq \omega$. \\
For an ordinal $\alpha$, $\cf(\alpha)$ is the least cardinality of a subset $\Gamma \subseteq \alpha$ with $\alpha = \sup \Gamma$. We have shown that $\cf(\omega_1) = \omega_1$. \\
A cardinal $\lambda$ is called \emph{regular} iff $\cf(\lambda) = \lambda$. \\
\textbf{Proposition: } For every successor ordinal $\alpha$, the cardinal $\aleph_\alpha$ is regular. For every limit ordinal $\lambda$, we have $\cf(\aleph_\lambda) = \cf(\lambda)$ instead. \\
\textbf{Example: } $\aleph_\omega$ is not regular, as
\[ \cf(\aleph_\omega) = \cf(\omega) = \omega. \]
In fact, by definition $\aleph_\omega = \sup \{ \aleph_0, \aleph_1, \ldots \}$ is the supremum of a countable subset.
\section{Topology}
On every totally ordered set, there is an induced topology called the \emph{order topology} given by the basis of open rays and open intervals. For example, the order topology on $\mathbb R$ with the usual order is just the euclidean one.
Every ordinal $\alpha$ becomes a topological space of its own in the order topology. For example the ordinal $\omega$ is homeomorphic to the subspace $\mathbb N \subset \mathbb R$, whereas $\omega^+$ is homeomorphic to
\[ \{ 1 - 1/n : n = 1,2, \ldots\} \cup \{ 1 \}. \]
From $\omega_1$ onwards, ordinals are no longer metrizable in the order topology. \\
\pagebreak
\part{Ordinal arithmetic}
\section{Addition}
Ordinal arithmetic is defined by recursion on the right operand. This leads to a certain asymmetry and enforces certain rules only on the right side.
\[
\begin{cases}
\alpha + 0 = \alpha \\
\alpha + (\beta^+) = (\alpha + \beta)^+ \\
\alpha + \lambda = \bigcup_{\beta < \lambda} \alpha + \beta
\end{cases}
\]
\textbf{Proposition: } Addition is strictly monotonic and continuous in the right argument, and it is associative.
\textbf{Non-examples: } Addition is merely non-decreasing in the left argument and not continuous. Addition is not commutative.
We have
\[ 2 + \omega = \bigcup_{n < \omega} 2 + n = \sup \{ 2, 3, 4, \ldots \} = \omega. \]
From there, we get $0 + \omega = \omega = 2 + \omega$ and $\omega + 2 \neq 2 + \omega$. Regarding continuity
\[ \omega + 2 = \sup \{0,1,2,\ldots\} + 2 \neq \sup \{0 + 2, 1 + 2, \ldots\} = \omega. \]
\textbf{Proposition} Let $P,Q$ be two well-orders with order types $\alpha,\beta$. Then $\alpha+\beta$ is the order type of the well-order on $P \sqcup Q$ where every element of $P$ comes before every element of $Q$. E.g.
\[ \omega + \omega + 1 \cong \{ 0 < 1 < 2 < \ldots 0' < 1' < 2' < \ldots 0'' \} \]
\section{Ordinal multiplication}
We define multiplication by continuity and distributivity on the right.
\[
\begin{cases}
\alpha \cdot 0 = 0 \\
\alpha \cdot (\beta + 1) = (\alpha \cdot \beta) + \alpha \\
\alpha \cdot \lambda = \bigcup_{\beta < \lambda} \alpha \cdot \beta
\end{cases}
\]
\textbf{Proposition: } Multiplication is strictly monotonic and continuous on the right and associative. $1$ is a two-sided identity.
\textbf{Non-examples: } Multiplication is merely non-decreasing in the left argument and not continuous. Multiplication is not commutative and not distributive in the left argument. \\
We have $\omega \cdot 2 = \omega + \omega$, whereas
\[ 2 \cdot \omega = \bigcup_{n<\omega} 2\cdot n = \sup \{ 0, 2, 4, \ldots \} = \omega. \]
Therefore we get $\omega \cdot 2 \neq 2 \cdot \omega$, and $1\cdot \omega = 2\cdot \omega$ although $1 < 2$. Distributivity fails in the left argument: $1\cdot\omega + 1\cdot\omega = \omega\cdot 2 \neq \omega = (1+1)\cdot \omega$. Regarding continuity,
\[ \omega \cdot \omega = \sup \{ 1, 2, 3, \ldots \} \cdot \omega \neq \sup \{ 1 \cdot \omega, 2 \cdot \omega, 3 \cdot \omega, \ldots \} = \sup \{ \omega, \omega, \omega, \ldots \}. \]
\textbf{Proposition: } Let $P,Q$ be two well-orders with order types $\alpha,\beta$. Then $\alpha \cdot \beta$ is the order type of $P \times Q$ regarded as $Q$ copies of $P$ (reverse lexical order), i.e.
\[ \omega\cdot 2 \cong \{ 0 < 1 < 2 < \ldots 0' < 1' < 2' < \ldots \} \cong \omega + \omega \]
whereas
\[ 2 \cdot \omega \cong \{ 0 < 1 < 0' < 1' < 0'' < 1'' < \ldots \} \cong \omega \]
\section{Ordinal exponentiation}
Ordinal exponentiation is defined recursively with continuity and the power law $\alpha^{\beta + \gamma} = \alpha^\beta \cdot \alpha^\gamma$ in mind.
\[
\begin{cases}
\alpha^0 = 1 \\
\alpha^{\beta + 1} = \alpha^\beta \cdot \alpha \\
\alpha^\lambda = \bigcup_{\beta < \lambda} \alpha^\beta
\end{cases}
\]
\textbf{Proposition} Exponentiation is strictly monotonic and continuous in the exponent. It respects multiplication in the exponent, i.e. $\alpha^{\beta\gamma} = (\alpha^\beta)^\gamma$.
\textbf{Non-examples: } Exponentiation is merely non-decreasing and not continuous in the base. It does not respect multiplication in the base.
First of all, for all $1 < k < \omega$ we have
\[ k^\omega = \sup \{ k^n : n < \omega \} = \omega \]
Thus $2^\omega = 3^\omega$. Also
\[ \omega^2 \neq \sup \{ n^2 : n < \omega \} = \omega. \]
Regarding multiplication in the base, using associativity, we get
\[ (\omega \cdot 2)^2 = (\omega \cdot 2)(\omega \cdot 2) = \omega \cdot (2\omega) \cdot 2 = \omega \cdot \omega \cdot 2 = \omega^2 \cdot 2. \]
But then $(\omega \cdot 2)^2 \neq \omega^2 \cdot 2^2 = \omega^2 \cdot 4$.
\textbf{Proposition}
Let $P,Q$ be two well-orders with order types $\alpha, \beta$. Then $\alpha^\beta$ is the order type of the set of functions with finite support $\alpha^{(\beta)}$ in reverse lexical order.
\section{Fixed-points}
Consider the power towers
\[ \omega \uparrow\uparrow 1 = \omega, \omega \uparrow \uparrow (n + 1) = \omega^{\omega \uparrow \uparrow n}. \]
We define their supremum as
\[ \epsilon_0 := \sup \{ \omega \uparrow \uparrow n : n < \omega \}. \]
\textbf{Proposition: } $\epsilon_0$ satisfies the equation $\omega^{\epsilon_0} = \epsilon_0$.
By continuity in the exponent, we get
\[ \omega^{\epsilon_0} = \sup_{n < \omega} \left(\omega^{\omega \uparrow \uparrow n}\right) = \sup_{n < \omega} (\omega \uparrow \uparrow (n+1)) = \epsilon_0 \]
and $\epsilon_0$ is the least fixed point of $\omega^\cdot$. \\
\textbf{Proposition} $\epsilon_0 < \omega_1$, i.e. $\epsilon_0$ is still countable.
This is because for infinite ordinals of cardinality at most $\kappa$, all arithmetic operations still produce ordinals of size at most $\kappa$. Note that by our constructions, the resulting ordinals are in bijection with disjoint unions, products and sets of finite-support functions of their underlying sets.
Now
\[ \epsilon_0 = \bigcup_{n < \omega} \omega \uparrow \uparrow n \]
is a countable union of countable sets, thus countable. \\
Note that we can produce other fixed-points for the continuous arithmetical operations in the very same fashion
\begin{align*}
\omega^{\epsilon_0} &= \epsilon_0 \\
\omega\cdot \omega^\omega &= \omega^\omega \\
\omega + \omega^2 &= \omega^2 \\
1 + \omega &= \omega.
\end{align*}
\section{Cantor normal form}
The ordinals in the interval $[0,\epsilon_0)$ are closed under ordinal addition, multiplication and exponentiation. Every ordinal $\alpha$ in that interval can be written in a unique so-called \emph{Cantor normal form} (CNF).
\[ \alpha = \omega^{\beta_1}c_1 + \ldots + \omega^{\beta_n}c_n \]
where $\alpha > \beta_1 > \ldots > \beta_n$ and $c_i \in \mathbb N$.
We can recursively transform the exponents $\beta_i$ in CNF and get a hereditary representation in base $\omega$ after finitely many steps. We see that $[0,\epsilon_0)$ is actually generated by the natural numbers and $\omega$ under the arithmetical operations. Arithmetic in CNF has simple computational descriptions.
\subsection{Order}
CNFs can be compared lexicographically. Highest exponents first, then coefficients.
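\textbf{Example}
\[ \omega^{\omega}\cdot 2 + \omega^{3}\cdot 5 < \omega^{\omega}\cdot 3 < \omega^{\omega^{2}}, \]
since the leading exponents are compared first ($\omega < \omega^2$ gives the second inequality) and the leading coefficients only afterwards ($2 < 3$ gives the first).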
\subsection{Addition}
We see that
\[
\omega^\beta c + \omega^{\beta'}c' = \begin{cases}
\omega^\beta c + \omega^{\beta'}c' & \text{ if } \beta > \beta' \\
\omega^\beta (c+c') & \text{ if } \beta = \beta' \\
\omega^{\beta'}c' & \text{ if } \beta < \beta'
\end{cases}
\]
If exponents are decreasing, they are already in CNF. If exponents are the same, simplify. If exponents are increasing, the bigger term absorbs the smaller one.
\textbf{Example}
\[ \omega^2 + \omega^3 = \omega^2 + \omega^2\omega = \omega^2(1+\omega) = \omega^2\omega = \omega^3. \]
Thus in order to add a term to a CNF, insert it at the correct position, simplify and drop all further terms.
\subsection{Multiplication}
Multiplication is distributive on the right. Thus we just need to understand how to multiply a CNF with a single term on the right. Let
\[ \alpha = \omega^{\beta_1}c_1 + \ldots + \omega^{\beta_n}c_n \]
then for $\beta > 0$
\[ \alpha \cdot \omega^\beta = (\omega^{\beta_1}c_1 + \ldots + \omega^{\beta_n}c_n) \cdot \omega^\beta = \omega^{\beta_1 + \beta}. \]
Through repeated summation, we get for $n \in \mathbb N \setminus \{0\}$
\[ \alpha \cdot n = \omega^{\beta_1}c_1 n + \ldots + \omega^{\beta_n}c_n. \]
So multiplication just acts on the highest term. \textbf{For example}
\[ (\omega^3 + \omega^2)\cdot 3 = \omega^3 + \omega^2 + \omega^3 + \omega^2 + \omega^3 + \omega^2 = \omega^3\cdot 3 + \omega^2. \]
In the limit case, we get
\begin{align*}
(\omega^3 \cdot 2 + \omega^2)\omega &= \sup_{n < \omega} (\omega^3 \cdot 2 + \omega^2)n \\
&= \sup_{n < \omega} (\omega^3 \cdot 2n + \omega^2) \\
&= \omega^3 \omega = \omega^4.
\end{align*}
\subsection{Exponentiation}
Exponentiation with base $\omega$ is easy. For any $\beta$ in CNF, $\omega^\beta$ is already in CNF. Exponentiation with arbitrary base is more complicated. Let again
\[ \alpha = \omega^{\beta_1}c_1 + \ldots + \omega^{\beta_n}c_n \]
By the power laws
\[
\alpha^{\beta + \gamma} = \alpha^\beta \cdot \alpha^\gamma, \quad \alpha^{\beta \gamma} = (\alpha^\beta)^\gamma, \]
we only need to understand taking powers with a single term. We distinguish finite and transfinite terms.
\subsubsection{Finite exponents}
For $0 < r < \omega$, we can compute $\alpha^r$ by repeated multiplication. \\
\subsubsection{Transfinite exponents}
For exponents $\omega^\beta$ with $\beta > 0$, first distinguish whether $\alpha$ is finite. If it is (and $\alpha \geq 2$), note that
\[ \alpha^\omega = \omega, \]
so if $\beta$ is finite, then
\[ \alpha^{\omega^\beta} = \omega^{\omega^{\beta-1}}. \]
If $\beta$ is transfinite, then $\beta = 1 + \beta$, so
\[ \alpha^{\omega^\beta} = \alpha^{\omega^{1+\beta}} = \alpha^{\omega\omega^\beta} = \omega^{\omega^\beta}. \]
If $\alpha$ is transfinite, we simply get
\[ \alpha^{\omega^\beta} = \omega^{\beta_1\omega^\beta}. \]
\textbf{Example}
\begin{align*}
(\omega^2 \cdot 2)^\omega &= \sup_{n<\omega} (\omega^2 \cdot 2)^n \\
&= \sup_{n < \omega} \omega^{2n}\cdot 2 \\
&= \omega^\omega.
\end{align*}
\section{Goodstein's sequence}
Goodstein's theorem is a very surprising theorem in number theory. Fix a base $b \geq 2$. Take any number $n$ and write it in base-$b$,
\[ n = b^{k_1}c_1 + \ldots + b^{k_\ell} c_\ell. \]
Now $n > k_1 > \ldots > k_\ell$, so we can write the exponents in base-$b$ again and obtain a hereditary base-$b$ representation. \textbf{Example}
\[ 13 = 8 + 4 + 1 = 2^3 + 2^2 + 1 = 2^{2+1} + 2^2 + 1. \]
Fix a number $n$. We define the \emph{Goodstein sequence} $g^n(2), g^n(3), \ldots$ recursively. Let $g^n(2) = n$. Now write $g^n(2)$ in hereditary base-$2$ and bump the base, i.e. replace all $2$s by $3$s. Subtract one. That gives us $g^n(3)$. Repeat that, so in general
\[ g^n(b+1) = \text{ write $g^n(b)$ in base-$b$, replace $b\to b+1$, subtract $1$}. \]
\textbf{For example}
\begin{align*}
g^4(2) &= 4 = 2^2 \\
g^4(3) &= 3^3 - 1 = 26 \\
&= 3^2\cdot 2 + 3\cdot 2 + 2 \\
g^4(4) &= 4^2 \cdot 2 + 4 \cdot 2 + 1 \\
g^4(5) &= 5^2 \cdot 2 + 5 \cdot 2 \\
g^4(6) &= 6^2 \cdot 2 + 6 \cdot 2 - 1 \\
&= 6^2 \cdot 2 + 6 + 5 \\
g^4(7) &= 7^2 \cdot 2 + 7 + 4
\end{align*}
The numerical values of this sequence are
\[ 4, 26, 41, 60, 83, 109, \ldots \]
\textbf{Theorem: }[Goodstein] For every integer $n$, the Goodstein sequence $g^n$ eventually becomes $0$. \\
The thing is: this takes long, extremely, ridiculously long. The Goodstein function $G : \mathbb N \to \mathbb N$
\[ G(n) = \min \{ b : g^n(b) = 0 \} \]
is one of the fastest-growing functions occurring in mathematics. We have
\[ G(3) = 7, \] that is $g^3$ terminates at base $7$. How long might $g^4$ need? We have
\[ G(4) = 3\cdot 2^{402653211}-1. \]
That is a number with roughly 121 million \emph{decimal digits}. The numbers of the sequence are of the same order of magnitude. Even though the numbers seem to get larger and larger, something forces them to become 0 at the end. They seem to lose some complexity throughout the operations. We can precisely pin down this complexity using ordinals and CNF. \\
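The small case $G(3) = 7$ from above can be checked by hand:
\begin{align*}
g^3(2) &= 3 = 2 + 1 \\
g^3(3) &= (3 + 1) - 1 = 3 \\
g^3(4) &= 4 - 1 = 3 \\
g^3(5) &= 3 - 1 = 2 \\
g^3(6) &= 2 - 1 = 1 \\
g^3(7) &= 1 - 1 = 0.
\end{align*}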
\textbf{Proof of Goodstein's theorem} We know how the number $g^n(b)$ can be written in hereditary base-$b$. Define a second sequence $w^n(b)$ of ordinals in CNF by taking that representation and replacing $b$ with $\omega$. For example
\begin{align*}
w^4(2) &= \omega^\omega \\
w^4(3) &= \omega^2 \cdot 2 + \omega \cdot 2 + 2 \\
w^4(4) &= \omega^2 \cdot 2 + \omega \cdot 2 + 1 \\
w^4(5) &= \omega^2 \cdot 2 + \omega \cdot 2 \\
w^4(6) &= \omega^2 \cdot 2 + \omega + 5
\end{align*}
This sequence of ordinals is strictly decreasing: bumping the base leaves the associated ordinal unchanged, while subtracting one strictly decreases it. Since the ordinals are well-ordered, there is no infinite strictly decreasing sequence, so $g^n$ can only take finitely many steps before stabilizing at zero.
\end{document} | {
"alphanum_fraction": 0.6766571252,
"avg_line_length": 55.3322981366,
"ext": "tex",
"hexsha": "49bceae82831bf8f762f00cecf54ebb87f09d2e4",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "cbf82f1916b37132c3da4ca17a31465859ab2260",
"max_forks_repo_licenses": [
"CC0-1.0"
],
"max_forks_repo_name": "damast93/ordinals",
"max_forks_repo_path": "doc/ordinals.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "cbf82f1916b37132c3da4ca17a31465859ab2260",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC0-1.0"
],
"max_issues_repo_name": "damast93/ordinals",
"max_issues_repo_path": "doc/ordinals.tex",
"max_line_length": 477,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "cbf82f1916b37132c3da4ca17a31465859ab2260",
"max_stars_repo_licenses": [
"CC0-1.0"
],
"max_stars_repo_name": "damast93/ordinals",
"max_stars_repo_path": "doc/ordinals.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 5777,
"size": 17817
} |
\documentclass[a4paper, 12pt]{article}
\input a4size
\title{ENCE461 Schematic Review}
\author{M.P. Hayes}
\date{}
\begin{document}
\maketitle
\begin{center}
\textbf{Bring your schematics, printed on A3 paper}
\end{center}
\section{Common}
\begin{enumerate}
\item Student names and group number in title block
\item Battery fusing (this is mandatory)
\item Can be powered from USB
\item Use serial wire debug interface for programming
\item 3.3\,V MCU regulator can be back driven
\item Short circuit protection for MCU pio pins going to external headers
\item Battery voltage monitoring
\item Do the analogue inputs to the MCU exceed 3.3\,V?
\item LEDs for debugging
\item Jumpers for mode configuration
\item Pullup resistors on TWI bus
\item Test points
\item Ground test points
\item Game board interface connects to USART (TXD0/PA6 or TXD1/PA22 to
TXD, RXD0/PA5 or RXD1/PA21 to RXD)
\item USB has series termination resistors
\item VBUS detection through voltage divider to PIO pin (see the note after
  this list). This is needed so that the MCU can tell when USB is plugged in
  or removed. You will also need diodes (or jumpers) so that the USB 5\,V can
  be connected to the 5\,V from the switching regulator.
\item Power supply filtering for radio (recommend ferrite bead or
resistor in series with power rail with parallel capacitor)
\item The radio needs to be connected to SPI pins (MISO/PA12,
MOSI/PA13, SCK/PA14)
\item TWI uses TWCK0/PA4 and TWD0/PA3 or TWCK1/PB5 and TWD1/PB4.
\item SAM4S erase pin on testpoint
\item SAM4S has 12\,MHz crystal
\item Reset button connected to NRST pin
\item Power on/off button connected to WKUPn pin
\item Avoid PB4--PB5 for general I/O (they default to JTAG pins on
reset but can be reconfigured in software)
\item Have external pull-down resistors to ensure chips are disabled on
power-up
\item Have a few spare PIO pins connected to pads for last minute mods.
\end{enumerate}
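\noindent For the VBUS divider item above, the PIO pin sees
\[ V_{\mathrm{PIO}} = V_{\mathrm{BUS}} \frac{R_2}{R_1 + R_2}, \]
so, for instance, a (hypothetical) choice of $R_1 = 10\,\mathrm{k\Omega}$ and
$R_2 = 15\,\mathrm{k\Omega}$ maps 5\,V down to 3.0\,V, safely below the
3.3\,V limit of the MCU pins.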
\section{Hat board}
\begin{enumerate}
\item Battery can be charged from USB
\item Fall-back option if IMU does not work
\item Nav-switch or joystick for remote control
\item Drive circuit for piezo tweeter
\end{enumerate}
\section{Racer board}
\begin{enumerate}
\item Fall-back option to drive motors via servo interface using PWM
if H-bridge driver fails
\item MOSFET(s) for actuator (if use p-channel MOSFET need transistor
to provide sufficient gate voltage to turn MOSFET off)
\item H-bridge driven by four PWM signals (it is best to use PWMHx,
note PWMLx and PWMHx are complementary)
\item H-bridge AISEN and BISEN pins connected to ground (unless using
current control)
\end{enumerate}
\end{document}
| {
"alphanum_fraction": 0.7253020611,
"avg_line_length": 24.0512820513,
"ext": "tex",
"hexsha": "eda5dd8335f66c396b0399f63c6b651778d36c23",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "3a0ef3e75e5ca20a0aceb037306e8de5571602f7",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ana104-collab/Wacky-Racer-Embedded-Systems",
"max_forks_repo_path": "doc/schematic-review/schematic-review.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "3a0ef3e75e5ca20a0aceb037306e8de5571602f7",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ana104-collab/Wacky-Racer-Embedded-Systems",
"max_issues_repo_path": "doc/schematic-review/schematic-review.tex",
"max_line_length": 74,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "3a0ef3e75e5ca20a0aceb037306e8de5571602f7",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ana104-collab/Wacky-Racer-Embedded-Systems",
"max_stars_repo_path": "doc/schematic-review/schematic-review.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 744,
"size": 2814
} |
%
% @author Shmish "[email protected]"
% @legal MIT "(c) Christopher Schmitt"
%
\documentclass{article}
%
% Document Imports
%
\usepackage{fancyhdr}
\usepackage{extramarks}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage{amsfonts}
\usepackage{color}
\usepackage{tikz}
%
% Document Configuation
%
\newcommand{\hwAuthor}{Christopher Schmitt}
\newcommand{\hwSubject}{Math 218}
\newcommand{\hwSection}{Section 81}
\newcommand{\hwSemester}{Summer 2019}
\newcommand{\hwAssignment}{Assignment 5}
%
% Document Enviornments
%
\setlength{\headheight}{65pt}
\pagestyle{fancy}
\lhead{\hwAuthor}
\rhead{
\hwSubject \\
\hwSection \\
\hwSemester \\
\hwAssignment
}
\newenvironment{problem}[1]{
\nobreak\section*{Problem #1}
}{}
%
% Document Start
%
\begin{document}
\begin{problem}{1}
\begin{center}
\textbf{(a)} Prove that the set $12\mathbf{Z}$ of all multiples of $12$ is countably infinite.
\end{center}
\begin{center}
\begin{tikzpicture}[scale=1]
\node (1) at (0, 1) {$1$};
\node (2) at (1, 1) {$2$};
\node (3) at (2, 1) {$3$};
\node (4) at (3, 1) {$4$};
\node (5) at (4, 1) {$5$};
\node (6) at (5, 1) {$6$};
\node (7) at (6, 1) {$\dots$};
\node (0) at (0, 0) {$0$};
\node (12) at (1, 0) {$12$};
\node (-12) at (2, 0) {$-12$};
\node (24) at (3, 0) {$24$};
\node (-24) at (4, 0) {$-24$};
\node (36) at (5, 0) {$36$};
\node (-36) at (6, 0) {$\dots$};
\draw (1) -- (0);
\draw (2) -- (12);
\draw (3) -- (-12);
\draw (4) -- (24);
\draw (5) -- (-24);
\draw (6) -- (36);
\end{tikzpicture}
\end{center}
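\begin{center}
The pictured matching corresponds to the explicit bijection $f : \mathbf{Z}^{+} \rightarrow 12\mathbf{Z}$,
\[ f(n) = \begin{cases} 6n & \text{if } n \text{ is even} \\ -6(n-1) & \text{if } n \text{ is odd,} \end{cases} \]
so $12\mathbf{Z}$ is countably infinite.
\end{center}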
\begin{center}
\textbf{(b)} Prove that the set $\mathbf{Z}^{+} \times \mathbf{Z}^{+}$ is countably infinite.
\end{center}
\begin{center}
\begin{tikzpicture}[scale=1.5]
\node (1) at (1, 1) {$(1, 1)$};
\node (2) at (1, 2) {$(1, 2)$};
\node (3) at (2, 1) {$(2, 1)$};
\node (4) at (3, 1) {$(3, 1)$};
\node (5) at (2, 2) {$(2, 2)$};
\node (6) at (1, 3) {$(1, 3)$};
\node (7) at (1, 4) {$(1, 4)$};
\node (8) at (2, 3) {$(2, 3)$};
\node (9) at (3, 2) {$(3, 2)$};
\node (10) at (4, 1) {$(4, 1)$};
\draw (1) -- (2) -- (3) -- (4) -- (5) -- (6) -- (7) -- (8) -- (9) --(10);
\end{tikzpicture}
\end{center}
\end{problem}
\begin{problem}{2}
\begin{center}
\textbf{(a)} Prove that the set $\{x \in \mathbf{R} \mid 0.53 < x < 0.54\}$ (in other words, the interval $(0.53, 0.54)$) is uncountable.
\end{center}
\begin{center}
$f : (0.53, 0.54) \rightarrow \mathbf{R}$\\
$f(x) = \tan(100\pi(x - 0.535))$\\
Since a bijection exists between $\mathbf{R}$ and $(0.53, 0.54)$, $(0.53, 0.54)$ is uncountable.
\end{center}
\begin{center}
\textbf{(b)} Let $A$ be the set of all infinite sequences of positive integers. For example, one of the elements of $A$ is the sequence $1, 2, 3, 4, 5, 6, \dots$ Another element of $A$ is the sequence $1, 2, 4, 8, 16, 32, \dots$ Prove that $A$ is uncountable.
\end{center}
\begin{proof}
Suppose that the set $A$ is countably infinite.\\
$\implies$ A bijection exists between $A$ and $\mathbf{Z^{+}}$\\
$\implies$ Some function, $f : \mathbf{Z^{+}} \rightarrow A$ is onto\\
$\implies$ A list can be created matching every member of $A$ to $\mathbf{Z^{+}}$\\
However, an element of $A$ can be constructed that is not in the list: the sequence $x_1, x_2, x_3, \ldots$, where $x_n$ is any positive integer that differs from the $n$'th term of the $n$'th sequence in the list. So $f$ cannot be onto.
\end{proof}
\end{problem}
\begin{problem}{3}
Find the quotient and the remainder when $a$ is divided by $b$, for the following values of $a$ and $b$:
\begin{enumerate}
\item[\textbf{(a)}] $a = 148$, $b = 9$, $q = 16$, $r = 4$
\item[\textbf{(b)}] $a = -148$, $b = 9$, $q = -17$, $r = 5$
\item[\textbf{(c)}] $a = 148$, $b = -9$, $q = -16$, $r = 4$
\item[\textbf{(d)}] $a = -148$, $b = -9$, $q = 17$, $r = 5$
\end{enumerate}
\end{problem}
\begin{problem}{4}
Find the binary and hexadecimal representations of the decimal number 2775.
\begin{center}
$101011010111_{2}$\\
$AD7_{16}$
\end{center}
\end{problem}
\begin{problem}{5}
Use the Extended Euclidean Algorithm to compute the greatest common divisor of the integers $768$ and $46$, and to express that greatest common divisor in the form $768x + 46y$, where $x, y \in Z$.
\begin{center}
$2 = (-10)(768) + 167(46)$
\end{center}
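The underlying computation, included for completeness, is
\begin{align*}
768 &= 16 \cdot 46 + 32 \\
46 &= 1 \cdot 32 + 14 \\
32 &= 2 \cdot 14 + 4 \\
14 &= 3 \cdot 4 + 2 \\
4 &= 2 \cdot 2 + 0,
\end{align*}
so $\gcd(768, 46) = 2$, and back-substitution gives
\[ 2 = 14 - 3 \cdot 4 = 7 \cdot 14 - 3 \cdot 32 = 7 \cdot 46 - 10 \cdot 32 = 167 \cdot 46 - 10 \cdot 768. \]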
\end{problem}
\begin{problem}{6}
Factor the integer 155,540 into a product of primes.
\begin{center}
$2^{2} \times 5 \times 7 \times 11 \times 101$
\end{center}
\end{problem}
\end{document}
| {
"alphanum_fraction": 0.5397658458,
"avg_line_length": 30.0242424242,
"ext": "tex",
"hexsha": "fb7d851af053a761f48c86b025e22cc2f79ac99c",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "877cdf2586d3e6f8be639b16e17715a9cbfc8715",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "shmishtopher/MATH-218",
"max_forks_repo_path": "src/Assignment_005.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "877cdf2586d3e6f8be639b16e17715a9cbfc8715",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "shmishtopher/MATH-218",
"max_issues_repo_path": "src/Assignment_005.tex",
"max_line_length": 265,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "877cdf2586d3e6f8be639b16e17715a9cbfc8715",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "shmishtopher/MATH-218",
"max_stars_repo_path": "src/Assignment_005.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1890,
"size": 4954
} |
\subsection{Multilingual Experiments}
\label{sec:multilang}
\noindent Following Christodoulopoulos et
al. \shortcite{christodoulopoulos-goldwater-steedman:2011:EMNLP}, we extend our
experiments to 8 languages from MULTEXT-East (Bulgarian, Czech, English,
Estonian, Hungarian, Romanian, Slovene and Serbian) \cite{citeulike:5820223}
and 10 languages from the CoNLL-X shared task (Bulgarian, Czech, Danish, Dutch,
German, Portuguese, Slovene, Spanish, Swedish and Turkish)
\cite{Buchholz:2006:CST:1596276.1596305}.
To sample substitutes, we trained language models of Bulgarian, Czech,
Estonian, Romanian, Danish, German, Dutch, Portuguese, Spanish, Swedish and
Turkish with their corresponding TenTen corpora \cite{jakubivcek2013tenten},
and Hungarian, Slovene and Serbian with their corresponding Wikipedia dump
files\footnote{Latest Wikipedia dump files are freely available at
\url{http://dumps.wikimedia.org/} and the text in the dump files can be
extracted using WP2TXT (\url{http://wp2txt.rubyforge.org/})}. Serbian shares
a common basis with Croatian and Bosnian; we therefore trained 3 different
language models using the Wikipedia dump files of Serbian together with these two
languages and measured the perplexities on the MULTEXT-East Serbian corpus.
We chose the Croatian language model since it achieved the lowest perplexity
score and unknown word ratio on the MULTEXT-East Serbian corpus. We used the
ukWaC corpus to train the English language models.
We used the default settings in Section~\ref{sec:expset} and incorporated only
the orthographic features\footnote{All corpora (except German, Spanish and
Swedish) label the punctuation marks with the same gold-tag therefore we add an
extra {\em punctuation} feature for those languages.}. Extracting
unsupervised morphological features for languages with different
characteristics would be of great value, but it is beyond the scope of this
paper. For each language the number of induced clusters is set to the number
of tags in the gold-set. To perform meaningful comparisons with the previous
work we train and evaluate our models on the training section of
MULTEXT-East\footnote{Languages of MULTEXT-East corpora do not tag the
punctuations, thus we add an extra tag for punctuations to the tag-set of these
languages.} and CoNLL-X languages \cite{Lee:2010:STU:1870658.1870741}.
Table~\ref{tab:multiresults} presents the performance of our instance based
model on 19 corpora in 15 languages together with the corresponding best
published results from
$^\diamond$\protect\cite{yatbaz-sert-yuret:2012:EMNLP-CoNLL},
$^\ddagger$\protect\cite{blunsom-cohn:2011:ACL-HLT2011},
$^\star$\protect\cite{christodoulopoulos-goldwater-steedman:2011:EMNLP} and
$^\dagger$\protect\cite{Clark:2003:CDM:1067807.1067817}. All of the
state-of-the-art systems in Table~\ref{tab:multiresults} are word-based and
incorporate morphological features.
\input{multibesttable_instance.tex}
Our \mto\ results are lower than the best systems on all of the data-sets that use
language models trained on the Wikipedia corpora. The ukWaC and TenTen corpora are
cleaner and better tokenized than the Wikipedia corpora. These corpora
also have larger vocabulary sizes and lower out-of-vocabulary rates. Thus
language models trained on these corpora have much lower perplexities and
generate better substitutes than the Wikipedia-based models. Our model has
lower \vm\ scores in spite of good \mto\ scores on 14 corpora, which is
discussed in Section~\ref{sec:discuss}.
Among the languages for which clean language model corpora were available, our
model performs comparable to or significantly better than the best systems on
most languages. We show significant improvements on MULTEXT-East Czech,
Romanian, and CoNLL-X Bulgarian. Our model achieves the state-of-the-art \mto\
on MULTEXT-East English and scores comparable \mto\ with the best model on WSJ.
Our model shows comparable results on MULTEXT-East Bulgarian and Estonian, and
CoNLL-X Czech, Danish, Dutch, German, Portuguese, Swedish and Turkish in terms
of the \mto\ score. One reason for the comparatively low \mto\ on Spanish might be
the absence of morphological features.
% TODO: describe the table.
%% Morphological features of each language are extracted by the
%% method described in Section~\ref{sec:feat}. The details of the
%% language model training and feature extraction are detailed in
%% Appendix~D.
%% We ignore these results
%% \input{tokentable.tex}
%% \subsubsection{Results}
%% \label{sec:multires}
%% For each language we report results of three models that cluster: (1)
%% word embeddings ({\em CLU-W}), (2) word embeddings with orthographic
%% features ({\em CLU-W+O}) and (3) word embeddings with both orthographic
%% and morphological features ({\em CLU-W+O+M}).
%% As a baseline model we chose the syntagmatic bigram version of S-CODE
%% described in Section~\ref{sec:pvss} which is a very strong baseline
%% compared to the ones used in
%% \cite{christodoulopoulos-goldwater-steedman:2011:EMNLP}.
%% Table~\ref{tab:multiresults} summarizes the \mto\ and \vm\ scores of
%% our models together with the syntagmatic bigram baseline and the best
%% published accuracies on each language corpus.
%% {\em CLU-W} significantly outperforms the syntagmatic bigram baseline
%% in both \mto\ and \vm\ scores on 14 languages. {\em CLU-W+O+M} has
%% the state-of-the-art \mto\ and \vm\ accuracy on the PTB. {\em
%% CLU-W+O} and {\em CLU-W+O+M} achieve the highest \mto\ scores on all
%% languages of MULTEXT-East corpora while scoring the highest \vm\
%% accuracies on English and Romanian. On the CoNLL-X languages our
%% models perform better than the best published \mto\ or \vm\ accuracies
%% on 10 languages.
| {
"alphanum_fraction": 0.7862489121,
"avg_line_length": 58.6224489796,
"ext": "tex",
"hexsha": "83518d6bf5f8cc78ca84c28ac3bca944cd4f5553",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f4723cac53b4d550d2b0c613c9577eb247c7ff4a",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ai-ku/upos_2014",
"max_forks_repo_path": "papers/coling2014/multi.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f4723cac53b4d550d2b0c613c9577eb247c7ff4a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ai-ku/upos_2014",
"max_issues_repo_path": "papers/coling2014/multi.tex",
"max_line_length": 79,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "f4723cac53b4d550d2b0c613c9577eb247c7ff4a",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ai-ku/upos_2014",
"max_stars_repo_path": "papers/coling2014/multi.tex",
"max_stars_repo_stars_event_max_datetime": "2015-06-06T07:13:43.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-06-06T07:13:43.000Z",
"num_tokens": 1515,
"size": 5745
} |
% !TEX root = ../thesis-letomes.tex
\chapter{Preface}
This master's thesis was prepared at the Department of Applied Mathematics and Computer Science at the Technical University of Denmark in fulfillment of the requirements for acquiring a Master of Science in Engineering degree in Mathematical Modelling and Computation for Gandalf Saxe and a Master of Science in Engineering degree in Digital Media Engineering for Oisin Daly Kiær.
% NOTE: Capitalize the type of degree and the actual degree itself. E.g. say "Master of Science in Engineering degree in Mathematical Modelling and Computation", see https://www.grammarly.com/blog/masters-degree/
\vfill
{
\centering
\thesislocation{}, \today\\[1cm]
% \hspace{3cm}\includegraphics[scale=0.4]{Signature}\\[1cm]
% \begin{figure}[H]
% \subfloat{
% \includegraphics[width=0.46\linewidth]{fig/signature-gandalf}
% }
% \hfill
% \subfloat{
% \includegraphics[width=0.46\linewidth]{fig/signature-oisin}
% }
% \end{figure}
\vspace{5cm}
\begin{flushright}
\thesisauthor{}
\end{flushright}
}
| {
"alphanum_fraction": 0.7132616487,
"avg_line_length": 42.9230769231,
"ext": "tex",
"hexsha": "77082d844cb5086341f022deedf3228ad342f7e1",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "5f73a4066fcf69260cb538c105acf898b22e756d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "GandalfSaxe/letomes",
"max_forks_repo_path": "report/frontmatter/Preface.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "5f73a4066fcf69260cb538c105acf898b22e756d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "GandalfSaxe/letomes",
"max_issues_repo_path": "report/frontmatter/Preface.tex",
"max_line_length": 378,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "5f73a4066fcf69260cb538c105acf898b22e756d",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "GandalfSaxe/letomes",
"max_stars_repo_path": "report/frontmatter/Preface.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 288,
"size": 1116
} |
\documentclass{my_cv}
\begin{document}
\name{Saikat Roy}
\contact{Hirschberger Strasse 64}{53119 Bonn}{Germany}{[email protected]}{(+49)-16283-19605}
% \longcontact{123 Broadway}{London}{UK 12345}{[email protected]}{(000)-111-1111}
\section{Objective}
\hspace{1pt}\parbox{0.99\textwidth}{
An experienced machine learning (ML) researcher with extensive academic and practical knowledge and high-impact publications. Specialization in Deep Learning (DL) for classification \& segmentation, with an extensive understanding of architectural and training paradigms. Actively looking for opportunities as an ML/DL researcher or Data Scientist.
}
\vspace{-7pt}
\section{Education}
\edsubsection{University of Bonn}{Bonn, Germany}{Master of Science, Computer Science (Intelligent Systems)}{2017--2020}{Fully-3D Deep CNNs for Segmentation of Neuroanatomy}\vspace{0.1cm}
\\
\edsubsection{Jadavpur University}{Kolkata, India}{Master of Engineering, Software Engineering}{2013--2015}{Supervised-Layerwise Training of Deep CNNs for Classification}
% \vspace{0.1cm}
% \\
% \edsubsection{West Bengal University of Technology}{Kolkata, India}{Bachelor of Technology, Computer Science \& Engineering}{2009--2013}{Higher-Order LSB emcoding for audio steganography}
\vspace{-15pt}
\section{Work Experience \hfill {\small \href{https://linkedin.com/in/mrsaikatroy}{\includegraphics[scale=0.075]{LI-Logo.png}}}}
\worksubsection{German Center for Neurodegenerative Diseases (DZNE)}{Bonn, Germany}{Research Assistant, Image Analysis Group}{2018--2020}{
\item[\textbf{--}] Led the development of optimized 3D CNN blocks for full-volume neuroanatomical segmentation through efficient reparameterization
\item[\textbf{--}] Developed architectures for optimized memory-usage during training to promote model reusability in semantic segmentation in medical imaging
% \item[\textbf{--}]
}\vspace{0.1cm}
\\
\worksubsection{Jadavpur University}{Kolkata, India}{Junior Research Fellow, Dept. of Computer Science \& Engg.}{2016--2017}{
\item[\textbf{--}] Applied recurrent neural networks and classical time series analysis algorithms to the problem of appliance energy usage prediction
\item[\textbf{--}] Implemented distributed gradient descent algorithms by developing a PySpark wrapper for a Keras and Flask framework
}
\vspace{0.1cm}
\\
\worksubsection{Indian Statistical Institute}{Kolkata, India}{Project Trainee (Intern), Computer Vision and Pattern Recognition Unit}{2015--2016}{
\item[\textbf{--}] Developed supervised-layerwise deep CNNs for document classification on limited data
}
\vspace{-12pt}
\section{Skills}
\item[]
Technical Proficiency \vspace{-6pt}
\begin{itemize}
\resitem{\textbf{Proficient:} Python, NumPy, Scikit-Learn, PyTorch, Git, \LaTeX, Matplotlib}
\resitem{\textbf{Familiar:} Linux (Usage and Shell Scripting), C, SciPy, R, SQL, Apache Spark, Matlab, Keras, Pandas, Docker}
% \resitem{Removed:} C++, Java, Scala, Pylearn2, Weka
\end{itemize}
\item[]
Relevant Courses \vspace{-6pt}
\begin{itemize}
\resitem{Machine Learning, Technical Neural Networks, Data Science \& Big Data, Distributed Big Data Analytics, Data Analytics and Visualization, Knowledge Graph Analysis, Deep Learning for Visual Recognition, Pattern Recognition, Deep Learning on GPUs, Advanced Deep Learning for Graphics}
% \resitem {\textbf{University of Bonn (M.Sc):} Machine Learning, Technical Neural Networks, Data Science \& Big Data, Distributed Big Data Analytics, Data Analytics and Visualization, Knowledge Graph Analysis, Deep Learning for Visual Recognition, Pattern Recognition, Deep Learning on GPUs, Advanced Deep Learning for Graphics}
% \resitem {\textbf{Jadavpur University (M.E.):} Distributed Databases, Advanced Operating Systems, Advanced Algorithms, Advanced Programming Lab}
% \resitem {\textbf{Bachelors:} Linear Algebra, Probability and Statistics, Calculus (Engineering Mathematics), Data Structures and Algorithms, Theory of Computation, Object-Oriented Programming,}
\end{itemize}
% \item[]
% Linguistic Proficiency \vspace{-6pt}
% \begin{itemize}
% \resitem{English (TOEFL: 114/120), Bengali (Native), Hindi (Basic) , German (Basic)}
% \end{itemize}
\end{itemize}
\section{Publications (Selected) \hfill { \small \href{https://scholar.google.de/citations?user=dSs0DfoAAAAJ&hl=en}{\includegraphics[scale=0.15]{scholar_logo_64dp.png}}}}
% \parbox{\textwidth}{
\begin{itemize}[leftmargin=10pt]
% \setlength{\itemsep}{0pt}
\item[\textbf{--}] A. Das, \textbf{S. Roy}, U. Bhattacharya, S.K. Parui, ``Document Image Classification with Intra-Domain Transfer Learning and Stacked Generalization of Deep Convolutional Neural Networks,`` \textit{$24^{th}$ International Conference on Pattern Recognition (ICPR)}, Beijing, China, 2018.
% \item \textbf{S. Roy}, K. Mishra, S. Basu, U. Maulik, ``A Distributed Multilabel Classification Approach towards Mining Appliance Usage in Smart Homes,`` \textit{IEEE Calcutta Conference (CALCON)}, Kolkata, India, 2017.
\item[\textbf{--}] \textbf{S. Roy}, N. Das, M. Kundu, M. Nasipuri, ``Handwritten Isolated Bangla Compound Character Recognition: A new benchmark using a novel deep learning approach,`` \textit{Pattern Recognition Letters, Elsevier}, Vol. 90, pp.15-21, 2017.
\item[\textbf{--}] \textbf{S. Roy}, A. Das, U. Bhattacharya, ``Generalized Stacking of Layerwise-trained Deep Convolutional Neural Networks for Document Image Classification,`` \textit{$23^{rd}$ International Conference on Pattern Recognition (ICPR)}, Cancun, Mexico, 2016.
\end{itemize}
% }
\section{Projects \hfill {\small \href{https://github.com/saikat-roy}{\includegraphics[scale=0.03]{GitHub_Logo.png}}}}
\begin{itemize}[leftmargin=10pt, itemsep=0pt]
\item[--] Implementation of \texttt{Autoencoders, GANs} (Advanced Deep Learning course)
\item[--] \texttt{Deep CNNs} for Humanoid Robot Part Detection and Localization (Vision Systems lab)
\item[--]Implementation of \texttt{Logistic Regression, MLPs, CNNs, VGGNets, ResNets, LSTMs, GRUs, Transfer Learning} based Nets (Vision Systems lab)
\item[--] Scalable Evolutionary Algorithm for Association Rule Mining from Ontological Knowledge Bases using Apache Spark (Distributed Big Data lab)
\item[--] \texttt{Deep Convolutional GAN} retraining on ImageNet-1k (Deep Learning lecture assignment)
\item[--] Implementation of \texttt{Decision Trees and Rules, NN Classifiers, Ridge Regression} (Machine Learning course)
% \item[--] Distributed Regression Models for Appliance Usage Analytics \dotfill 2016
\item[--] Deep \texttt{CNN-LSTM} Networks for Electric Load and Wind Power Forecasting
% \item[--] CNN based models for Social Network Analysis \dotfill 2016
\item[--] Supervised Layerwise training of \texttt{Deep CNNs} for Character and Document Recognition
% \item[--] Deep Fully Connected Neural Networks for ECML-PKDD 2015 MLiLS Challenge \dotfill 2015
% \item[--] Signature Recognition with High Pressure Points and One-Class Classifiers \dotfill 2014
% \item[--] Image Moments and MLPs for Devnagari Character Recognition \dotfill 2012-2013
\end{itemize}
\section{Professional Service}
% \begin{itemize}[leftmargin=10pt]
% \item[\textbf{--}] Reviewer: \href{http://portal.core.edu.au/conf-ranks/1169/}{ICPR} (2018), \href{https://link.springer.com/journal/10579}{\textit{Language Resources and Evaluation}, Springer} (2018), \href{https://www.springer.com/journal/12046}{\textit{Sadhana, Springer}} (2019, 2020), \href{https://ieeeaccess.ieee.org/}{\textit{IEEE Access}} (2019)}, \vspace{0.15cm} \href{https://digital-library.theiet.org/content/journals/iet-ifs}{\textit{IET Information Security}} (2020)
% \end{itemize}
\begin{itemize}[leftmargin=10pt]
\item[\textbf{--}] Reviewer (Selected): ICPR (2018), \textit{Language Resources and Evaluation}, Springer (2018), \textit{Sadhana, Springer} (2019, 2020), \textit{IEEE Access} (2019), \textit{IET Information Security} (2020)
\end{itemize}
\section{Miscellaneous}
\begin{itemize}[leftmargin=10pt, noitemsep]
\item[\textbf{--}] GATE Scholarship (2013--2015) for Postgraduate Studies, Govt. of India.
\item[\textbf{--}] Erasmus Mundus FUSION Scholarship for PhD mobility between Jadavpur University, India and University of Evora, Portugal (Did not accept offer)
\end{itemize}
% \section{References}
% Available on Request
\end{document} | {
"alphanum_fraction": 0.7558484741,
"avg_line_length": 67.368,
"ext": "tex",
"hexsha": "f5a677da8e27aaaa3345c1317f657f10f7677e8a",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "b5104a8c4dca8635f4b99476ef1fc09de3e4ddb8",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "saikat-roy/cv-clean",
"max_forks_repo_path": "cv.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "b5104a8c4dca8635f4b99476ef1fc09de3e4ddb8",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "saikat-roy/cv-clean",
"max_issues_repo_path": "cv.tex",
"max_line_length": 488,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "b5104a8c4dca8635f4b99476ef1fc09de3e4ddb8",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "saikat-roy/roy-cv-clean",
"max_stars_repo_path": "cv.tex",
"max_stars_repo_stars_event_max_datetime": "2022-02-01T20:45:30.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-02-01T20:45:30.000Z",
"num_tokens": 2333,
"size": 8421
} |
\section{Conclusions}
\frame{\tableofcontents[currentsection, hideothersubsections]}
\begin{frame}
\frametitle{Conclusions}
?
\end{frame}
\begin{frame}
\Huge{\centerline{Discussion time and thank you.}}
\end{frame} | {
"alphanum_fraction": 0.7777777778,
"avg_line_length": 19.6363636364,
"ext": "tex",
"hexsha": "49ac7ff3fef0495b96bdf1e1c467125871e49c1a",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "779b0d9583fe0f4c582f03b808dd2b7027088493",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "tttor/robot-foundation",
"max_forks_repo_path": "talk/tor/online-pomdp-planning/src/conclusion.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "779b0d9583fe0f4c582f03b808dd2b7027088493",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "tttor/robot-foundation",
"max_issues_repo_path": "talk/tor/online-pomdp-planning/src/conclusion.tex",
"max_line_length": 62,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "779b0d9583fe0f4c582f03b808dd2b7027088493",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "tttor/robot-foundation",
"max_stars_repo_path": "talk/tor/online-pomdp-planning/src/conclusion.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 65,
"size": 216
} |
\documentclass[letterpaper,9pt,twoside,printwatermark=false]{pinp}
%% Some pieces required from the pandoc template
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
% Use the lineno option to display guide line numbers if required.
% Note that the use of elements such as single-column equations
% may affect the guide line number alignment.
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
% The geometry package layout settings need to be set here...
\geometry{layoutsize={0.95588\paperwidth,0.98864\paperheight},%
layouthoffset=0.02206\paperwidth,%
layoutvoffset=0.00568\paperheight}
\definecolor{pinpblue}{HTML}{185FAF} % imagecolorpicker on blue for new R logo
\definecolor{pnasbluetext}{RGB}{101,0,0} %
\title{Assignment 5 - Inference for a Population Mean. Due October 14, 2018,
11:59pm}
\author[a]{EPIB607 - Inferential Statistics}
\affil[a]{Fall 2018, McGill University}
\setcounter{secnumdepth}{5}
% Please give the surname of the lead author for the running footer
\leadauthor{Bhatnagar and Hanley}
% Keywords are not mandatory, but authors are strongly encouraged to provide them. If provided, please include two to five keywords, separated by the pipe symbol, e.g:
\keywords{ t-test | One sample mean | Bootstrap }
\begin{abstract}
In this assignment you will practice conducting inference for a one
sample mean using either the z procedure, t procedure, or the bootstrap.
Answers should be given in full sentences (DO NOT just provide the
number). All figures should have appropriately labeled axes, titles and
captions (if necessary). All graphs and calculations are to be completed
in an R Markdown document using the provided template. You are free to
choose any function from any package to complete the assignment. Concise
answers will be rewarded. Be brief and to the point. Please submit both
the compiled HTML report and the source file (.Rmd) to myCourses by
October 14, 2018, 11:59pm. Both HTML and .Rmd files should be saved as
`IDnumber\_LastName\_FirstName\_EPIB607\_A5'.
\end{abstract}
\dates{This version was compiled on \today}
\doi{\url{https://sahirbhatnagar.com/EPIB607/}}
\pinpfootercontents{Assignment 5 due October 14, 2018 by 11:59pm}
\begin{document}
% Optional adjustment to line up main text (after abstract) of first page with line numbers, when using both lineno and twocolumn options.
% You should only change this length when you've finalised the article contents.
\verticaladjustment{-2pt}
\maketitle
\thispagestyle{firststyle}
\ifthenelse{\boolean{shortarticle}}{\ifthenelse{\boolean{singlecolumn}}{\abscontentformatted}{\abscontent}}{}
% If your first paragraph (i.e. with the \dropcap) contains a list environment (quote, quotation, theorem, definition, enumerate, itemize...), the line after the list may have some extra indentation. If this is the case, add \parshape=0 to the end of the list environment.
\section*{Template}\label{template}
\addcontentsline{toc}{section}{Template}
The \texttt{.Rmd} template for Assignment 5 is available
\href{https://github.com/sahirbhatnagar/EPIB607/raw/master/assignments/a5/a5_template.Rmd}{here}
\section{Food intake and weight gain}\label{food-intake-and-weight-gain}
If we increase our food intake, we generally gain weight. Nutrition
scientists can calculate the amount of weight gain that would be
associated with a given increase in calories. In one study, 16 nonobese
adults, aged 25 to 36 years, were fed 1000 calories per day in excess of
the calories needed to maintain a stable body weight. The subjects
maintained this diet for 8 weeks, so they consumed a total of 56,000
extra calories. According to theory, 3500 extra calories will translate
into a weight gain of 1 pound. Therefore we expect each of these subjects
to gain 56,000/3500=16 pounds (lb). Here are the weights (given in the
\texttt{weightgain.csv} file) before and after the 8-week period
expressed in kilograms (kg):
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{weight <-}\StringTok{ }\KeywordTok{read.csv}\NormalTok{(}\StringTok{"weightgain.csv"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{ShadedResult}
\begin{verbatim}
# subject before after
# 1 1 55.7 61.7
# 2 2 54.9 58.8
# 3 3 59.6 66.0
# 4 4 62.3 66.2
# 5 5 74.2 79.0
# 6 6 75.6 82.3
# 7 7 70.7 74.3
# 8 8 53.3 59.3
# 9 9 73.3 79.1
# 10 10 63.4 66.0
# 11 11 68.1 73.4
# 12 12 73.7 76.9
# 13 13 91.7 93.1
# 14 14 55.9 63.0
# 15 15 61.7 68.2
# 16 16 57.8 60.3
\end{verbatim}
\end{ShadedResult}
\begin{enumerate}
\def\labelenumi{\alph{enumi}.}
\tightlist
\item
Calculate a 95\% confidence interval for the mean weight change and
give a sentence explaining the meaning of the 95\%. State your
assumptions.
\item
Calculate a 95\% bootstrap confidence interval for the mean weight
change and compare it to the one obtained in part (a). Comment on the
bootstrap sampling distribution and compare it to the assumptions you
made in part (a).
\item
Convert the units of the mean weight gain and 95\% confidence interval
to pounds. Note that 1 kilogram is equal to 2.2 pounds.
\item
Test the null hypothesis that the mean weight gain is 16 lbs. State
your assumptions and justify your choice of test. Be sure to specify
the null and alternative hypotheses. What do you conclude?
\end{enumerate}
\newpage
\section{Attitudes toward school}\label{attitudes-toward-school}
The Survey of Study Habits and Attitudes (SSHA) is a psychological test
that measures the motivation, attitude toward school, and study habits
of students. Scores range from 0 to 200. The mean score for U.S. college
students is about 115, and the standard deviation is about 30. A teacher
who suspects that older students have better attitudes toward school
gives the SSHA to 25 students who are at least 30 years of age. Their
mean score is \(\bar{y}\) = 132.2 with a sample standard deviation
\(s = 28\).
\begin{enumerate}
\def\labelenumi{\alph{enumi}.}
\tightlist
\item
The teacher asks you to carry out a formal statistical test for her
hypothesis. Perform a test, provide a 95\% confidence interval and
state your conclusion clearly.
\item
  What assumptions did you use in part (a)? Which of these assumptions
  is most important to the validity of your conclusion in part (a)?
\end{enumerate}
\section{Does a full moon affect
behavior?}\label{does-a-full-moon-affect-behavior}
Many people believe that the moon influences the actions of some
individuals. A study of dementia patients in nursing homes recorded
various types of disruptive behaviors every day for 12 weeks. Days were
classified as moon days if they were in a 3-day period centered at the
day of the full moon. For each patient, the average number of disruptive
behaviors was computed for moon days and for all other days. The
hypothesis is that moon days will lead to more disruptive behavior. We
look at a data set consisting of observations on 15 dementia patients in
nursing homes (available in the \texttt{fullmoon.csv} file):
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{fullmoon <-}\StringTok{ }\KeywordTok{read.csv}\NormalTok{(}\StringTok{"fullmoon.csv"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}
\begin{ShadedResult}
\begin{verbatim}
# patient moon_days other_days
# 1 1 3.33 0.27
# 2 2 3.67 0.59
# 3 3 2.67 0.32
# 4 4 3.33 0.19
# 5 5 3.33 1.26
# 6 6 3.67 0.11
# 7 7 4.67 0.30
# 8 8 2.67 0.40
# 9 9 6.00 1.59
# 10 10 4.33 0.60
# 11 11 3.33 0.65
# 12 12 0.67 0.69
# 13 13 1.33 1.26
# 14 14 0.33 0.23
# 15 15 2.00 0.38
\end{verbatim}
\end{ShadedResult}
\begin{enumerate}
\def\labelenumi{\alph{enumi}.}
\tightlist
\item
Calculate a 95\% confidence interval for the mean difference in
disruptive behaviors. State the assumptions you used to calculate this
interval.
\item
Calculate a 95\% bootstrap confidence interval for the mean difference
in disruptive behaviors and compare to the one obtained in part (a).
Comment on the bootstrap sampling distribution and compare it to the
assumptions you made in part (a).
\item
Test the hypothesis that moon days will lead to more disruptive
behavior. State your assumptions and provide a brief conclusion based
on your analysis.
\item
Find the minimum value of the mean difference in disruptive behaviors
(\(\bar{y}\)) needed to reject the null hypothesis.
\item
What is the probability of detecting an increase of 1.0 aggressive
behavior per day during moon days? \emph{Hint: calculate the
probability of the event calculated in part (d) using a normal
distribution with \(\mu=1\) and \(\sigma =\) the standard error of the
mean}
\end{enumerate}
\newpage
\section{How deep is the ocean?}\label{how-deep-is-the-ocean}
This question is based on the
\href{https://github.com/sahirbhatnagar/EPIB607/blob/master/exercises/water/students/260194225_water_exercise_epib607.pdf}{in-class
Exercise} on sampling distributions and builds on
\href{https://github.com/sahirbhatnagar/EPIB607/raw/master/assignments/a4/a4_clt_ci.pdf}{Question
4 from Assignment 4}. For your sample of \(n=20\) of depths of the ocean
\begin{enumerate}
\def\labelenumi{\alph{enumi}.}
\tightlist
\item
Calculate a 95\% Confidence interval using the \(t\) procedure
\item
Plot the qnorm, bootstrap, and \(t\) procedure confidence intervals on
  the same plot and comment on how the \(t\) interval compares to
the other 2 intervals. You may use the \texttt{compare\_CI} function
provided below to produce the plot.
\end{enumerate}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{compare_CI <-}\StringTok{ }\ControlFlowTok{function}\NormalTok{(ybar, QNORM, BOOT, TPROCEDURE,}
\DataTypeTok{col =} \KeywordTok{c}\NormalTok{(}\StringTok{"#E41A1C"}\NormalTok{,}\StringTok{"#377EB8"}\NormalTok{,}\StringTok{"#4DAF4A"}\NormalTok{)) \{}
\NormalTok{ dt <-}\StringTok{ }\KeywordTok{data.frame}\NormalTok{(}\DataTypeTok{type =} \KeywordTok{c}\NormalTok{(}\StringTok{"qnorm"}\NormalTok{, }\StringTok{"bootstrap"}\NormalTok{, }\StringTok{"t"}\NormalTok{),}
\DataTypeTok{ybar =} \KeywordTok{rep}\NormalTok{(ybar, }\DecValTok{3}\NormalTok{),}
\DataTypeTok{low =} \KeywordTok{c}\NormalTok{(QNORM[}\DecValTok{1}\NormalTok{], BOOT[}\DecValTok{1}\NormalTok{], TPROCEDURE[}\DecValTok{1}\NormalTok{]),}
\DataTypeTok{up =} \KeywordTok{c}\NormalTok{(QNORM[}\DecValTok{2}\NormalTok{], BOOT[}\DecValTok{2}\NormalTok{], TPROCEDURE[}\DecValTok{2}\NormalTok{])}
\NormalTok{ )}
\KeywordTok{plot}\NormalTok{(dt}\OperatorTok{$}\NormalTok{ybar, }\DecValTok{1}\OperatorTok{:}\KeywordTok{nrow}\NormalTok{(dt), }\DataTypeTok{pch =} \DecValTok{20}\NormalTok{, }\DataTypeTok{ylim =} \KeywordTok{c}\NormalTok{(}\DecValTok{0}\NormalTok{, }\DecValTok{5}\NormalTok{), }
\DataTypeTok{xlim =} \KeywordTok{range}\NormalTok{(}\KeywordTok{pretty}\NormalTok{(}\KeywordTok{c}\NormalTok{(dt}\OperatorTok{$}\NormalTok{low, dt}\OperatorTok{$}\NormalTok{up))),}
\DataTypeTok{xlab =} \StringTok{"Depth of ocean (m)"}\NormalTok{, }\DataTypeTok{ylab =} \StringTok{"Confidence Interval Type"}\NormalTok{,}
\DataTypeTok{las =} \DecValTok{1}\NormalTok{, }\DataTypeTok{cex.axis =} \FloatTok{0.8}\NormalTok{, }\DataTypeTok{cex =} \DecValTok{3}\NormalTok{)}
\KeywordTok{abline}\NormalTok{(}\DataTypeTok{v =} \DecValTok{37}\NormalTok{, }\DataTypeTok{lty =} \DecValTok{2}\NormalTok{, }\DataTypeTok{col =} \StringTok{"black"}\NormalTok{, }\DataTypeTok{lwd =} \DecValTok{2}\NormalTok{)}
\KeywordTok{segments}\NormalTok{(}\DataTypeTok{x0 =}\NormalTok{ dt}\OperatorTok{$}\NormalTok{low, }\DataTypeTok{x1 =}\NormalTok{ dt}\OperatorTok{$}\NormalTok{up,}
\DataTypeTok{y0 =} \DecValTok{1}\OperatorTok{:}\KeywordTok{nrow}\NormalTok{(dt), }\DataTypeTok{lend =} \DecValTok{1}\NormalTok{,}
\DataTypeTok{col =}\NormalTok{ col, }\DataTypeTok{lwd =} \DecValTok{4}\NormalTok{)}
\KeywordTok{legend}\NormalTok{(}\StringTok{"topleft"}\NormalTok{,}
\DataTypeTok{legend =} \KeywordTok{c}\NormalTok{(}\KeywordTok{eval}\NormalTok{(}\KeywordTok{substitute}\NormalTok{( }\KeywordTok{expression}\NormalTok{(}\KeywordTok{paste}\NormalTok{(mu,}\StringTok{" = "}\NormalTok{,}\DecValTok{37}\NormalTok{)))),}
\KeywordTok{sprintf}\NormalTok{(}\StringTok{"qnorm CI: [%.f, %.f]"}\NormalTok{,QNORM[}\DecValTok{1}\NormalTok{], QNORM[}\DecValTok{2}\NormalTok{]),}
\KeywordTok{sprintf}\NormalTok{(}\StringTok{"bootstrap CI: [%.f, %.f]"}\NormalTok{,BOOT[}\DecValTok{1}\NormalTok{], BOOT[}\DecValTok{2}\NormalTok{]),}
\KeywordTok{sprintf}\NormalTok{(}\StringTok{"t CI: [%.f, %.f]"}\NormalTok{,TPROCEDURE[}\DecValTok{1}\NormalTok{], TPROCEDURE[}\DecValTok{2}\NormalTok{])),}
\DataTypeTok{lty =} \KeywordTok{c}\NormalTok{(}\DecValTok{1}\NormalTok{,}\DecValTok{1}\NormalTok{,}\DecValTok{1}\NormalTok{,}\DecValTok{1}\NormalTok{),}
\DataTypeTok{col =} \KeywordTok{c}\NormalTok{(}\StringTok{"black"}\NormalTok{,col), }\DataTypeTok{lwd =} \DecValTok{4}\NormalTok{)}
\NormalTok{\}}
\CommentTok{# example of how to use the function:}
\KeywordTok{compare_CI}\NormalTok{(}\DataTypeTok{ybar =} \DecValTok{36}\NormalTok{, }\DataTypeTok{QNORM =} \KeywordTok{c}\NormalTok{(}\DecValTok{25}\NormalTok{,}\DecValTok{40}\NormalTok{), }\DataTypeTok{BOOT =} \KeywordTok{c}\NormalTok{(}\DecValTok{31}\NormalTok{, }\DecValTok{38}\NormalTok{), }\DataTypeTok{TPROCEDURE =} \KeywordTok{c}\NormalTok{(}\DecValTok{28}\NormalTok{, }\DecValTok{40}\NormalTok{))}
\end{Highlighting}
\end{Shaded}
%\showmatmethods
\bibliography{pinp}
\bibliographystyle{jss}
\end{document}
| {
"alphanum_fraction": 0.7120136325,
"avg_line_length": 47.1036789298,
"ext": "tex",
"hexsha": "5ac24dc70463de2ae82302c50c76510a8f5fb033",
"lang": "TeX",
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2019-11-25T21:19:06.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-11-25T21:19:06.000Z",
"max_forks_repo_head_hexsha": "ac2f917bc064f8028a875766af847114cd306396",
"max_forks_repo_licenses": [
"CC0-1.0"
],
"max_forks_repo_name": "ly129/EPIB607",
"max_forks_repo_path": "assignments/a5/a5_ttest.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "ac2f917bc064f8028a875766af847114cd306396",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC0-1.0"
],
"max_issues_repo_name": "ly129/EPIB607",
"max_issues_repo_path": "assignments/a5/a5_ttest.tex",
"max_line_length": 405,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "ac2f917bc064f8028a875766af847114cd306396",
"max_stars_repo_licenses": [
"CC0-1.0"
],
"max_stars_repo_name": "ly129/EPIB607",
"max_stars_repo_path": "assignments/a5/a5_ttest.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 4321,
"size": 14084
} |
\section{Conclusion}\label{sec:conclusion}
Price response functions provide quantitative information on the deviation from
Markovian behavior. They measure price changes resulting from execution of
market orders. We used these functions in big data analysis for spot foreign
exchange markets. To the best of our knowledge, such a study had never been done
before.
We analyzed price response functions in spot foreign exchange markets for
different years and different time scales. We used trade time scale and
physical time scale to compute the price response functions for the seven major
foreign exchange pairs for three different years. These major pairs are highly
relevant in the dynamics of the market. The use of different time scales and
calendar years in the work had the intention to display the different behaviors
the price response function could take when the time parameters differ.
The price response functions were analyzed according to the time scales. On
trade time scale, the signals were noisier. For both time scales we observe
that the signal for all the pairs increases to a maximum and then starts to
slowly decrease. However, for the year 2008 the shape of the signals is not as
well defined as in the other years. The increase-decrease behavior observed in
the spot foreign exchange market was also reported in correlated financial
markets \cite{my_paper_response_financial,Wang_2016_avg}. These results show
that the price response functions retain their behavior in different years
and in different markets. The shape of the price
response functions is qualitatively explained considering an initial increase
caused by the autocorrelated transaction flow. To assure diffusive prices,
price response flattens due to market liquidity adapting to the flow in the
initial increase.
On both scales, the more liquid pairs have a smaller price response function
compared with the non-liquid pairs. As the liquid pairs have more trades during
the market time, the impact of each trade is reduced. Comparing years and
scales, the price response signal is stronger in past than in recent years. As
algorithmic trading has gained great relevance, the quantity of trades has
grown in recent years, and in consequence, the impact in the response has
decreased.
Finally, we checked the impact of the pip bid-ask spread on price response functions
for three different years. We used 46 foreign exchange pairs and grouped them
depending on the conditions of the corresponding year analyzed. We employed the
year average pip bid-ask spread of every pair for each year. For all the years
and time scales, the price response function signals were stronger for the
groups of pairs with larger pip bid-ask spreads and weaker for the group of
pairs with smaller bid-ask spreads. For the average of the price response
functions, it was only possible to see the increase-maximum-decrease behavior
in the year 2015 in both scales, and in the year 2019 on trade time scale.
Hence, the noise in the cross and exotic pairs due to the lack of trading
compared with the majors seems stronger. A general average price response
behavior for each year and time scale was spotted for the groups, suggesting a
market effect on the foreign exchange pairs in each year.
Comparing the response functions in stock and spot currency exchange markets
from a more general viewpoint, we find a remarkable similarity. It triggers the
conclusion that the order book mechanism generates in a rather robust fashion
the observed universal features in these two similar, yet different subsystems
within the financial system. | {
"alphanum_fraction": 0.8224013341,
"avg_line_length": 63.1228070175,
"ext": "tex",
"hexsha": "b90bbfe7996beb0321fe8ceb3d86757bf64be699",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "251ccccfc9a49f546db5e325ea6b594ff035d97f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "juanhenao21/forex",
"max_forks_repo_path": "paper/forex_response_spread_paper/sections/09_conclusion.tex",
"max_issues_count": 18,
"max_issues_repo_head_hexsha": "251ccccfc9a49f546db5e325ea6b594ff035d97f",
"max_issues_repo_issues_event_max_datetime": "2020-03-27T08:43:29.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-03-17T09:30:08.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "juanhenao21/forex",
"max_issues_repo_path": "paper/forex_response_spread_paper/sections/09_conclusion.tex",
"max_line_length": 79,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "251ccccfc9a49f546db5e325ea6b594ff035d97f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "juanhenao21/forex",
"max_stars_repo_path": "paper/forex_response_spread_paper/sections/09_conclusion.tex",
"max_stars_repo_stars_event_max_datetime": "2020-04-01T07:22:34.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-04-01T07:22:34.000Z",
"num_tokens": 727,
"size": 3598
} |
\paragraph{The Recorded Movement of a Thing}
Philosopher Bruno \textcite{Lat90:On, Lat93:We} developed a theory of networks called \gls{at}. \gls{at} can be understood as a way of connecting and associating entities to one another. It is a tool that builds an image of the world made of nodes along a decentralized web of meaning. Latour reformulates nodes and edges, with what he calls `semiotic actors,' `actants,' or `agents' (nodes) and of the interconnected accounts that these have of each other (edges). As I have mentioned earlier with the network model in databases \see{model:network}, navigating through networks is traversing from node to node. However, the (visual) two-dimensional metaphor of a `network' as a `surface' limits the understanding of its topology: ``instead of surfaces one gets filaments.'' \parencite[3]{Lat90:On} In this sense, he points to a misunderstanding that comes from giving \gls{at} a technical definition such as the one described with the database model: ``nothing is more intensely connected, more distant, more compulsory and more strategically organized than a computer network'' \parencite[2]{Lat90:On}. \gls{at} points towards a topological shift. Nevertheless, the navigational paradigm advanced by \textcite{Bachman:1973:PN:355611.362534} in relation to databases whose `keys' become \textit{n}-dimensional space, does translate well to Latour's model, because nodes have ``as many dimensions as they have connections'' \parencite[3]{Lat90:On}. In any case, what this navigation points to is that \gls{at} is comprised entirely of motion and activity: ``no net exists independently of the very act of tracing it, and no tracing is done by an actor exterior to the net'' \parencite[14]{Lat90:On}. Thus, meaning and connectivity are enabled by the activity or work of actors: ``In order to explain, to account, to observe, to prove, to argue, to dominate and to see, [an observer] has to move around and work, I should say it has to `network''' \parencite[13]{Lat90:On}. This work, \textit{net}-work, or `tracing,' is not only the movement of associations and connections, it is also the `recording' of this movement. In this sense, Latour claims that ``a network is not a thing but the recorded movement of a thing'' \parencite[14]{Lat90:On}. Furthermore, nothing falls outside the network: ``the surface `in between' networks is either connected ---but then the network is expanding--- or non-existing. Literally, a network has no outside \parencite[6]{Lat90:On}.'' The network encompasses its own actors and its own expansive motion. Most importantly, \gls{at} is a tool aimed at describing the nature of society. However, in this description, \gls{at} ``does not limit itself to human individual actors but extend[s] the word actor\dots to non-human, non individual entities'' \parencite[2]{Lat90:On}.
\paragraph{Howling}
Thinking networks in terms of a (sonic) three-dimensional metaphor (as a mechanical wave) is thus misunderstanding it. Coupling `resonant' and `network' results in a sort of (impossible) positive feedback. While a network expands in redundancy, overflow, accumulation, and self-reference, a sound attenuates towards imperceptible and infinitesimal thresholds. Lending an ear to the sound of \gls{at}, we would find ourselves listening to expanding filaments. However, as an acoustic experiment that would combine the circuitry of a feedback with the accumulative quality of networks, I propose to consider \poscite{Luc70:Iam} \citetitle{Luc70:Iam}. I have transcribed this sound art piece at the beginning of this chapter, as it is remarkably self-explanatory \see{lucierlude}. It can be understood as a triple crossfade, first between speech and music, gradually crossfading into a second crossfade, between timbre and space, through the circuitry of a closed and controlled feedback loop between a microphone, a speaker, and a room. More considerations of this work I will leave for some other time, and I will refer to \textcite{icmc/bbp2372.2012.006} for further readings on feedback systems. What I would like to bring here is an experimental revision of what is known as the Larsen effect: ``in every sound reinforcement system, where the loudspeaker and the microphone are placed in the same acoustic environment, a certain amount of the signal radiated by the loudspeaker is fed back into the microphone'' \parencite[11]{Kro11:Aco}. When these systems become unstable, the Larsen effect appears (also referred to as `howling'), ``resulting in a positive feedback producing pitched tones from the iterated amplification of a signal'' \parencite[31]{icmc/bbp2372.2012.006}. Therefore, in Lucier's room, what occurs is quite literally the Larsen phenomenon, but ``stretched in time,'' and thus the ``room itself acts like a filter'' \parencite[34]{icmc/bbp2372.2012.006}. Considering the mechanical contradiction in thinking resonant networks, I believe it necessary, then, to expand the `mechanical' side of the feedback system in question: Lucier's \textit{room} needs to be expanding as well. As a consequence, the ``resonant frequencies'' (nodes) of the expanding network would cease to ``reinforce themselves.'' However, (and here is the experiment) this does not mean that these nodes would cease to act, let alone resonate. In this sense, we can ask ourselves: what would \textit{this} sound like? \textit{Where} would the `I' be actually \textit{sitting}?
\paragraph{The Resonant Movement of a Thing}
Such a feedback network would redefine the notion of a temporal delay into a \textit{spatial} delay. Instead of the Larsen effect being ``spread in time,'' in Lucier's work it would also spread through space. The room as a filter would resonate differently because it would be understood as a texture, a networked resonance. If Latour's semiotic actors are in constant reference to each other, it can be argued that they are in resonance with each other, in a permanent state of vibration, or simply, \textit{listening}. Thus, Latour's phrase can (perhaps) be reformulated: \textit{the network is not a thing, but the resonant movement of a thing}.
\paragraph{I am sitting in a loop}
This is the crucial leap that comes out of the idea of a resonant network: the moment the nonhuman in the network is comprehended as resonant, it is the moment that they engage with an approach to self (in Nancy's terms). Following this logical thread, a database can be considered as a semiotic actor as well as a resonant subject. On one hand, databases are not just networks, they are actor-networks: acting, tracing, and listening. On the other hand, since databases are indeed listening, to what extent can we think of them as listening to themselves listening? Bringing back Lucier and his room, I would like to address this question with another aspect of this work. (And by `work' I begin to introduce an important aspect of this dissertation, a concept that embraces activity, productivity, but also product, and objects: operativity and opus.) There is indeed a fourth crossfade, between the `I' in the text, and the `I' in the voice that reads it. The simplest way to approach this is by asking ourselves whether, after recording the first input signal, Lucier remained seated \textit{in} the room or not. This is a difference that cannot be approached from the recording itself because it is inaudible. I will refer to this difference further down this text. For now I point to the fact that the moment Lucier recorded his voice, the `I' began residing \textit{in the loop}. I believe this is one of the most crucial `irregularities' that can be found throughout the work. In the interlude at the beginning of this chapter, I transcribe the text as Lucier reads it. I attempted to be as clear as possible, crossing out, replacing, extending all the consonants into what I thought was a more faithful score for the read fragment. I resorted to these words being ``under erasure,'' that is, ``to write a word, cross it out, and then print both word and deletion'' \parencite[xiv]{Der76:Of}, because in this way one would have a visual cue of both the instruction and a more verbose inscription of the voice. (To a certain extent, \textit{I am sitting in a room\dots} can be understood as a music work `under' constant `erasure.') Thinking as a composer, this score (with all its notated irregularities) would explain the first minutes so fiercely that the mystery of the last minutes would be solved. But, the most crucial aspect of the piece cannot be rendered in symbolic transcription, because the `I' is somewhere in between the transcribed and the inscribed \see{spectrality}. This `I' is what is at stake when databases begin to resonate, that is, it is the approach to this notion of `self' that begins to redefine ourselves in general. That is to say, within the resonant network we face a `self' that changes our own notion of `self' in general. In this sense, a `self' \textit{sitting in a loop} returns to us (resonates back), putting into question a relationship (a difference): what is the difference between the two `I's? Is this same difference at stake between the human and the nonhuman? The implications of these questions I will move forth in the remaining sections of this dissertation. However, the most present step is analyzing the conjunction that the two clauses of the question point to: the sharing of the `I,' an exposure of community.
| {
"alphanum_fraction": 0.7863193562,
"avg_line_length": 726.4615384615,
"ext": "tex",
"hexsha": "1aafbd6e7d198f79d9080b382f466bddee19b1e6",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a42332d3551d42856bf102a00ea84bb4c924a86b",
"max_forks_repo_licenses": [
"FSFAP"
],
"max_forks_repo_name": "fdch/database_music",
"max_forks_repo_path": "content/part-3/section-4/sub/network.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a42332d3551d42856bf102a00ea84bb4c924a86b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"FSFAP"
],
"max_issues_repo_name": "fdch/database_music",
"max_issues_repo_path": "content/part-3/section-4/sub/network.tex",
"max_line_length": 3270,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a42332d3551d42856bf102a00ea84bb4c924a86b",
"max_stars_repo_licenses": [
"FSFAP"
],
"max_stars_repo_name": "fdch/database_music",
"max_stars_repo_path": "content/part-3/section-4/sub/network.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2203,
"size": 9444
} |
\documentclass{article}
\usepackage[utf8]{inputenc}
\title{PS2}
\author{Audrey Hopewell}
\date{January 2020}
\usepackage{natbib}
\usepackage{graphicx}
\begin{document}
\maketitle
\section{Tools of a Data Scientist}
\begin{itemize}
\item Measurement
\item Statistical programming languages
\item Web scraping
\item Handling large data sets
\item Visualization
\item Modeling
\end{itemize}
\end{document}
| {
"alphanum_fraction": 0.7413394919,
"avg_line_length": 16.6538461538,
"ext": "tex",
"hexsha": "43d506f06b798005e686197e154e435d07819d56",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "36f210d6ea43d254b497deb7b204acce694cc3ba",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "AudreyHopewell/DScourseS20",
"max_forks_repo_path": "ProblemSets/PS2/PS2_Hopewell.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "36f210d6ea43d254b497deb7b204acce694cc3ba",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "AudreyHopewell/DScourseS20",
"max_issues_repo_path": "ProblemSets/PS2/PS2_Hopewell.tex",
"max_line_length": 43,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "36f210d6ea43d254b497deb7b204acce694cc3ba",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "AudreyHopewell/DScourseS20",
"max_stars_repo_path": "ProblemSets/PS2/PS2_Hopewell.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 125,
"size": 433
} |
\documentclass[epsfig,10pt,fullpage]{article}
\newcommand{\LabNum}{2}
\newcommand{\CommonDocsPath}{../../common/docs}
\input{\CommonDocsPath/preamble.tex}
\begin{document}
\centerline{\huge Embedded Systems}
~\\
\centerline{\huge Laboratory Exercise \LabNum}
~\\
\centerline{\large Developing Linux* Programs that Communicate with the FPGA}
~\\
\section*{Part I}
\noindent
In Laboratory Exercise 1 you were asked to write a user-level program to control the LED
lights on your DE-series board. Here you are to write another user-level program, which
shows a {\it scrolling} message on a display. If you are using the Intel \textsuperscript{\textregistered} DE1-SoC or
DE10-Standard boards, then you can use seven-segment displays to show the scrolling message.
Your program should display the message \texttt{\red{Intel SoC FPGA}} and should scroll the
message in the right-to-left direction across the displays. The letters in the message can be
constructed as
\begin{figure}[h!]
\begin{center}
\includegraphics[scale=0.33]{figures/HEX_words.pdf}
\end{center}
\label{fig:HEXwords}
\end{figure}
\noindent
The seven-segment display ports in the DE1-SoC and DE10-Standard computer
systems are illustrated in Figure~\ref{fig:segment}.
\begin{figure}[H]
\begin{center}
\includegraphics{figures/fig_segment_port.pdf}
\end{center}
\caption{The seven-segment display ports.}
\label{fig:segment}
\end{figure}
\noindent
As an alternative to seven-segment displays, you can use the Linux* Terminal window. In
this case, you should designate a six-character space on the display in which to show the
message. You can ``draw'' a box using ASCII characters as illustrated below:
\begin{lstlisting}
------
| |
------
\end{lstlisting}
\noindent
If the message is scrolled inside of this box, the effect will be similar in appearance to
using (six) seven-segment displays. Some Terminal window commands are listed in
Table~\ref{tab:vt100}. For
example, the command \texttt{$\backslash$e[2J} clears the Terminal window, and the
command \texttt{$\backslash$e[H} moves the Terminal {\it cursor} to the {\it home} position in
the upper-left corner of the window. In these commands \texttt{$\backslash$e} represents
the \texttt{Escape} character. It can alternatively be specified by its ASCII code, using
the syntax \texttt{$\backslash$033}. You can send such commands to the Terminal window by
using the \texttt{printf} function. For example, the Terminal window can be cleared by calling
\begin{lstlisting}
printf ("\e[2J"); // clear Terminal window
fflush (stdout);
\end{lstlisting}
\noindent
Additional Terminal window commands can be found by searching on the Internet for
\texttt{VT100 escape codes}.
\begin{table}[h]
\caption{Terminal window ASCII commands.}
~\\
\centering
\label{tab:vt100}
\begin{tabular}{l|l}
{\bf Command} & {\bf Result} \\ \hline
\rule{0cm}{.375cm}\texttt{$\backslash$e7} & save cursor position and attributes\\
\texttt{$\backslash$e8} & restore cursor position and attributes\\
\texttt{$\backslash$e[H} & move the cursor to the home position\\
\texttt{$\backslash$e[?25l} & hide the cursor \\
\texttt{$\backslash$e[?25h} & show the cursor \\
\texttt{$\backslash$e[2J} & clear window \\
\texttt{$\backslash$e[ccm} & set foreground color to \texttt{cc}$^1$ \\
\texttt{$\backslash$e[yy;xxH} & set cursor location to row \texttt{yy}, column \texttt{xx}
\end{tabular}
\end{table}
\noindent
For both seven-segment displays and the Terminal window, use a delay when scrolling the message
so that the letters shift to the left at a reasonable speed. To implement the required delay you
can use a Linux library function such as \texttt{nanosleep}.
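A minimal sketch of the Terminal-window variant is shown below; the six-character window, the 200~ms delay, and the padding of the message with blank characters are arbitrary choices rather than part of the exercise specification.
\begin{lstlisting}
#include <stdio.h>
#include <string.h>
#include <time.h>

int main(void)
{
    const char *msg = "Intel SoC FPGA      "; // trailing blanks separate repeats
    struct timespec delay = {0, 200000000};   // 200 ms between shifts (arbitrary)
    int len = strlen(msg);

    printf("\e[2J\e[?25l");                   // clear the window, hide the cursor
    for (int offset = 0; offset < 10 * len; ++offset) {
        printf("\e[H------\n|");              // home the cursor, redraw the box
        for (int i = 0; i < 6; ++i)           // six visible characters
            putchar(msg[(offset + i) % len]);
        printf("|\n------\n");
        fflush(stdout);
        nanosleep(&delay, NULL);              // sets the scrolling speed
    }
    printf("\e[?25h");                        // show the cursor again
    return 0;
}
\end{lstlisting}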
~\\
\noindent
Perform the following:
\begin{enumerate}
\item Create a file called {\it part1.c} and type your C code into this file. Whether you are
using seven-segment displays or the Terminal window, your code should be mostly the same. In
one case, you write six characters at a time from the message to the seven-segment display
ports, and in the other case you print these same characters (inside the box) on the
Terminal window.
You should provide the ability to pause or run the scrolling operation by using the pushbutton
KEYs. The programming registers in a DE-series KEY port are illustrated in Figure~\ref{fig:KEY}.
There is a {\it Data} register that reflects which KEY(s) are pressed at a given time. For
example, if {\it KEY}$_0$ is currently being pressed, then bit 0 of the data register will be~1,
otherwise~0. The {\it Edgecapture} register can be used to check if a {\it KEY} has been
pressed since last examined, even if it has since been released. If, for example,
{\it KEY}$_0$ is pressed then bit 0 of the {\it Edgecapture} register becomes~1 and
remains so even if {\it KEY}$_0$ is released. To reset the bit to 0, a program has to
explicitly write the value~1 into this bit-position of the {\it Edgecapture} register.
The {\it Interruptmask} register is not used for this exercise, and can be ignored.
\begin{figure}[H]
\begin{center}
\includegraphics{figures/fig_KEY_port.pdf}
\end{center}
\caption{The pushbutton KEY port.}
\label{fig:KEY}
\end{figure}
To communicate with the KEYs, and seven-segment displays if applicable, use memory-mapped
I/O as explained in the tutorial {\it Using Linux on DE-series Boards}. The source code from
this tutorial for translating physical addresses into virtual addresses is included along with
this laboratory exercise. You can use this source code as part of your solution; a minimal sketch of this approach is given after this list.
\item
Compile your code using a command such as \texttt{gcc -Wall -o part1 part1.c}.
\item
Execute and test your program.
\end{enumerate}
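~\\
\noindent
As a rough starting point for the memory-mapped I/O in step 1, the sketch below maps the FPGA lightweight bridge through \texttt{/dev/mem} and polls the {\it Edgecapture} register of the KEY port to toggle a pause flag. The base address, span and offsets used here ({\sf 0xFF200000}, {\sf 0x5000}, {\sf 0x50} for the KEY port and {\sf 0xC} for {\it Edgecapture}) are assumptions based on the default DE-series computer systems, so confirm them against the documentation for your board; the program must also be run as the root user to open \texttt{/dev/mem}.
\begin{lstlisting}
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

// Assumed addresses -- confirm against your DE-series computer documentation
#define LW_BRIDGE_BASE 0xFF200000
#define LW_BRIDGE_SPAN 0x00005000
#define KEY_OFFSET     0x00000050       // pushbutton KEY port

int main(void)
{
    int fd = open("/dev/mem", O_RDWR | O_SYNC);
    if (fd < 0) {
        perror("open /dev/mem");
        return 1;
    }
    void *lw = mmap(NULL, LW_BRIDGE_SPAN, PROT_READ | PROT_WRITE,
                    MAP_SHARED, fd, LW_BRIDGE_BASE);
    if (lw == MAP_FAILED) {
        perror("mmap");
        close(fd);
        return 1;
    }
    volatile unsigned int *key = (volatile unsigned int *)((char *)lw + KEY_OFFSET);
    int paused = 0;

    for (int i = 0; i < 600; ++i) {     // poll for about a minute, then exit
        unsigned int edge = *(key + 3); // Edgecapture register (offset 0xC assumed)
        if (edge != 0) {
            *(key + 3) = edge;          // write the captured bits back to clear them
            paused = !paused;           // any KEY toggles pause/run
        }
        if (!paused) {
            // shift and redraw the message here, as in the scrolling sketch above
        }
        usleep(100000);                 // 100 ms polling interval
    }
    munmap(lw, LW_BRIDGE_SPAN);
    close(fd);
    return 0;
}
\end{lstlisting}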
\section*{Part II}
\noindent
In Lab Exercise 1 you were asked to write a kernel module to control the LED lights and
to display a digit, either on a seven-segment display or the Terminal window.
The kernel module responded to interrupts
generated by the KEY pushbutton port. Here you are to write another interrupt-driven
kernel module.
~\\
\noindent
Your kernel module should implement a real-time clock. Display the time on seven-segment
displays, if available, or in the Terminal window. The time
should be displayed in the format \red{MM}:\red{SS}:\red{DD}, where \red{{\it MM}} are minutes,
\red{{\it SS}} are seconds, and \red{{\it DD}} are hundredths of a second.
To keep track of time you
should use a {\it hardware timer} module. The DE-series computers include a
number of hardware timers. For this exercise use an interval timer implemented
in the FPGA called {\it FPGA Timer0}.
The register interface for this timer has the base address {\sf 0xFF202000}. As shown in
Figure~\ref{fig:timer} this timer has six 16-bit registers. To use the timer you need
to write a suitable value into the {\it Counter start value} registers (there are two, one for the
upper 16~bits, and one for the lower 16 bits of the 32-bit counter value). To start the
counter, you need to set the {\it START} bit in the {\it Control} register to~1. Once
started the timer will count down to~0 from the initial value in the {\it Counter start
value} register. The counter will automatically reload this value and continue counting
if the {\it CONT} bit in the {\it Control} register is~1. When the counter reaches~0,
it will set the {\it TO} bit in the {\it Status} register to~1. This bit can be cleared
under program control by writing a~0 into it. If the {\it ITO} bit in the control register is
set to~1, then the timer will generate an ARM* processor interrupt each time
it sets the {\it TO} bit.
The timer clock frequency is 100~MHz. The interrupt~ID of the timer is~72.
Follow the instructions in the tutorial {\it Using Linux on DE-series Boards} to register this
interrupt~ID with the Linux kernel and ensure that it invokes your kernel module whenever
the interrupt occurs.
~\\
\begin{figure}[htb]
\begin{center}
\includegraphics[scale=1]{figures/fig_interval_port.pdf}
\end{center}
\caption{The {\it FPGA Timer0} register interface.}
\label{fig:timer}
\end{figure}
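\noindent
As an illustration of how a kernel module might program this timer, the fragment below sets up an interrupt every hundredth of a second and advances the time in the interrupt handler. With the 100~MHz clock, a period of 0.01~s corresponds to a start value of 1,000,000 counts. The register offsets and the bit positions of {\it ITO}, {\it CONT} and {\it START} (taken here as bits 0, 1 and 2 of the {\it Control} register) are assumptions to be checked against Figure~\ref{fig:timer}; the \texttt{ioremap} of the base address {\sf 0xFF202000} and the \texttt{request\_irq} call for interrupt ID~72 are omitted.
\begin{lstlisting}
#include <linux/interrupt.h>
#include <linux/io.h>

#define TIMER_LOAD 1000000            // 100 MHz x 0.01 s = one interrupt per 10 ms

static void __iomem *timer_base;      // assumed: set with ioremap(0xFF202000, 0x20)
static int hundredths, seconds, minutes;

static void start_timer(void)         // called from the module's init routine (not shown)
{
    iowrite16(TIMER_LOAD & 0xFFFF, timer_base + 0x8);         // start value, low 16 bits
    iowrite16((TIMER_LOAD >> 16) & 0xFFFF, timer_base + 0xC); // start value, high 16 bits
    iowrite16(0x7, timer_base + 0x4); // control: START | CONT | ITO (bit positions assumed)
}

static irqreturn_t timer_isr(int irq, void *dev_id)
{
    iowrite16(0x0, timer_base + 0x0); // write 0 to clear the TO bit in the status register
    if (++hundredths == 100) {        // advance the time, wrapping 59:59:99 -> 00:00:00
        hundredths = 0;
        if (++seconds == 60) {
            seconds = 0;
            if (++minutes == 60)
                minutes = 0;
        }
    }
    // update the seven-segment displays, or save the time for printing, here
    return IRQ_HANDLED;
}
\end{lstlisting}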
\noindent
Perform the following:
\begin{enumerate}
\item Create a file called {\it timer.c} and type your C code into this file.
\item
Create a suitable {\it Makefile} that can be used to compile your kernel module and create the
file {\it timer.ko}. Insert this module into the kernel using the command \texttt{insmod timer.ko}.
Each time an interrupt occurs your interrupt-service routine should increment the value of the
time. When the time reaches \red{59}:\red{59}:\red{99}, it should wrap around to
\red{00}:\red{00}:\red{00}.
If using seven-segment displays, you can continuously display the updated time. But if
using the Terminal window, it is better to print the time only when the user requests it.
For example, your timer interrupt service routine could read from the KEY port and print the
time whenever {\it KEY}$_1$ has been pressed.
You can remove your module from the Linux kernel by using the command
\texttt{rmmod timer}. When removed, your {\it exit} routine should clear the seven-segment
displays, if applicable.
\end{enumerate}
\section*{Part III}
\noindent
For this part you are to write a kernel module that implements a {\it stopwatch}. The stopwatch
time should be shown either on seven-segment displays or the Terminal window. The time should
be settable using the SW switches and KEY pushbuttons in your DE-series Computer. The time
should be displayed in the format \red{MM}:\red{SS}:\red{DD} as was done for Part II.
Implement the stopwatch module using two sources of interrupts: the hardware timer
{\it FPGA Timer0} and the KEY pushbutton port. For each timer interrupt you should
{\it decrement} the stopwatch until it reaches \red{00}:\red{00}:\red{00}.
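A sketch of this decrement logic, together with the {\it KEY}$_0$ run/pause toggle described in the tables below, is given here; it assumes the same timer register layout as in Part~II and a second pointer, \texttt{key\_base}, obtained with \texttt{ioremap} for the KEY port (whose {\it Edgecapture} register is assumed to be at offset {\sf 0xC}), and it leaves out setting the digits from the SW switches.
\begin{lstlisting}
#include <linux/interrupt.h>
#include <linux/io.h>

static void __iomem *timer_base, *key_base;  // assumed: set with ioremap (see tutorial)
static int run = 1;                          // toggled by KEY0
static int hundredths = 99, seconds = 59, minutes = 59;

static irqreturn_t stopwatch_timer_isr(int irq, void *dev_id)
{
    iowrite16(0x0, timer_base + 0x0);        // clear the timer's TO bit
    if (run && !(minutes == 0 && seconds == 0 && hundredths == 0)) {
        if (hundredths-- == 0) {             // borrow through DD, SS and MM
            hundredths = 99;
            if (seconds-- == 0) {
                seconds = 59;
                minutes--;
            }
        }
    }
    return IRQ_HANDLED;
}

static irqreturn_t key_isr(int irq, void *dev_id)
{
    unsigned int edge = ioread32(key_base + 0xC);  // Edgecapture register (offset assumed)
    iowrite32(edge, key_base + 0xC);               // write the bits back to clear them
    if (edge & 0x1)                                // KEY0: toggle running/paused
        run = !run;
    // handle the time-setting KEYs from the action tables here
    return IRQ_HANDLED;
}
\end{lstlisting}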
~\\
\noindent
The behavior of the interrupt service routine for the pushbutton KEY port depends on which
DE-series board is being used. If you are using the DE1-SoC or DE10-Standard board, then
follow the instructions in Table~\ref{tab:action1}. For the DE10-Nano board, which has
fewer KEYs and SW switches, implement the actions given in Table~\ref{tab:action2}.
\begin{table}[h]
\caption{Interrupt actions for the DE1-SoC and DE10-Standard boards.}
~\\
\centering
\label{tab:action1}
\begin{tabular}{c|p{13cm}}
{\bf KEY} & {\bf Action} \\ \hline
\rule{0cm}{.375cm}{\it KEY}$_0$ & Toggle the stopwatch to be either running or paused \\
{\it KEY}$_1$ & When pressed, use the values of the SW switches to set the \red{DD} part of the
stopwatch time. The maximum value is \red{99} \\
{\it KEY}$_2$ & When pressed, use the values of the SW switches to set the \red{SS} part of the
stopwatch time. The maximum value is \red{59} \\
{\it KEY}$_3$ & When pressed, use the values of the SW switches to set the \red{MM} part of the
stopwatch time. The maximum value is \red{59} \\
\end{tabular}
\end{table}
\begin{table}[h]
\caption{Interrupt actions for the DE10-Nano board.}
~\\
\centering
\label{tab:action2}
\begin{tabular}{c|p{13cm}}
{\bf KEY} & {\bf Action} \\ \hline
\rule{0cm}{.375cm}{\it KEY}$_0$ & Toggle the stopwatch to be either running or paused \\
{\it KEY}$_1$ & If the stopwatch is running, just print the current time on the Terminal
window. But if the stopwatch is stopped, then set the time using the SW switch values. Set
one stopwatch digit
each time {\it KEY}$_1$ is pressed, in a specific sequence.
For the first press, set the right digit of \red{DD},
for the second press set the left digit of \red{DD}, for the third press set the right
digit of \red{SS}, and so on. After each press of {\it KEY}$_1$ print the current stopwatch time.
\end{tabular}
\end{table}
~\\
\noindent
The data register in the SW switch port for the DE1-SoC and DE10-Standard boards is shown in
Figure~\ref{fig:slider}. The SW switch port for the DE10-Nano board, not shown in the figure,
has only four switches {\it SW}$_0$ to {\it SW}$_3$.
\begin{figure}[H]
\begin{center}
\includegraphics{figures/fig_slider_port.pdf}
\end{center}
\caption{The SW switch port.}
\label{fig:slider}
\end{figure}
\noindent
Perform the following:
\begin{enumerate}
\item Create a file called {\it stopwatch.c} and type your C code into this file.
\item
Create a suitable {\it Makefile} that can be used to compile your kernel module and create the
file {\it stopwatch.ko}. Ensure that the {\it timer} module from Part II has already been
removed from the kernel, because it also responds to interrupts from FPGA Timer0. Then,
insert the stopwatch module into the kernel by using the
command \texttt{insmod stopwatch.ko}.
If you are using seven-segment displays, then as soon as the module is inserted you should
see the time \red{59}:\red{59}:\red{99} start to decrement on the displays. But if you are
using the Terminal window, then you should see the stopwatch time whenever the user presses
{\it KEY}$_1$.
\end{enumerate}
~\\
\noindent
You can remove your module from the Linux kernel by using the command
\texttt{rmmod stopwatch}. When removed, your {\it exit} routine should clear the seven-segment
displays, if applicable.
\vskip 0.8in
\noindent
\newpage
\input{\CommonDocsPath/copyright.tex}
\end{document}
| {
"alphanum_fraction": 0.7481397971,
"avg_line_length": 43.7664473684,
"ext": "tex",
"hexsha": "018be4819af41731715c9b6a8a23b7aef814cc44",
"lang": "TeX",
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f6e974e6824e4b02b4896f4a00c5b4a09ac859e0",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "fpgacademy/Lab_Exercises_Embedded_Systems",
"max_forks_repo_path": "lab2/doc/lab2.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f6e974e6824e4b02b4896f4a00c5b4a09ac859e0",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "fpgacademy/Lab_Exercises_Embedded_Systems",
"max_issues_repo_path": "lab2/doc/lab2.tex",
"max_line_length": 117,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "f6e974e6824e4b02b4896f4a00c5b4a09ac859e0",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "fpgacademy/Lab_Exercises_Embedded_Systems",
"max_stars_repo_path": "lab2/doc/lab2.tex",
"max_stars_repo_stars_event_max_datetime": "2022-03-09T23:21:30.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-03-09T23:21:30.000Z",
"num_tokens": 3560,
"size": 13305
} |
\chapter{Statistical Analysis Methodology}
\label{chap:appendix-a}
% Please add the following required packages to your document preamble:
% \usepackage{multirow}
% \usepackage[normalem]{ulem}
% \useunder{\uline}{\ul}{}
\begin{table}[]
\centering
\caption{Data analysis metrics used in the survey results analysis}
\label{annex:metrics}
\resizebox{\textwidth}{!}{
\begin{tabular}{l|l|l|}
\cline{2-3}
& Metrics & Meaning \\ \hline
\multicolumn{1}{|l|}{\multirow{3}{*}{\begin{tabular}[c]{@{}l@{}}Measures of\\ Central Tendency \\ or Location\end{tabular}}} & \textbf{Mean} & \begin{tabular}[c]{@{}l@{}}The mean represents the most probable value. In this\\ survey, with the use of scales with lower and upper\\ bounds, the mean has the role of representing the\\ average value of agreement, importance and other\\ measures. Normally, when the skewness of a distribution\\ is high, the meaning of the mean may get distorted by\\ the existence of outlier values. However, since the scales\\ have a low range, with a lower and upper bound on the\\ answer values, this is not much of a concern. Therefore,\\ even in cases of skewness, the mean can be a useful metric.\end{tabular} \\ \hline
\multicolumn{1}{|l|}{} & \textbf{Median} & \begin{tabular}[c]{@{}l@{}}Value of the 50\% percentile, for numerical answers. Half\\ the answers are above this value and half are below,\\ pointing to a central tendency around this value. This is\\ a good metric to use, especially in skewed distributions\\ where there are outliers in the collected values, since the\\ meaning of the mean may get slightly distorted by the\\ outlier values.\end{tabular} \\ \cline{2-3}
\multicolumn{1}{|l|}{} & \textbf{Mode} & \begin{tabular}[c]{@{}l@{}}Most frequent response. Though it represents the most\\ popular answers, by itself the metric means nothing else,\\ as there might be answers almost as popular or not.\end{tabular} \\ \hline
\multicolumn{1}{|l|}{\multirow{2}{*}{\begin{tabular}[c]{@{}l@{}}Measures of Spread,\\ Scale or Dispersion\end{tabular}}} & \textbf{\begin{tabular}[c]{@{}l@{}}Standard \\ Deviation\end{tabular}} & \begin{tabular}[c]{@{}l@{}}Quantifies the variation within the data set, by showing\\ how much the distribution spreads to either side of the\\ center. A high value for the standard deviation means\\ that there are a lot of values away from the mean, from\\ which it can be concluded that there is not a general\\ consensus on certain answers. A low value means that\\ there is consensus, since all the values of the data set are\\ bundled more closely together.\end{tabular} \\ \cline{2-3}
\multicolumn{1}{|l|}{} & \textbf{Range} & \begin{tabular}[c]{@{}l@{}}Difference between highest and lowest value of the data set.\\ Together with the standard deviation, indicates the dispersion\\ of the value of the answers. A range of 0 means that a\\ question had the same value for all responses, for instance.\\ This metric ignores the frequency with which each answer\\ was given, that is why it must be coupled with the standard\\ deviation to be relevant.\end{tabular} \\ \hline
\multicolumn{1}{|l|}{\begin{tabular}[c]{@{}l@{}}Measures of\\ Skewness and\\ Kurtosis\end{tabular}} & \textbf{Skew} & \begin{tabular}[c]{@{}l@{}}This metric indicates the lack of symmetry in a distribution,\\ where the results bunch up in one side of the distribution.\\ For instance, negative skewness values indicate a skew to\\ the left: the values bunch up at the right end of the distribution\\ and the left tail is long, indicating there are outliers in the lower\\ values.\end{tabular} \\ \hline
\end{tabular}
}
\end{table} | {
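\noindent
For reference, the central tendency, dispersion and shape metrics in Table~\ref{annex:metrics} can be computed with the usual sample estimators, reproduced below; the exact skewness normalisation used in the analysis is not stated in the table and varies slightly between statistical packages, so this particular form is an assumption.
\begin{equation}
\bar{x} = \frac{1}{n}\sum_{i=1}^{n} x_i, \qquad
s = \sqrt{\frac{1}{n-1}\sum_{i=1}^{n}\left(x_i - \bar{x}\right)^{2}}, \qquad
g_1 = \frac{\frac{1}{n}\sum_{i=1}^{n}\left(x_i - \bar{x}\right)^{3}}{s^{3}}
\end{equation}
\noindent where $\bar{x}$, $s$ and $g_1$ denote the sample mean, standard deviation and skewness of the $n$ recorded answers, and the range is simply the difference between the largest and smallest answer.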
"alphanum_fraction": 0.4263640753,
"avg_line_length": 248.52,
"ext": "tex",
"hexsha": "93556a72268926dd85100d568b64c09d2d4ca96f",
"lang": "TeX",
"max_forks_count": 7,
"max_forks_repo_forks_event_max_datetime": "2020-08-31T12:43:20.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-09-19T10:33:33.000Z",
"max_forks_repo_head_hexsha": "a212134246e6d5c3ea578f5633c0ce04af4c1069",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "coletiv/supplychain-composer-thesis",
"max_forks_repo_path": "Documents/appendix1.tex",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a212134246e6d5c3ea578f5633c0ce04af4c1069",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "coletiv/supplychain-composer-thesis",
"max_issues_repo_path": "Documents/appendix1.tex",
"max_line_length": 823,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "a212134246e6d5c3ea578f5633c0ce04af4c1069",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "coletiv/supplychain-composer-thesis",
"max_stars_repo_path": "Documents/appendix1.tex",
"max_stars_repo_stars_event_max_datetime": "2019-11-24T15:31:26.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-03-15T10:03:04.000Z",
"num_tokens": 1065,
"size": 6213
} |
\section{Far-Field Green's Function and Plane Wave Expansion}
The core of the fast multipole method lies in the plane wave expansion of the kernel of the scalar Green's function. This is derived and explained in excellent detail in \cite{yucel2008helmholtz}. We summarize the main points here.
\paragraph{Far-Field Green's Function}
Recall that the electric field dyadic Green's function is given by
\begin{equation}
\overline{\bb{G}}(\br,\br') = \left[\overline{\bb{I}} + \dfrac{1}{k^2} \nabla\nabla \right] g(\br,\br')
\end{equation}
\noindent where the scalar Green's function is
\eq{ g(\br,\br') = \dfrac{e^{ik\vert \br - \br' \vert}}{4\pi \vert \br - \br' \vert} \label{fmmscagreen}}
\noindent and that the far-field dyadic Green's function is
\begin{equation}
\overline{\bb{G}}_f(\br,\br') \approx \left[\overline{\bb{I}} - \hat{\bb{k}} \hat{\bb{k}} \right] g(\br,\br')
\end{equation}
\noindent where in using \eqref{fmmscagreen} we have not approximated the phase term.
\paragraph{Plane Wave Expansion}
The derivation is based on two expansions. The first an expansion of the kernel of the scalar Green's function, \cite{yucel2008helmholtz},
\ea{\dfrac{e^{ik\vert \bb{X} + \bb{d} \vert}}{\vert \bb{X} + \bb{d} \vert} &=& i k h_0^{(1)}\left( k \left\vert \bb{X} + \bb{d} \right\vert \right) \\
\ &=& i k \sum_{l=0}^{\infty} (-1)^l (2l+1) j_l(kd) h_l^{(1)}(k X) P_l\left(\hat{\bb{d}} \cdot \hat{\bb{X}}\right) \label{fmmexp1} }
\noindent where $k$ is the free-space wavenumber, $j_l$ is the spherical Bessel function, $h_l^{(1)}$ is the spherical Hankel function, and $P_l$ is the Legendre polynomial. The expansion is valid for $d < X$ where $d = \vert \bb{d} \vert$ and $X = \vert \bb{X} \vert$. The second expansion is
\eq{j_l(kd) P_l\left(\hat{\bb{d}} \cdot \hat{\bb{X}}\right) = \dfrac{i^{-l}}{4\pi} \int e^{i \bb{k}\cdot\bb{d}} P_l(\hat{\bb{k}} \cdot\hat{\bb{X}}) d\Omega_k \label{fmmexp2} }
\noindent where the integral is over the sphere of plane wave directions, $\hat{\bb{k}} = \bb{k}/k = (\sin\theta_k\cos\phi_k, \sin\theta_k\sin\phi_k, \cos\theta_k)$ and differential $d\Omega_k = d^2\hat{\bb{k}} = \sin\theta_k d\theta_k d\phi_k$. Substituting \eqref{fmmexp2} into \eqref{fmmexp1}
\ea{\dfrac{e^{ik\vert \bb{X} + \bb{d} \vert}}{\vert \bb{X} + \bb{d} \vert} &=& \dfrac{i k}{4\pi} \int e^{i \bb{k}\cdot\bb{d}} \sum_{l=0}^{\infty} i^l (2l+1) h_l^{(1)}(k X) P_l(\hat{\bb{k}} \cdot\hat{\bb{X}}) d\Omega_k \label{fmmexp3}}
Next, let source and observation points be $\br'$ and $\br$ with associated local centers, $\br_s$ and $\br_o$, respectively. With these, the vectors $\bb{X}$ and $\bb{d}$ are defined
\ea{\bb{X} &=& \br_o - \br_s \label{fmmX} \\
\bb{d} &=& \br - \br_o - (\br' - \br_s) \label{fmmd} }
The vector $\bb{X}$ points from the local center of the source points to the local center of the observation points. The vector $\bb{d}$ is a vector that would point from the source point to the observation point if the two local regions were translated so that they overlapped. Under the validity condition for the sum, the two local regions must be non-overlapping spheres, in other words $\vert \br - \br_o \vert < X/2$ and $\vert \br' - \br_s \vert < X/2$.
\begin{figure}[H]
\centering
\includegraphics[width=4in]{FastMultipoleMethod/Figures/fmmgreens}
\caption{Geometry for the plane wave expansion of the scalar and dyadic Green's functions.}
\label{}
\end{figure}
Substituting \eqref{fmmX} and \eqref{fmmd} into \eqref{fmmexp3}, and after truncating the sum at degree $L$, the kernel can be approximated
\eq{ \dfrac{e^{ik\vert \br - \br' \vert}}{\vert \br - \br' \vert} \approx \dfrac{ik}{4\pi} \int e^{i\bb{k}\cdot(\br - \br_o)} T_L(\bb{k},\bb{X}) e^{-i\bb{k}\cdot(\br' - \br_s) }d\Omega_k \label{kernelexp}}
\noindent where $T_L$ is the translation operator given by
\eq{T_L(\bb{k},\bb{X}) = \sum_{l=0}^L i^l (2l+1) h_l^{(1)} (kX) P_l(\hat{\bb{k}}\cdot\hat{\bb{X}}) }
\paragraph{Green's Functions}
Using \eqref{kernelexp}, the scalar Green's function can be written
\eq{ g(\br,\br') \approx \dfrac{ik}{16\pi^2} \int e^{i\bb{k}\cdot(\br - \br_o)} T_L(\bb{k},\bb{X}) e^{-i\bb{k}\cdot(\br' - \br_s) }d\Omega_k }
From which the far-field dyadic Green's function is approximated
\begin{equation}
\overline{\bb{G}}_f(\br,\br') \approx \dfrac{ik}{16\pi^2} \int \left[\overline{\bb{I}} - \hat{\bb{k}} \hat{\bb{k}} \right] e^{i\bb{k}\cdot(\br - \br_o)} T_L(\bb{k},\bb{X}) e^{-i\bb{k}\cdot(\br' - \br_s) }d\Omega_k \label{fmmdyadicg}
\end{equation}
Treating $\hat{\bb{k}}$ as the radial unit vector, the vector dyad can be written $\overline{\bb{I}} - \hat{\bb{k}} \hat{\bb{k}} = \hat{\boldsymbol{\theta}}\hat{\boldsymbol{\theta}} + \hat{\boldsymbol{\phi}}\hat{\boldsymbol{\phi}}$, which shows that the far pattern has only $\hat{\boldsymbol{\theta}}$ and $\hat{\boldsymbol{\phi}}$ vector components. Note, the polarization vectors in the dyadic Green's function are relative to the local center of the source and are not integrated because the integral only expands the scalar part of the kernel.
To illustrate how the dyadic Green's function expansion is used with a source, consider the electric field given by the volume integral
\eq{ \bb{E}(\br) = i \omega \mu \int \overline{\bb{G}}(\br,\br') \cdot \bb{J}(\br') dV }
\noindent where $\bb{J}$ is the current density. Substituting \eqref{fmmdyadicg}, we can write this as
\eq{ \bb{E}(\br) \approx \dfrac{i k}{4 \pi} \int \bb{F}(\hat{\bb{k}}) e^{i\bb{k}\cdot(\br - \br_o)} T_L(\bb{k},\bb{X}) d\Omega_k \label{fmmeint}}
\noindent where $\bb{F}(\hat{\bb{k}})$ is the far-field radiation pattern of the source
\eq{\bb{F}(\hat{\bb{k}}) = \dfrac{1}{4 \pi} (i \omega \mu) \left[\overline{\bb{I}} - \hat{\bb{k}} \hat{\bb{k}} \right] \cdot \int e^{-i\bb{k}\cdot(\br' - \br_s) }\bb{J}(\br') dV }
A similar form can be found in \cite{hansen2014exact}. In other words, given a far-field vector radiation pattern, which could equally be that of a scatterer, the electric field at observation point $\bb{r}$ is computed by \eqref{fmmeint}, which integrates the product of the pattern, plane wave phases, and translation matrix over all plane wave directions. %In the most general case, the far-field pattern can be any spherical vector field decomposed into $\hat{\theta}$ and $\hat{\phi}$ components
%\eq{\bb{F}(\theta,\phi) = F_{\theta}(\theta,\phi) \hat{\theta} + F_{\phi}(\theta,\phi) \hat{\phi}}
\section{Selection of L}
In general, the maximum degree $L$ that is required to accurately compute the translation operation is proportional to the electrical size (diameter) of the source/observation spheres. There are various formulas for $L$. From \cite{song1997multilevel,yucel2008helmholtz}, one formula is
\eq{L \approx kd + \beta \ln (\pi + kd)}
\noindent where $\beta$ is the number of digits of precision. From \cite{song2001error,yucel2008helmholtz}, the excess bandwidth formula is
\eq{L \approx kd + 1.8 \alpha^{2/3} (kd)^{1/3}}
\noindent where $\alpha = \log_{10}(1/\epsilon)$ is the number of digits of accuracy and $\epsilon$ is the desired error tolerance. In both cases, $L$ should be rounded up.
It needs to be noted that the sum in the translation operator does not become more accurate with more harmonics. In fact, it will break down if the number of harmonics is excessively large, because the summation of the spherical Hankel functions becomes unstable when $L$ is too large relative to the argument $kX$. Therefore, there is a balance between using enough harmonics for accurate translation and using so many that the sum becomes inaccurate. This is explained in detail in \cite{yucel2008helmholtz}.
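As a concrete illustration, the short MATLAB sketch below evaluates both formulas for a hypothetical group size (the values of $k$, $d$, and the tolerances are our own choices, not prescribed by the references).
{\footnotesize
\begin{verbatim}
% Hypothetical illustration of the two truncation formulas for L
% (the values of k, d, and the tolerances below are our own choices).
k     = 2*pi;                 % free-space wavenumber (unit wavelength)
d     = 2;                    % group diameter, here two wavelengths
kd    = k*d;
beta  = 6;                    % digits of precision for the first formula
eps0  = 1e-6;                 % error tolerance for the excess bandwidth formula
alpha = log10(1/eps0);        % digits of accuracy

L1 = ceil(kd + beta*log(pi + kd));          % logarithmic formula
L2 = ceil(kd + 1.8*alpha^(2/3)*kd^(1/3));   % excess bandwidth formula
fprintf('kd = %.2f, L (log) = %d, L (EBF) = %d\n', kd, L1, L2)
\end{verbatim}
}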
\section{Integration over the Unit Sphere}
Here we explain rules for sampling and integrating spherical harmonics over the sphere. This is needed for understanding how to compute the plane wave expansions of the FMM, as well as spherical harmonic interpolation and filtering. This is based on a hybrid of Gauss-Legendre quadrature integration in $\theta$ and trapezoidal integration in $\phi$. It is exact for band-limited spherical functions assuming a minimum number of sampling points is used, which we derive next. While the method is exact and relatively simple, more efficient spherical integration schemes exist that use fewer integration points.
\addtocontents{toc}{\protect\setcounter{tocdepth}{1}}
\subsection{Gauss-Legendre quadrature}
\addtocontents{toc}{\protect\setcounter{tocdepth}{2}}
In general, quadrature is used to compute an integral of a continuous function as a weighted sum of its samples. Gauss-Legendre quadrature (Gaussian quadrature) with $n$ nodes and weights, $x_j$ and $w_j$, respectively, is exact for polynomials of degree up to $2n-1$. This is normally presented on the domain $x = [-1, 1]$ as
\begin{equation}
\int_{-1}^{1} f(x) dx = \sum_{j=1}^{n} w_j f(x_j)
\end{equation}
The nodes $x_j$ are the zeros of the Legendre polynomial $P_n(x)$, normalized such that $P_n(1) = 1$, with weights
\begin{equation}
w_j = \dfrac{2}{(1-x_j^2)\left[P_n'(x_j) \right]^2}
\end{equation}
Routines exist for computing the nodes and weights of Gauss-Legendre quadrature. We recommend the routine \texttt{legpts} from the Chebfun library (\texttt{http://www.chebfun.org/}), which we use throughout and do not repeat here.
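As a minimal check (assuming \texttt{legpts} is on the path), the following verifies the exactness of an $n$-point rule on a polynomial of degree $2n-1$; the test polynomial is our own.
{\footnotesize
\begin{verbatim}
% Minimal check (assumes chebfun's legpts is on the path): an n-point rule
% integrates polynomials of degree up to 2n-1 exactly.  Test polynomial is ours.
n = 4;
[x, w] = legpts(n);              % nodes (column) and weights on [-1, 1]
f  = @(t) 3*t.^7 - t.^4 + 2*t;   % degree 7 = 2n - 1
Iq = w(:).'*f(x(:));             % quadrature sum
Ie = -2/5;                       % exact: odd terms vanish, -t^4 gives -2/5
fprintf('quadrature = %.15f, exact = %.15f\n', Iq, Ie)
\end{verbatim}
}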
\begin{figure}[H]
\centering
\includegraphics[width=4in]{FastMultipoleMethod/Figures/gaussquad}
\caption{Nodes and weights of Gaussian quadrature}
\label{}
\end{figure}
\clearpage
\addtocontents{toc}{\protect\setcounter{tocdepth}{1}}
\subsection{Numerical Integration of Spherical Harmonics}
\addtocontents{toc}{\protect\setcounter{tocdepth}{2}}
Here we derive the number of sampling points needed to numerically integrate spherical harmonics, which follows the results in \cite{darve2000fast}. Let $f(\theta,\phi)$ be a spherical scalar function composed of a finite number of spherical harmonics
\eq{f(\theta,\phi) = \sum_{l=0}^{L}\sum_{m=-l}^l f_{lm} Y_{lm}(\theta,\phi) \label{fexpansion}}
Integrating this over the unit sphere and separating variables
\ea{\int_0^{2\pi} \int_0^{\pi} f(\theta,\phi) \sin\theta d\theta d\phi %&=& \sum_{l=0}^{L}\sum_{m=-l}^l f_{lm} \int_0^{2\pi} \int_0^{\pi} Y_{lm}(\theta,\phi)\sin\theta d\theta d\phi \\
\ & = & \dfrac{1}{\sqrt{2\pi}} \sum_{l=0}^{L}\sum_{m=-l}^l f_{lm}\int_0^{2\pi} e^{im\phi} d\phi \int_0^{\pi}\widetilde P_l^m(\cos\theta) \sin\theta d\theta }
The integral over $\phi$ can be computed analytically as
\eq{ \int_0^{2\pi} e^{im\phi} d\phi = \begin{cases}
2\pi, \quad \quad m = 0 \\
\dfrac{i (1- e^{2 i \pi m})}{m} = 0, \quad \quad m \ne 0
\end{cases} \label{intphiana} }
It is clear that when $m\ne0$, the double integral will be zero regardless of the value of the $\theta$ integral. We still want to know the number of discrete integration points in $\phi$ required to make this true. Using trapezoidal integration with periodicity, \eqref{intphiana} is written as a discrete sum over $N$ evenly spaced points
\ea{\int_0^{2\pi} e^{im\phi} d\phi &=& \Delta \phi \sum_{k=1}^{N} e^{im\phi_k} \\
\ &= & \dfrac{2\pi}{N} \sum_{k=0}^{N-1} e^{im k 2\pi/N} \\
\ &= & \dfrac{2\pi}{N} e^{i(N-1) m \pi/N } \dfrac{\sin(m\pi)}{\sin(m\pi/N)} \label{trapintphi}}
\noindent where $\phi_k = (k-1) \Delta\phi$, $k = 1,...,N$, and $\Delta \phi = 2\pi/N$. The last equality comes from the Dirichlet kernel
\eq{\sum_{k=0}^{N-1} e^{ikx} = e^{i(N-1)x/2} \dfrac{\sin(Nx/2)}{\sin(x/2)}}
\eqref{trapintphi} will be zero when $0 < \vert m \vert < N$, because the numerator sine is zero. When $m = N$, applying L'Hopital's rule, the ratio of sine functions is equal to $N$ while the complex exponential is non-zero. Therefore, the number of samples that correctly integrates all harmonics up to $m = L$ is $N = L + 1$. This can be confirmed numerically and is the same as given in \cite{darve2000fast, beentjes2015quadrature}. We can then write the $\phi$ integral as
\eq{ \int_0^{2\pi} e^{im\phi} d\phi = \dfrac{2\pi}{L+1} \sum_{i=1}^{L+1} e^{im\phi_i}, \quad 0 \le \vert m \vert \le L }
When $m=0$, the $\theta$ integral becomes
\ea{ \int_0^{\pi} P_l(\cos\theta) \sin\theta d\theta &=& - \int_0^{\pi} P_l(\cos\theta) d\cos\theta \\
\ &=& \int_{-1}^{1} P_l(\mu) d \mu \\
\ &=& \sum_{j=1}^{N} w_j P_l(\mu_j) }
with the change of variables $\mu = \cos\theta$ and where the integral has been replaced with Gaussian quadrature. Because $P_l(\mu)$ are polynomials of degree $l$, and because the quadrature with $n$ nodes is exact for polynomial degrees up to $2n-1$, the requirement is $l \le 2 n - 1$. For maximum harmonic degree $L$, the number of quadrature nodes is $N = (L+1)/2$, which should be rounded up.
As an aside, when $m$ is even, the associated Legendre polynomials can be integrated with quadrature, because they are simple polynomials. When $m$ is odd, they contain a factor of $\sqrt{1-\mu^2}$, which means they are not simple polynomials, and so cannot be integrated exactly via quadrature. This can be verified numerically. However, analytical integration of $P_l^m(\mu)$ can be done for any $m$, \cite{beentjes2015quadrature,atkinson2012spherical}.
Using these results, numerical integration of a spherical function composed of spherical harmonics with maximum degree $L$ can be computed exactly (to machine precision) as
\eq{\int_0^{2\pi} \int_0^{\pi} f(\theta,\phi) \sin\theta d\theta d\phi = \dfrac{2\pi}{L+1} \sum_{i=1}^{L+1} \sum_{j=1}^{\lceil (L+1)/2 \rceil} w_j f(\theta_j,\phi_i) \label{intsphereharm} }
\noindent where $\phi_i = (i-1)2\pi/(L+1)$, $i = 1,...,L+1$, and $\theta_j = \arccos\mu_j$, where $\mu_j$ and $w_j$ are the nodes and weights of Gaussian quadrature for $\lceil (L+1)/2 \rceil$ points.
We know that all spherical harmonics except the monopole are zero-mean over the sphere; therefore, if the expansion coefficients are known, the mean follows directly from $f_{00}$ (up to the normalization constant of $Y_{00}$). If the coefficients are not known, but the function is sampled on the points of quadrature, \eqref{intsphereharm} computes the integral of $f(\theta,\phi)$ exactly, from which the mean follows.
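A minimal sketch of \eqref{intsphereharm}, assuming Chebfun's \texttt{legpts} and using a test function of our own with maximum degree $L = 2$, is
{\footnotesize
\begin{verbatim}
% Minimal sketch of the hybrid rule for a band-limited test function, L = 2.
% (Test function and values are ours; legpts is chebfun's.)
L  = 2;
Np = L + 1;                        % trapezoidal points in phi
Nt = ceil((L + 1)/2);              % Gaussian nodes in theta
[mu, w] = legpts(Nt);
phi = 2*pi*((1:Np) - 1)/Np;
[PH, MU] = meshgrid(phi, mu(:));   % Nt x Np grid
W  = repmat(w(:), 1, Np);

% f = 1 + cos(theta) + sin(theta)^2*cos(2*phi): harmonics up to degree 2
f  = 1 + MU + (1 - MU.^2).*cos(2*PH);
I  = (2*pi/Np)*sum(sum(W.*f));     % discrete spherical integral
fprintf('discrete = %.15f, exact = %.15f\n', I, 4*pi)
\end{verbatim}
}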
\addtocontents{toc}{\protect\setcounter{tocdepth}{1}}
\subsection{Numerical Integration of Products of Spherical Harmonics}
\addtocontents{toc}{\protect\setcounter{tocdepth}{2}}
Here we derive the number of sampling points needed to numerically integrate a product of spherical harmonics over the unit sphere. This can be done using spherical harmonic synthesis. The expansion coefficients of a scalar spherical function are given by
\eq{f_{lm} = \int_0^{2\pi} \int_0^{\pi} f(\theta,\phi) Y^*_{lm}(\theta,\phi)\sin\theta d\theta d\phi }
Substituting \eqref{fexpansion} (ignoring, for the moment, that orthogonality makes the result immediate)
\ea{f_{lm} %&=& \sum_{l'=0}^{L}\sum_{m'=-l'}^{l'} f_{l'm'} \int_0^{2\pi} \int_0^{\pi} Y_{l'm'}(\theta,\phi)Y^*_{lm}(\theta,\phi)\sin\theta d\theta d\phi \\
&=& \sum_{l'=0}^{L}\sum_{m'=-l'}^{l'} f_{l'm'} \dfrac{1}{2\pi}\int_0^{2\pi} e^{i(m'-m)\phi} d \phi \int_0^{\pi} \widetilde P_{l'}^{m'}(\cos\theta) \widetilde P_l^m(\cos\theta) \sin\theta d\theta \\
}
Using the reasoning in the previous section, the maximum harmonic in the $\phi$ integration is $2L$. Therefore the number of equally spaced sampling points that are required for trapezoidal integration in $\phi$ is $2L + 1$. The product of two associated Legendre polynomials is a pure polynomial of degree $2L$ for any $m$. Therefore, the number of required samples for Gaussian quadrature in $\theta$ is $\lceil L + 1/2 \rceil$, which can be immediately rounded up to $L + 1$.
Using these, numerical integration of the product of two spherical
functions, $f(\theta,\phi)$ and $g(\theta,\phi)$, each with maximum harmonic degree $L$ can be computed exactly as
\eq{\int_0^{2\pi} \int_0^{\pi} f(\theta,\phi) g(\theta,\phi) \sin\theta d\theta d\phi = \dfrac{2\pi}{2L+1} \sum_{i=1}^{2L+1} \sum_{j=1}^{L+1} w_j f(\theta_j,\phi_i)g(\theta_j,\phi_i) \label{intharmprod}}
\noindent where $\phi_i = (i-1)2\pi/(2L+1)$, $i = 1,...,2L+1$, and $\theta_j = \arccos\mu_j$, where $\mu_j$ and $w_j$ are the nodes and weights of Gaussian quadrature for $L + 1$ points. It is common to find \eqref{intharmprod} in the literature applied to spherical functions without stipulating whether the underlying function is composed of pure harmonics or a product of spherical harmonics.
\begin{figure}[H]
\centering
\subfigure{\includegraphics[width=3in]{FastMultipoleMethod/Figures/quadsphere5}}
\subfigure{\includegraphics[width=3in]{FastMultipoleMethod/Figures/quadsphere6}}
\caption{Nodes of trapezoidal integration ($\phi_i$) and Gaussian quadrature ($\theta_j$) for integrating products of spherical functions.}
\label{}
\end{figure}
The nodes $\theta_j$ are almost uniformly spaced, and they never sample the poles because the nodes of Gaussian quadrature do not include the end points. This is especially convenient for vector spherical harmonics, for which the polarization is ambiguous at the poles. When $L+1$ is odd, the nodes will sample the equator. This scheme does crowd samples near the poles somewhat.
Finally, \eqref{intharmprod} can be viewed as computing the power of a field over the sphere when the second function is conjugated (or computing the cross-correlation of two fields). If the spherical harmonic expansion coefficients are known, the analogous form of Parseval's theorem can be used to simply sum the magnitude squared of the coefficients. If the coefficients are not known, \eqref{intharmprod} will compute a power-like quantity exactly from samples of the field(s).
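A similar minimal sketch of \eqref{intharmprod}, computing the power of a degree-1 test field of our own choice (again assuming Chebfun's \texttt{legpts}), is
{\footnotesize
\begin{verbatim}
% Minimal sketch of the product rule: power of a degree-1 test field (ours).
L  = 1;
Np = 2*L + 1;                      % phi samples for a product of degree-L fields
[mu, w] = legpts(L + 1);           % theta nodes for the product
phi = 2*pi*((1:Np) - 1)/Np;
[PH, MU] = meshgrid(phi, mu(:));
W  = repmat(w(:), 1, Np);

f  = MU + sqrt(1 - MU.^2).*cos(PH);      % cos(theta) + sin(theta)cos(phi)
P  = (2*pi/Np)*sum(sum(W.*f.*conj(f)));  % discrete power over the sphere
fprintf('discrete = %.15f, exact = %.15f\n', P, 8*pi/3)
\end{verbatim}
}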
%
%Integration of a spherical function $f(\hat{\bb{k}})$ over the unit sphere can be written generally as
%\eq{\int f(\hat{\bb{k}}) d\hat{\bb{k}} = \int_0^{2\pi} \int_0^{\pi} f(\theta,\phi) \sin\theta d\theta d\phi }
%
%\noindent where $\hat{\bb{k}} = (\sin\theta\cos\phi, \sin\theta\sin\phi, \cos\theta)$.
%
%This integral can be computed exactly from the samples of $f(\hat{\bb{k}})$, if $f$ is band-limited, using a hybrid of Gauss-Legendre quadrature in $\theta$ and trapezoidal integration in $\phi$. After a change of variables, the integral is
%\begin{eqnarray}
%\int f(\hat{\bb{k}}) d\hat{\bb{k}} & =& \int_0^{2\pi} \int_0^{\pi} f(\theta,\phi) \sin\theta d\theta d\phi \\
%\ & =& \int_0^{2\pi} \int_{-1}^1 f(\mu,\phi) d\mu d\phi \\
%\ & = & \dfrac{2\pi}{2L+1} \sum_{i=1}^{2L+1} \sum_{j=1}^{L+1} w_j f(\theta_j,\phi_i) \label{sphereint}
%\end{eqnarray}
%
%
%
%
%\noindent where $w_j$ are the weights corresponding to Gaussian nodes $\mu_j$ on $[-1,1]$. The angular sampling points are $\theta_j = \arccos\mu_j$ and $\phi_i = (i-1)2\pi/(2L+1)$. $L$ is the maximum degree required to expand the function $f(\theta,\phi)$ in spherical harmonics. Interestingly, the nodes $\theta_j$ are almost uniformly spaced, and they never sample the poles because of the nodes of Gaussian quadrature do not sample the end points. When $L+1$ is odd, the nodes will sample the equator.
%
%Address the no-quite polynomials for straight spherical harmonics. Spherical transforms and green's function contain products of these harmonics.
\addtocontents{toc}{\protect\setcounter{tocdepth}{1}}
\subsection{Green's Function Integration}
\addtocontents{toc}{\protect\setcounter{tocdepth}{2}}
Following \cite{yucel2008helmholtz}, we give the number of sample points required to correctly integrate the plane wave expansions in the FMM. Using the addition theorem for plane waves
\eq{e^{i \bb{k}\cdot \bb{d}} = \sum_{l=0}^{\infty} i^l (2l+1) j_l(kd) P_l(\hat{\bb{k}}\cdot\hat{\bb{d}})}
\eqref{fmmexp3} can be written
\ea{\dfrac{e^{ik\vert \bb{X} + \bb{d} \vert}}{\vert \bb{X} + \bb{d} \vert} &=& \dfrac{i k}{4\pi} \sum_{l=0}^{\infty} \sum_{l'=0}^{\infty} i^l (2l+1) i^{l'} (2l'+1) h_l^{(1)}(k X) j_{l'}(kd) \int P_l(\hat{\bb{k}} \cdot\hat{\bb{X}}) P_{l'}(\hat{\bb{k}}\cdot\hat{\bb{d}}) d\Omega_k }
Using the addition theorem for Legendre polynomials,
\eq{P_l(\hat{\br} \cdot \hat{\br}') = \dfrac{4\pi}{2l + 1} \sum_{m=-l}^l Y_{lm}(\theta,\phi) Y_{lm}^*(\theta',\phi')}
the integral can be expanded as
\eq{\int P_l(\hat{\bb{k}} \cdot\hat{\bb{X}}) P_{l'}(\hat{\bb{k}}\cdot\hat{\bb{d}}) d\Omega_k = \dfrac{4\pi}{2l + 1} \dfrac{4\pi}{2l' + 1} \sum_{m=-l}^l \sum_{m'=-l'}^{l'} Y_{lm}^*(\theta_X,\phi_X) Y_{l'm'}^*(\theta_d,\phi_d) \int Y_{lm}(\theta_k,\phi_k) Y_{l'm'}(\theta_k,\phi_k) d\Omega_k }
This shows that the spherical integral in \eqref{fmmexp3} is really integrating a product of spherical harmonics. Therefore, using the results from the previous section, the integral over plane waves in the Green's function kernel \eqref{kernelexp} can be computed exactly over discrete values of the wave vector $\hat{\bb{k}}_{ij}$ as
\eq{ \dfrac{e^{ik\vert \br - \br' \vert}}{\vert \br - \br' \vert} \approx \dfrac{ik}{4\pi} \dfrac{2\pi}{2L+1} \sum_{i=1}^{2L+1} \sum_{j=1}^{L+1} w_j e^{ik \hat{\bb{k}}_{ij}\cdot(\br - \br_o)} T_L(k \hat{\bb{k}}_{ij},\bb{X}) e^{-ik \hat{\bb{k}}_{ij} \cdot(\br' - \br_s) } }
which is the result in \cite{yucel2008helmholtz}. The approximation comes from truncating the sum in \eqref{kernelexp}, not the spherical integration.
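The following MATLAB sketch (our own test geometry, assuming Chebfun's \texttt{legpts}) assembles the discrete expansion above and compares it to the exact kernel $e^{ikR}/R$; the residual difference is set by the truncation at degree $L$, not by the spherical integration.
{\footnotesize
\begin{verbatim}
% Sketch (our own test geometry; assumes chebfun's legpts): compare the
% discretized plane wave expansion of the kernel with the exact exp(ikR)/R.
k  = 2*pi;  L = 15;
X  = [10; 0; 0];                 % r_o - r_s (vector between group centers)
d1 = [ 0.4; 0.3; -0.2];          % r  - r_o
d2 = [-0.3; 0.2;  0.5];          % r' - r_s

[mu, w] = legpts(L + 1);                    % theta nodes/weights
phi = 2*pi*(0:2*L)/(2*L + 1);               % phi samples
[PH, MU] = meshgrid(phi, mu(:));
W  = repmat(w(:), 1, 2*L + 1);
st = sqrt(1 - MU.^2);
kx = st.*cos(PH);  ky = st.*sin(PH);  kz = MU;   % unit wave vectors

Xm = norm(X);  Xh = X/Xm;
cg = kx*Xh(1) + ky*Xh(2) + kz*Xh(3);        % cos(angle between k-hat and X-hat)

% translation operator via upward recursion of the Legendre polynomials
sphh = @(l, x) sqrt(pi/(2*x))*besselh(l + 0.5, 1, x);  % spherical Hankel h_l^(1)
Pm1 = ones(size(cg));  P0 = cg;
TL  = sphh(0, k*Xm)*Pm1 + 1i*3*sphh(1, k*Xm)*P0;
for l = 2:L
    P1  = ((2*l - 1)*cg.*P0 - (l - 1)*Pm1)/l;
    TL  = TL + 1i^l*(2*l + 1)*sphh(l, k*Xm)*P1;
    Pm1 = P0;  P0 = P1;
end

% discrete spherical integration of the kernel expansion
ph1 = exp( 1i*k*(kx*d1(1) + ky*d1(2) + kz*d1(3)));
ph2 = exp(-1i*k*(kx*d2(1) + ky*d2(2) + kz*d2(3)));
approx = 1i*k/(4*pi)*(2*pi/(2*L + 1))*sum(sum(W.*ph1.*TL.*ph2));

R = norm(X + d1 - d2);  exact = exp(1i*k*R)/R;
fprintf('approx = %.6f%+.6fi, exact = %.6f%+.6fi\n', ...
        real(approx), imag(approx), real(exact), imag(exact))
\end{verbatim}
}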
\section{Aggregation/Disaggregation}
Here we give a basic idea of how to aggregate, translate, and disaggregate fields in the context of the FMM operations following the explanation in \cite{yucel2008helmholtz}. The routines for computing the translation operator are given in Section \ref{transoperator}, while routines for interpolating and filtering scalar and vector fields are given in Sections \ref{sec:scasphfilter} and \ref{sec:vecsphfilter}.
\addtocontents{toc}{\protect\setcounter{tocdepth}{1}}
\subsection{Octree}
\addtocontents{toc}{\protect\setcounter{tocdepth}{2}}
Typically the scatterers in an FMM problem are organized on a hierarchical octree. An octree is a data structure in which each node has eight children. This is combined with the geometric process of subdividing a cubic volume into eight equal octants. The eight subcubes are called the children of the larger parent cube and vice versa. A cube at every level has an outgoing and incoming far-field pattern associated with it, say $\bb{F}(\hat {\bb{k}})$, which is sampled on the sphere according to the rules of quadrature. This field expansion is centered on the cube and has harmonic bandwidth (i.e., maximum vector spherical harmonic degree, $L$) at least as large as that required for the diameter of the enclosing sphere, and further set by the desired accuracy of the translation operations. This means that fields are coarsely sampled in $(\theta,\phi)$ at higher levels (smaller cubes), and more finely sampled at lower levels (larger cubes).
Fields are aggregated up the hierarchy, translated at the highest level possible, then disaggregated down the hierarchy. There is a constraint that fields cannot be translated to neighboring boxes at the same level (due to the separation requirement of the translation). This creates a complication when disaggregating. For more details see \cite{yucel2008helmholtz}. In general, aggregation and disaggregation do not have to be restricted to octree structures as long as the bandwidth and separation requirements between the groups of scatterers are obeyed.
\begin{figure}[h]
\centering
\includegraphics[width=6in]{FastMultipoleMethod/Figures/aggdiss}
\caption{Aggregation, translation, and disaggregation. }
\label{}
\end{figure}
\addtocontents{toc}{\protect\setcounter{tocdepth}{1}}
\subsection{Aggregation}
\addtocontents{toc}{\protect\setcounter{tocdepth}{2}}
To reiterate, the far pattern of each child cube has lower harmonic content than that of the parent (due to its smaller size), and therefore coarser spatial sampling in $(\theta,\phi)$. The process of aggregating fields consists of 1) interpolating the far pattern of each child cube up to the finer sampling of the parent, 2) shifting the phase centers of the children's patterns to that of the parent, then 3) summing the fields of all the children. The shift is done by multiplication by a complex exponential of plane wave phases, which is equivalent to a diagonal matrix-vector multiply and is trivial to compute.
Let $\bb{F}_{n,l}(\hat {\bb{k}})$ be the vector field for the $n$th group (cube) at level $l$. Let $P_{l+1}^{l}$ be the interpolation operator that interpolates a field from level $l+1$ to level $l$. The aggregated field for the $n$th group at the level of the parents is the sum over all interpolated and shifted fields of the children belonging to each parent, \cite{yucel2008helmholtz}:
\begin{equation}
\bb{F}_{n,l}(\hat {\bb{k}}_{l}) = \sum_{m \in G_c} e^{i\bb{k}_{l+1} \cdot (\bb{x}_{n} - \bb{x}_{m}) } P_{l+1}^{l}\left[\bb{F}_{m,l+1}(\hat{ \bb{k}}_{l+1})\right]
\end{equation}
\noindent where $G_c$ is the list of children that belong to parent group $n$, and $\hat {\bb{k}}_{l}$ are the spherical directions sampled for level $l$.
The process of interpolation does not change the harmonic content of the patterns of the children. However, multiplying a pattern by the phase exponential of the plane wave shift is equivalent to convolving the spherical harmonic spectra. This is why the fields are first interpolated, then shifted. Another way to think about this: even though a child's pattern has lower harmonic content when centered on the child's cube, the same pattern referenced to an offset center belongs to a larger enclosing sphere, and therefore has higher harmonic content requiring finer spherical sampling.
\addtocontents{toc}{\protect\setcounter{tocdepth}{1}}
\subsection{Disaggregation}
\addtocontents{toc}{\protect\setcounter{tocdepth}{2}}
Disaggregation sweeps from the lowest level of the octree (largest cubes) to the highest level (smallest cubes) and consists of three steps: 1) shift the field of a parent to the phase center of the child, then filter the parent's field to the child's level (i.e., anterpolate), 2) translate the outgoing patterns between groups at the same level that are not near-neighbors but whose parents are near-neighbors (called the neighborhood of the child), 3) sum the filtered and translated fields. This is done recursively from the bottom level to the top level for all groups and can be written as
\begin{equation}
\bb{G}_{m,l}(\hat {\bb{k}}_{l}) = P_{l-1}^{l}\left[e^{i\bb{k}_{l-1} \cdot (\bb{x}_{m} - \bb{x}_{n}) } \bb{G}_{n,l-1}(\hat{ \bb{k}}_{l-1})\right] + \sum_{p \in G_w} T_L({\bb{k}}_{l}, \bb{x}_{m} - \bb{x}_{p}) \bb{F}_{p,l}(\hat {\bb{k}})
\end{equation}
\noindent where $P_{l-1}^{l}$ is the filtering operator that filters the parent's field at level $l-1$ to the child's sampling, $m$ is the index of the child at level $l$, $n$ is the index of the parent at the parent's level, and $G_w$ is the list of children at level $l$ that are well-separated from $m$ (i.e., children that are not near-neighbors, but whose parents are near-neighbors). The purpose of filtering the field of the parent is to reduce its total harmonic content to the same degree as that of the child.
\section{Translation Operator}
\label{transoperator}
\subsection{Basic Translation Operator}
The FMM translation operator is
\begin{equation}
T_L(\bb{k},\bb{X}) = \sum_{l=0}^L i^l (2l+1) h_l^{(1)} (kX) P_l(\hat{\bb{k}}\cdot\hat{\bb{X}})
\end{equation}
\noindent where $\bb{X}$ is the Cartesian vector that points from the origin of the source frame to the origin of the observation frame, $k$ is the complex background wavenumber, $\hat{\bb{k}}$ is the Cartesian wave vector direction, $P_l(x)$ is the Legendre polynomial, and $L$ is the maximum degree of the sum. When computing this, it can be written in terms of the dot product $\cos\theta = \hat{\bb{k}}\cdot\hat{\bb{X}}$ in order to externalize the vector computations.
\begin{equation}
T_L(kX,\theta) = \sum_{l=0}^L i^l (2l+1) h_l^{(1)} (kX) P_l(\cos\theta) \label{tltheta}
\end{equation}
\begin{figure}[h]
\centering
\includegraphics[width=3.5in]{FastMultipoleMethod/Figures/TLtheta}
\caption{Real part of the translation operator, \eqref{tltheta}, for $\hat{\bb{X}} = [1, 0, 0]$, $L = 12$, and $kX = 50$. The grid is highly oversampled compared to the sampling required for quadrature integration over the sphere. Note, the operator is peaked in the direction of propagation and contains a 'back lobe'-like feature.}
\label{}
\end{figure}
The routine \texttt{TLth} returns the translation operator \eqref{tltheta} given scalars $kX$ and $L$, and an array of $\cos\theta$, which can be any size. To save memory, the Legendre polynomials are computed inline with the recursion \eqref{plrec}.
{\footnotesize
\VerbatimInput{\code/FastMultipoleMethod/TL/TLth.m}
}
%
%
%
%{\footnotesize
%\VerbatimInput{\code/FastMultipoleMethod/TLth.m}
%}
%
%{\footnotesize
%\VerbatimInput{\code/FastMultipoleMethod/TLmem.m}
%}
\subsection{Translation Operator Interpolation}
Computing the translation operator as a straight sum is a computational bottleneck for large problems. Much work has gone into finding an optimal computation scheme, and the result is a fast interpolator. The translation operator is precomputed directly at a coarse sampling, after which any value is found by interpolation to a selectable level of error. Because the translation operator is band limited, it can be computed exactly from the samples using the approximate prolate spheroid (APS) method. In practice, only a small subset of samples in the vicinity of the interpolation point needs to be used.
The interpolation formula using APS is given by \cite{bucci1991optimal,yucel2008helmholtz}
\begin{equation}
\widetilde{T}_L(\theta) = \sum_{m = m_o - p + 1}^{m_o + p} T_L(m\Delta\theta)S_N(\theta-m\Delta\theta,\theta_o)D_M(\theta - m \Delta\theta)
\end{equation}
\noindent where $\widetilde{T}_L(\theta)$ is the interpolated translation operator, $D_M(\theta)$ is the periodic sinc function (or Dirichlet kernel), $S_N(\theta,\theta_o)$ is a windowing function, and $T_L(m\Delta\theta)$ are precomputed samples of the translation operator. The windowing function is given by
\begin{equation}
S_N(\theta,\theta_o) = \dfrac{R_N(\theta,\theta_o)}{R_N(0,\theta_o)}
\end{equation}
\begin{equation}
R_N(\theta,\theta_o) = \dfrac{\sinh\left[ (2N+1) \sinh^{-1} \sqrt{\sin^2(\theta_o/2) - \sin^2(\theta/2)}\right]}{\sqrt{\sin^2(\theta_o/2) - \sin^2(\theta/2)}}
\end{equation}
The Dirichlet kernel is given by
\begin{equation}
D_M(\theta) = \dfrac{\sin\left[(2M+1)\theta/2 \right]}{(2M+1)\sin(\theta/2)}
\end{equation}
In these expressions, $L$ is the truncation degree of the sum and $M=sL$ is the total number of precomputed sampling points where $s$ is the over-sampling ratio and is an integer. The required sample spacing is $\Delta \theta = 2\pi/(2M+1)$. This spacing is over a $2\pi$ circumference, even though we only need $\theta = [0, \pi]$. This comes from the original papers on optimal interpolation over a sphere, but the formulation persists in the literature. $N = M-L = (s-1)L$ is the number of over-sampling points. $m_o = \textrm{Int}[\theta/\Delta\theta]$ is the integer index to the left of the interpolation point, where $\textrm{Int}[\cdot]$ is the integer part or floor function. $\theta_o = p\Delta\theta$ is the width of the interpolation window, where $p$ is the number of samples on each side of the interpolation point. The choice of $s$ and $p$ is important for maintaining accuracy while minimizing computation. Good empirical values are $s = 5$, $p= 3$.
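As a hypothetical worked example matching the figure settings below ($L = 4$, $s = 5$, $p = 3$), the parameters evaluate as in this short sketch (the interpolation point $\theta$ is arbitrary and ours):
{\footnotesize
\begin{verbatim}
% Hypothetical worked example of the interpolation parameters
% (matching the figure settings L = 4, s = 5, p = 3; theta is arbitrary).
L  = 4;  s = 5;  p = 3;
M   = s*L;                      % number of precomputed samples (here 20)
dth = 2*pi/(2*M + 1);           % sample spacing over a 2*pi circumference
N   = M - L;                    % number of over-sampling points (here 16)
th0 = p*dth;                    % width of the interpolation window
th  = 1.2;                      % interpolation point (arbitrary)
m0  = floor(th/dth);            % index of the sample to the left of th
mlist = (m0 - p + 1):(m0 + p);  % the 2*p samples used in the sum
\end{verbatim}
}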
\begin{figure}[H]
\centering
\includegraphics[width=3in]{FastMultipoleMethod/Figures/indexing}
\caption{Sampling and indexing of the interpolation. Example for $M = 8$, $p = 3$. $m=0$ corresponds to $\theta = 0$. Note no sample at $\theta=\pi$.}
\label{fig4}
\end{figure}
Even though we will only interpolate $\theta = [0, \pi]$, we require precomputed samples outside this range when interpolating near the ends. The translation operator is an even function of $\theta$, therefore, there are two options to obtain the out of bounds points: 1) Only compute sampling points in the range $\theta = [0, \pi]$ and loop the summation index $m$ back on itself if we go beyond the ends, or 2) precompute the necessary values outside of the range, and let the index roam free. We choose the first for simplicity.
Figure \ref{fig4} illustrates the sample spacing as it relates to the number of sample points as well as the indexing scheme for precomputing points outside the range $\theta = [0, \pi]$. There are $p-1$ samples to the left of 0, $p$ samples after $\pi$, $M + 2p$ total sample points, and the array index is $I = m + p$, where $m = [0,M]$. Figure \ref{fig5} shows the interpolator.
\begin{figure}[H]
\centering
\includegraphics[width=4in]{FastMultipoleMethod/Figures/TLthetainterp}
\caption{Translation operator interpolator. $L = 4$, $s = 5$, $p = 3$, $M = 20$ and there are $M + 2p$ total sampling points. $k = 2\pi$, $r = 10$. Note there is no sampling point at $\theta = \pi$.}
\label{fig5}
\end{figure}
The routine \texttt{interpTL} takes as inputs the outputs from the preparatory function \texttt{interpTLprep} as well as the interpolation point(s). The helper functions are the windowing and Dirichlet kernel, \texttt{SN} and \texttt{DM}.
{\footnotesize
\VerbatimInput{\code/FastMultipoleMethod/TL/interpTL.m}
}
{\footnotesize
\VerbatimInput{\code/FastMultipoleMethod/TL/interpTLprep.m}
}
{\footnotesize
\VerbatimInput{\code/FastMultipoleMethod/TL/SN.m}
}
{\footnotesize
\VerbatimInput{\code/FastMultipoleMethod/TL/DM.m}
}
\section{Scalar Spherical Filter}
\label{sec:scasphfilter}
In this section, we give routines for interpolating and filtering scalar spherical harmonics following \cite{yucel2008helmholtz}. These routines can stand on their own, because they are useful for general applications of spherical harmonic expansions. The scalar filters can be used in the scalar form of the FMM for interpolating and filtering fields up and down the multi-level hierarchy. They also lay the groundwork for the vector spherical filters derived later.
\subsection{Spherical Harmonic Transforms}
The spherical harmonics form a complete basis, so any band-limited spherical signal can be represented as a finite sum of harmonics
\begin{equation}
f(\theta,\phi) = \sum_{l=0}^{L} \sum_{m = -l}^{l} f_{lm} Y_{lm}(\theta,\phi)
\label{c6eq1}
\end{equation}
Spherical harmonics can be written in terms of the normalized Legendre polynomials as
\begin{equation}
Y_{lm}(\theta,\phi) = \dfrac{1}{\sqrt{2\pi}}\widetilde{P}_l^m(\cos \theta)e^{im\phi}
\end{equation}
Using orthogonality of the spherical harmonics, the expansion coefficients are
%\begin{equation}
%\int_{0}^{2\pi} \int_{0}^{\pi} Y_{lm}(\theta,\phi) Y^*_{l'm'}(\theta,\phi) \sin\theta d\theta d\phi = \delta_{ll'}\delta_{mm'}
%\end{equation}
%from which is follows that
\begin{equation}
f_{lm} = \int_{0}^{2\pi} \int_{0}^{\pi} f(\theta,\phi) Y^*_{lm}(\theta,\phi) \sin\theta d\theta d\phi
\label{c6eq3}
\end{equation}
Equation \eqref{c6eq3} is the forward transform, or spherical harmonic analysis, while equation \eqref{c6eq1} is the inverse transform, or spherical harmonic synthesis.
\subsection{Forward Scalar Spherical Transform}
Computing \eqref{c6eq3} consists of two steps: 1) forward Fourier transform in $\phi$, 2) forward Legendre transform in $\theta$. Writing out \eqref{c6eq3}
\begin{equation}
f_{lm} = \int_{0}^{\pi} \widetilde{P}_l^m(\cos \theta) \sin \theta d \theta \dfrac{1}{\sqrt{2\pi}} \int_{0}^{2\pi} f(\theta,\phi) e^{-im\phi} d\phi
\end{equation}
\noindent where $\widetilde{P}_l^m(\cos \theta)$ are the fully normalized Legendre polynomials. The $\phi$ integral is computed first in order to create a set of 1D functions of $\theta$ for each $m$
\begin{equation}
f_m(\theta) = \dfrac{1}{\sqrt{2\pi}} \int_{0}^{2\pi} f(\theta,\phi) e^{-im\phi} d\phi
\end{equation}
Evaluating this with trapezoidal integration
\begin{equation}
f_m(\theta) = \dfrac{\sqrt{2\pi}}{I} \sum_{i=1}^{I} f(\theta,\phi_i) e^{-im\phi_i}
\end{equation}
\noindent where $I$ is the number of grid points in longitude and $\phi_i = (i-1) 2\pi/ I $ for $ i = 1,...,I$. This can be computed via FFT. The $\theta$ integral is next computed for each $f_m(\theta)$ by using the forward Legendre transform with a change of variables
\ea{f_{lm} &=& \int_{0}^{\pi} f_m(\theta) \widetilde{P}_l^m(\cos \theta) \sin \theta d \theta \\
\ & = & -\int_{0}^{\pi} f_m(\theta) \widetilde{P}_l^m(\cos \theta) d\cos\theta \\
\ & = & \int_{-1}^{1} f_m(\theta( \mu)) \widetilde{P}_l^m(\mu) d\mu, \quad \mu = \cos\theta }
%
%\begin{equation}
%f_{lm} = \int_{0}^{\pi} f_m(\theta) \widetilde{P}_l^m(\cos \theta) \sin \theta d \theta
%\end{equation}
%
%Making a change of variables
%\begin{eqnarray}
%%f_{lm} &=& \int_{0}^{\pi} f_m(\theta) \widetilde{P}_l^m(\cos \theta) \sin \theta d \theta \\
%f_{lm} & = & -\int_{0}^{\pi} f_m(\theta) \widetilde{P}_l^m(\cos \theta) d\cos\theta \\
%%\ & = & \int_{\pi}^{0} f_m(\theta) \widetilde{P}_l^m(\cos \theta) d\cos\theta \\
%\ & = & \int_{-1}^{1} f_m(\theta( \mu)) \widetilde{P}_l^m(\mu) d\mu, \quad \mu = \cos\theta
%\end{eqnarray}
This can now be evaluated with Gaussian quadrature on the interval $\mu = [-1, 1]$ as
\begin{equation}
f_{lm} = \sum_{j=1}^J f_m(\theta_j) \widetilde{P}_l^m(\mu_j) w_j \label{forwardlegendre}
\end{equation}
\noindent where $J$ is the number of points in latitude and the weights, $w_j$, correspond to the nodes $\theta_j = \arccos\mu_j$. One first selects the number of grid points in latitude, retrieves the Gaussian nodes for that number of integration points, then evaluates the points $\theta_j$. Because this operation is integrating products of spherical harmonics, the integral is exact if the number of grid points in latitude and longitude are $J = L+1$ and $I = 2L+1$ for coefficients through $L$. By virtue of the Gaussian quadrature node spacing, the field is never evaluated at the poles.
The routine \texttt{sst} performs the forward scalar spherical transform and returns the spectral coefficients $f_{lm}$. The coefficients are returned on a 1D array of size $L^2 + 2L + 1$, linearly indexed. It takes as inputs the maximum degree $L$ for which harmonics are desired. The spherical function $f(\theta_j,\phi_i)$ is sampled on an $I \times J$ meshgrid, where $I = 2L'+1$ and $J = L'+1$ are such that $L' \ge L$. The sample points need to be $\phi_i = 2\pi i/ I $ for $ i = 0,...,I-1$, and $\theta_j = \arccos \mu_j $, where $\mu_j$ are the $J$ quadrature nodes on $\mu = [-1, 1]$, which are also inputs. In other words, the grid can be sampled more finely than the maximum degree of the harmonics desired for the coefficients. $f_m(\theta_j)$ is computed in place with an FFT along the first dimension of the array. Matlab's \texttt{fft} produces a two-sided DFT, and $2L+1$ is always odd, so the rows of the matrix $f_m(\theta_j)$ correspond to the spectral components $m = 0, 1, ..., (I-1)/2, -(I-1)/2, ..., -1$. The rows of the 1D FFT are indexed
\begin{equation}
\textrm{idx}(I,m) = \left\{ \begin{array}{cc} m + 1, & m \ge 0 \\ I + m + 1, & m < 0 \\ \end{array} \right.
\end{equation}
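Equivalently, a compact way to realize this mapping in Matlab is the following one-liner (our own, not part of \texttt{sst}):
{\footnotesize
\begin{verbatim}
% One possible compact realization of the index map (ours, not part of sst):
idx = @(I, m) mod(m, I) + 1;   % m = 0,...,(I-1)/2, -(I-1)/2,...,-1  ->  rows 1..I
\end{verbatim}
}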
%Finally, this uses our routine \texttt{Plm} for associated Legendre polynomials, which ensures that one factor of $(-1)^m$ in included in the definition of the spherical harmonics.
%uses onThe Legendre polynomials are computed normalized with Matlab's \texttt{legendre} function and \texttt{'norm'} option. An extra factor of $(-1)^m$ is included in that definition. Therefore, we need to cancel this to be content with our use of the Condon-Shortly phase that we include in our definition of spherical harmonics.
%
%Note: the definition of Legendre polynomials has a factor of $(-1)^m$. Our definition of spherical harmonics includes the Condon-Shortly phase, which means there are really two factors of $(-1)^m$ in the entire definition. Our spherical harmonics are consistent with the wave function translation matrices. This derivation of the filter does not include the extra $(-1)^m$ on the spherical harmonics, only the one in the definition of the Legendre polynomials, which is also included with the \texttt{'norm'} option. The filter works was shown to work with just the one factor. Therefore, we take out the second factor of $(-1)^m$ by multiplying by $(-1)^m$. If the definition of the the spherical harmonics does not include that phase, then that part of the code should be removed.
{\footnotesize
\VerbatimInput{\code/FastMultipoleMethod/SphericalFilters/sst.m}
}
\subsection{Inverse Scalar Spherical Transform}
The inverse transform consists of taking the expansion coefficients $f_{lm}$ and applying 1) the inverse Legendre transform in $\theta$, 2) the inverse Fourier transform in $\phi$. The inverse Legendre transform is
\begin{equation}
f_m(\theta_j) = \sum_{l = \vert m \vert}^{L} f_{lm} \widetilde{P}_l^m(\mu_j)
\label{eqist1}
\end{equation}
\noindent where again $\mu_j = \cos\theta_j$. The inverse Fourier transform in $\phi$ is
\begin{equation}
f(\theta_j,\phi_i) = \dfrac{1}{\sqrt{2\pi}}\sum_{m = -L}^{L} f_m(\theta_j) e^{im\phi_i}
\label{eqist2}
\end{equation}
The routine \texttt{isst} computes the inverse scalar spherical transform. The inputs are the array of harmonics $f_{lm}$ of size $L^2 + 2L + 1$, linearly indexed, and the maximum degree $L$. It then returns the $I \times J$ spherical function $f(\theta_j,\phi_i)$ as a meshgrid such that $I = 2L'+1$ and $J = L' + 1$, where $\phi_i = (i-1)2\pi/ I $ for $ i = 1,...,I$, and $\mu_j = \cos\theta_j$. $J$ is determined by the length of the input $\mu_j$, which can be larger than the corresponding sampling of the $L$ harmonics in $f_{lm}$ (this allows the routine to perform interpolation automatically). An additional factor of $I$ is needed because Matlab's \texttt{ifft} divides by the number of samples in $\phi$.
{\footnotesize
\VerbatimInput{\code/FastMultipoleMethod/SphericalFilters/isst.m}
}
\clearpage
\subsection{Scalar Spherical Filter}
The forward and inverse scalar spherical transforms can be used together to accomplish interpolation or filtering (anterpolation) of a spherical function.
Interpolation takes a function $f(\theta,\phi)$ with coarse sampling and $L$ harmonics, and upsamples it to a function $f(\theta',\phi')$ with finer sampling and $K$ harmonics, where $K > L$. Because the original signal is band limited with maximum harmonic $L$, the interpolated signal contains the same harmonic content, and is interpolated exactly. This is the spherical harmonic analog of upsampling a Nyquist-sampled signal exactly to an arbitrarily fine sampling with sinc interpolation. Interpolation is accomplished by first computing the spectral components $f_{lm}$ of $f(\theta,\phi)$ using the scalar spherical transform to degree $L$, zero-padding the coefficients to degree $K$ to form $f_{lm}'$, then applying the inverse scalar spherical transform to create $f(\theta',\phi')$.
Filtering takes $f(\theta',\phi')$ with fine sampling and $K$ harmonics to a function $f(\theta,\phi)$ with coarse sampling and $L$ harmonics, where $L < K$. This is analogous to filtering a signal and resampling it at a lower rate. Filtering necessarily eliminates the higher frequency harmonics. Filtering is accomplished by computing the spectral components $f_{lm}'$ of $f(\theta',\phi')$ via the SST to degree $K$, truncating down to degree $L$ to form $f_{lm}$, then applying the ISST to create $f(\theta,\phi)$.
\begin{equation}
\begin{array}{cccccccccc}
\textrm{Interpolation:} & f(\theta,\phi) & \stackrel{\textrm{sst}}{\rightarrow} & f_{lm}, L & \rightarrow & \textrm{zero pad} &\rightarrow & f_{lm}', K & \stackrel{\textrm{isst}}{\rightarrow} & f(\theta',\phi') \nonumber \\
\textrm{Filter:} & f(\theta',\phi') & \stackrel{\textrm{sst}}{\rightarrow} & f_{lm}', K & \rightarrow & \textrm{truncate} &\rightarrow & f_{lm}, L &\stackrel{\textrm{isst}}{\rightarrow} & f(\theta,\phi) \nonumber \\
\end{array}
\end{equation}
%
% \begin{figure}[h]
% \centering
% \includegraphics[width=3.5in]{FastMultipoleMethod/Figures/samples}
% \caption{Sample spacing for $L = 4$.}
% \label{}
%\end{figure}
%
\begin{figure}[H]
\centering
\subfigure{
\includegraphics[width=3in]{FastMultipoleMethod/Figures/filt1} }
\subfigure{
\includegraphics[width=3in]{FastMultipoleMethod/Figures/filt2} }
\caption{Interpolation of a complex scalar field (real part). Left: coarsely sampled field. Right: finely sampled interpolated field. }
\end{figure}
\begin{figure}[H]
\centering
\subfigure{
\includegraphics[width=3in]{FastMultipoleMethod/Figures/filt3} }
\subfigure{
\includegraphics[width=3in]{FastMultipoleMethod/Figures/filt4} }
\caption{Filtering of a complex scalar field (real part). Left: finely sampled field. Right: coarsely sampled filtered field. }
\end{figure}
The routine \texttt{ssfilt} computes the scalar spherical interpolation or filtering operation. It takes the maximum harmonic degrees $L$ and $K$, where $L \le K$. The harmonic content is either interpolated from $L$ to $K$ or filtered from $K$ to $L$. For interpolation, the input function is $f(\theta,\phi)$, sized $2L'+1 \times L' + 1$ on a meshgrid, and the routine returns $f(\theta',\phi')$, sized $2K'+1 \times K'+1$, where $L'$ and $K'$ are set by the lengths of $\mu_j$ and $\mu_k$, respectively. The sampling of the grid can have more harmonics than the interpolation/filter harmonics. As always, it assumes $\phi$ is uniformly spaced and $\theta$ is spaced according to the Gaussian quadrature nodes. This routine calls \texttt{sst} and \texttt{isst}, which recompute the underlying Legendre polynomials at each call, so it is not built for speed.
{\footnotesize
\VerbatimInput{\code/FastMultipoleMethod/SphericalFilters/ssfilt.m}
}
\clearpage
\subsection{Fast Scalar Spherical Filter}
\label{sec:fastscasphfilt}
The bottleneck of the scalar spherical filter lies in the sums of the forward and inverse Legendre transforms, especially when $L$ is large. The fix is to combine the two transforms, after which the sums can be simplified and further accelerated with the 1D FMM, which is described in Section \ref{sec:1dfmm}. For a discussion of the computational complexity, see \cite{yucel2008helmholtz}.
Assume that interpolation is being done and $L < K$. The quantities $\theta_j$, $\mu_j$, and $w_j$ are associated with $L$ harmonics of field $f(\theta_j,\phi_i)$, and the quantities $\theta_k$, $\mu_k$, and $w_k$ are associated with $K$ harmonics of field $f'(\theta_k,\phi_k)$ and the routine is interpolating $f(\theta_j,\phi_i)$ to $f'(\theta_k,\phi_k)$. Start by substituting the forward Legendre transform, \eqref{forwardlegendre}, into the inverse Legendre transform, \eqref{eqist1},
\begin{equation}
f_m'(\theta_k) = \sum_{l = \vert m \vert}^{K} \left(\sum_{j=1}^J f_m(\theta_j)\widetilde{P}_l^m(\mu_j)w_j\right) \widetilde{P}_l^m(\mu_k)
\end{equation}
When $L<K$, the sum over $K$ can be restricted to $L$ because $f_m(\theta_j)$ contains no harmonics with degree $l > L$. This is equivalent to zero-padding the harmonics $f_{lm}$ in the standard spherical filter. Changing the limit of the sum and exchanging the order of the sums
\begin{equation}
f_m'(\theta_k) = \sum_{j=1}^J f_m(\theta_j)w_j \sum_{l = \vert m \vert}^{L} \widetilde{P}_l^m(\mu_j) \widetilde{P}_l^m(\mu_k) \label{combineLtrans}
\end{equation}
%\begin{equation}
%f_m'(\theta_k) = \sum_{l = \vert m \vert}^{L} \left(\sum_{j=1}^J f_m(\theta_j)\widetilde{P}_l^m(\mu_j)w_j\right) \widetilde{P}_l^m(\mu_k)
%\end{equation}
The sum over $L$ can be simplified with the Christoffel-Darboux formula
\begin{equation}
\sum_{l = \vert m \vert}^{L} \widetilde{P}_l^m(\mu_j) \widetilde{P}_l^m(\mu_k) = \epsilon_{L+1}^m \dfrac{ \widetilde{P}_{L+1}^m(\mu_k) \widetilde{P}_L^m(\mu_j) - \widetilde{P}_{L}^m(\mu_k) \widetilde{P}_{L+1}^m(\mu_j)}{\mu_k - \mu_j} \label{cdform}
\end{equation}
where
\begin{equation}
\epsilon_{l}^m = \sqrt{\dfrac{l^2 - m^2}{4l^2 - 1}}
\end{equation}
Substituting \eqref{cdform} into \eqref{combineLtrans} and separating terms
%\begin{equation}
%f_m'(\theta_k) = \sum_{j=1}^J f_m(\theta_j)w_j\epsilon_{L+1}^m \dfrac{\widetilde{P}_{L+1}^m(\mu_k) \widetilde{P}_{L}^m(\mu_j) - \widetilde{P}_{L}^m(\mu_k) \widetilde{P}_{L+1}^m(\mu_j)}{\mu_k - \mu_j}
%\end{equation}
%
%Separating the terms
\begin{equation}
\dfrac{f_m'(\theta_k)}{\epsilon_{L+1}^m} = \widetilde{P}_{L+1}^m(\mu_k)\sum_{j=1}^J \dfrac{f_m(\theta_j)w_j\widetilde{P}_L^m(\mu_j)}{\mu_k - \mu_j} - \widetilde{P}_{L}^m(\mu_k)\sum_{j=1}^J \dfrac{f_m(\theta_j)w_j\widetilde{P}_{L+1}^m(\mu_j)}{\mu_k - \mu_j} \label{cdform2}
\end{equation}
This has the form of a matrix-vector multiply over the kernel of type $1/(x-x')$, therefore it is possible to use the 1D FMM to accelerate this computation.
When the sum encounters the case $\mu_k = \mu_j$, L'Hopital's rule can be applied to either $\mu_k$ or $\mu_j$ to resolve the singularity. The condition $\mu_k = \mu_j$ only ever occurs at $\mu = 0$, because the nodes of quadrature for different degrees of harmonics never overlap except at $\mu = 0$, and only when the number of quadrature points in $\theta$ of both functions is odd. Assuming the number of quadrature points is equal to $L+1$ and $K+1$, this means that the rule needs to be applied when both $L$ and $K$ are even and different (when $L=K$ there is nothing to interpolate or filter). It is possible to avoid the singularity entirely by requiring that $L$ and $K$ always be odd, in which case $\mu_j \ne \mu_k$ and the Legendre derivatives are not needed, but this is too restrictive. Applying L'Hopital's rule to $\mu_j$ we get a version of \eqref{cdform2} that handles the singularity
\ea{
\dfrac{f_m'(\theta_k)}{\epsilon_{L+1}^m} &=&
\widetilde{P}_{L+1}^m(\mu_k)\sum_{j=1}^J f_m(\theta_j)w_j
\left\{
\begin{array}{cc}
\dfrac{\widetilde{P}_L^m(\mu_j)}{\mu_k - \mu_j} & \mu_j \ne \mu_k \\
-\dfrac{d\widetilde{P}_{L}^m(\mu_j)}{d\mu_j} & \mu_j = \mu_k \\
\end{array} \right.
\nonumber \\
\ & \ & - \widetilde{P}_{L}^m(\mu_k)\sum_{j=1}^J f_m(\theta_j)w_j
\left\{
\begin{array}{cc}
\dfrac{\widetilde{P}_{L+1}^m(\mu_j)}{\mu_k - \mu_j} & \mu_j \ne \mu_k \\
-\dfrac{d\widetilde{P}_{L+1}^m(\mu_j)}{d\mu_j} & \mu_j = \mu_k \\
\end{array} \right. }
%
%\begin{eqnarray}
%\dfrac{f_m'(\theta_k)}{\epsilon_{K+1}^m} &=& \widetilde{P}_{K+1}^m(\mu_k)\sum_{\substack{j=1 \\ \mu_k \ne \mu_j}}^J \dfrac{f_m(\theta_j)w_j\widetilde{P}_K^m(\mu_j)}{\mu_k - \mu_j} - \widetilde{P}_{K}^m(\mu_k)\sum_{\substack{j=1 \\ \mu_k \ne \mu_j}}^J \dfrac{f_m(\theta_j)w_j\widetilde{P}_{K+1}^m(\mu_j)}{\mu_k - \mu_j} \nonumber \\
%\ & \ & + \left[\dfrac{d\widetilde{P}_{K+1}^m(\mu_k)}{d\mu_k} f_m(\theta_j)w_j\widetilde{P}_K^m(\mu_k) - \dfrac{d\widetilde{P}_{K}^m(\mu_k)}{d\mu_k}f_m(\theta_j)w_j\widetilde{P}_{K+1}^m(\mu_k)\right]_{\mu_k = \mu_j}
%\end{eqnarray}
%
%\begin{equation}
%\begin{array}{c}
%\dfrac{f_m'(\theta_k)}{\epsilon_{L+1}^m} =
%\dfrac{d\widetilde{P}_{L+1}^m(\mu_k)}{d\mu_k}\sum_{j=1}^J f_m(\theta_j)w_j
%\widetilde{P}_L^m(\mu_j) \left\{
%\begin{array}{cc}
%\dfrac{1}{\mu_k - \mu_j} & \mu_j \ne \mu_k \\
%1 & \mu_j = \mu_k \\
%\end{array} \right. \\
%-
%\dfrac{d\widetilde{P}_{L}^m(\mu_k)}{d\mu_k}\sum_{j=1}^J f_m(\theta_j)w_j
%\widetilde{P}_{L+1}^m(\mu_j) \left\{
%\begin{array}{cc}
%\dfrac{1}{\mu_k - \mu_j} & \mu_j \ne \mu_k \\
%1 & \mu_j = \mu_k \\
%\end{array} \right. \\
%\end{array}
%\end{equation}
%
%%\dfrac{f_m(\theta_j)w_j\widetilde{P}_K^m(\mu_j)}{\mu_k - \mu_j} - \widetilde{P}_{K}^m(\mu_k)\sum_{\substack{j=1 \\ \mu_k \ne \mu_j}}^J \dfrac{f_m(\theta_j)w_j\widetilde{P}_{K+1}^m(\mu_j)}{\mu_k - \mu_j} \nonumber \\
%%\ & \ & + \left[\dfrac{d\widetilde{P}_{K+1}^m(\mu_k)}{d\mu_k} f_m(\theta_j)w_j\widetilde{P}_K^m(\mu_k) - \dfrac{d\widetilde{P}_{K}^m(\mu_k)}{d\mu_k}f_m(\theta_j)w_j\widetilde{P}_{K+1}^m(\mu_k)\right]_{\mu_k = \mu_j}
%%\end{eqnarray}
%
%
%%\noindent where
%%
%%\begin{equation}
%%\dfrac{d}{dx}\widetilde{P}_{l}^m(x) = -m \dfrac{x}{1-x^2} \widetilde{P}_{l}^m(x) + \dfrac{\sqrt{(l+m+1)(l-m)}}{\sqrt{1-x^2}} \widetilde{P}_{l}^{m+1}(x)
%%\end{equation}
%%
%%or
%
%%\begin{equation}
%%\dfrac{d}{dx}\widetilde P_l^m(x) = \dfrac{1}{x^2-1}\left( lx \widetilde P_l^m(x) - \sqrt{\dfrac{(l+1/2)}{(l-1/2)}}\sqrt{(l+m)(l-m)} \widetilde P_{l-1}^m(x)\right)
%%\end{equation}
%
%
%\begin{equation}
%\dfrac{f_m'(\theta_k)}{\epsilon_{L+1}^m} =
%\dfrac{d\widetilde{P}_{L+1}^m(\mu_k)}{d\mu_k} \sum_{j=1}^J f_m(\theta_j)w_j \widetilde{P}_L^m(\mu_j) -
%\dfrac{d\widetilde{P}_{L}^m(\mu_k)}{d\mu_k} \sum_{j=1}^J f_m(\theta_j)w_j\widetilde{P}_{L+1}^m(\mu_j), \qquad \mu_k = \mu_j
%\end{equation}
Another way to think about the singular point, when it occurs, is to consider $1/(\mu_k - \mu_j)$ as a matrix that is multiplied on the right by a vector that indexes $\mu_j$ (e.g., $f_m(\theta_j) w_j\widetilde P_L^m(\mu_j)$) and multiplied element-wise on the left by a vector that indexes $\mu_k$ (e.g., $\widetilde P_L^m(\mu_k)$). Only the central matrix element needs to be adjusted, but the adjustment applies to both the matrix element and the element in the right hand vector. This makes what should be a simple matrix-vector multiply awkward to compute. We handle this by recomputing the row-vector multiplication that contains the singular point separately, but better solutions exist. Finally, the application of L'Hopital's rule in \cite{yucel2008helmholtz} does not appear to handle the sum over $j$ correctly, and \cite{jakob1997fast} mentions this procedure but does not give the equations.
The above equations are for interpolation. For filtering, simply exchange the nodes and weights between $L$ and $K$. The intermediate sum will again be restricted to a maximum degree $L$, because the filtered field only has harmonics up to degree $L$. Truncating the intermediate sum is equivalent to truncating the coefficients $f_{lm}'$ in the standard spherical filter.
\subsubsection{Basic implementation}
The routine \texttt{fssfilt} implements a basic version of the fast scalar spherical filter and works the same as \texttt{ssfilt}. It expects $L \le K$ and that the input field be sampled at the nodes of quadrature with either $I = 2L' + 1$ and $J = L' +1$ points for interpolation, or $P = 2K' +1$ and $Q = K' + 1$ points for filtering. $L'$ and $K'$ are set by the lengths of $\mu_j$ and $\mu_k$, respectively, such that $L' \ge L$ or $K' \ge K$. Provisions are included for the singular point based on the values of $L'$ and $K'$. It computes the matrix-vector multiplication directly and does not implement the 1D FMM acceleration. It also computes the Legendre polynomials anew at each call, so it can be made much faster with appropriate precomputation, because only the degree $L$ and $L+1$ polynomials are needed. The routine returns the same result as \texttt{ssfilt} to machine precision, and is several times faster for a low number of harmonics.
{\footnotesize
\VerbatimInput{\code/FastMultipoleMethod/SphericalFilters/fssfilt.m}
}
%\subsubsection{Fast implementation}
%
%The real speed of the fast scalar filter comes from using the 1D FMM to accelerate the matrix vector multiplication as well as precomputing the Legendre polynomials and auxiliaries of the 1D FMM.
\clearpage
\section{Vector Spherical Filter}
\label{sec:vecsphfilter}
In this section, we give routines for interpolating and filtering vector spherical harmonics. Like the scalar routines, they can stand on their own apart from the FMM. Fast versions of the vector spherical filter are also possible, either based on concepts similar to those of the fast scalar filter, in which the forward and inverse Legendre transforms are combined, or by using the fast scalar filter with modifications.
\subsection{Vector Spherical Harmonic Transforms}
The vector spherical harmonics form a complete basis, so any band-limited vector field can be represented as a sum of harmonics
\begin{eqnarray}
\bb{F}(\theta,\phi) &=& F_{\theta}(\theta,\phi) \hat\theta + F_{\phi}(\theta,\phi) \hat\phi \\
\ & = & \sum_{l=1}^{L} \sum_{m = -l}^{l} b_{lm} \bb{B}_{lm}(\theta,\phi) + c_{lm} \bb{C}_{lm}(\theta,\phi)
\end{eqnarray}
\noindent where $F_{\theta}(\theta,\phi)$ and $F_{\phi}(\theta,\phi)$ are scalar spherical functions representing each vector component. In general, the vector spherical harmonics, $\bb{B}_{lm}$ and $\bb{C}_{lm}$, could be fully normalized or partially normalized. Eventually, the fast vector spherical filter will use the fast scalar filter and, to accommodate this, it is best to use the partially normalized vector spherical harmonics in the derivations that follow (as opposed to the fully normalized versions that include a factor of $1/\sqrt{l(l+1)}$). The scalar functions are expanded as
\begin{eqnarray}
F_{\theta}(\theta,\phi) &=& \sum_{l=1}^{L} \sum_{m = -l}^{l} b_{lm} \dfrac{d}{d\theta} Y_{lm}(\theta,\phi) + c_{lm} \dfrac{im}{\sin\theta} Y_{lm}(\theta,\phi) \label{fmmFtheta} \\
F_{\phi}(\theta,\phi) &=&\sum_{l=1}^{L} \sum_{m = -l}^{l} b_{lm} \dfrac{im}{\sin\theta} Y_{lm}(\theta,\phi) -c_{lm} \dfrac{d}{d\theta} Y_{lm}(\theta,\phi) \label{fmmFphi}
\end{eqnarray}
which show the mixing of harmonics between vector components.
%\begin{eqnarray}
% F_{\theta}(\theta,\phi) &=& \sum_{l=1}^{L} \sum_{m = -l}^{l} b_{lm} \dfrac{1}{\sqrt{l(l+1)}} \dfrac{d}{d\theta} Y_{lm}(\theta,\phi) + c_{lm} \dfrac{1}{\sqrt{l(l+1)}} \dfrac{im}{\sin\theta} Y_{lm}(\theta,\phi) \nonumber \\
% \ & \ & \ \\
%F_{\phi}(\theta,\phi) &=&\sum_{l=1}^{L} \sum_{m = -l}^{l} b_{lm} \dfrac{1}{\sqrt{l(l+1)}} \dfrac{im}{\sin\theta} Y_{lm}(\theta,\phi) -c_{lm} \dfrac{1}{\sqrt{l(l+1)}} \dfrac{d}{d\theta} Y_{lm}(\theta,\phi) \nonumber \\
% \ & \ & \
%\end{eqnarray}
The orthogonality relations for these partially normalized vector spherical harmonics are
\ea{
\int_0^{2\pi} \int_0^{\pi}
\left\{
\begin{array}{c}
\bb{B}_{lm}(\theta,\phi) \cdot \bb{B}^*_{l'm'}(\theta,\phi) \\
\bb{C}_{lm}(\theta,\phi) \cdot \bb{C}^*_{l'm'}(\theta,\phi)
\end{array}
\right\}
\sin\theta d\theta d\phi &=& l(l+1)\delta_{ll'}\delta_{mm'} \\
\int_0^{2\pi} \int_0^{\pi}
\left\{
\begin{array}{c}
\bb{B}_{lm}(\theta,\phi) \cdot \bb{C}^*_{l'm'}(\theta,\phi) \end{array}
\right\}
\sin\theta d\theta d\phi &=& 0
}
Given a vector field $\bb{F}(\theta,\phi)$, the coefficients are found with
\begin{equation}
\left\{
\begin{array}{c}
b_{lm} \\
c_{lm} \\
\end{array}
\right\}
=
\dfrac{1}{l(l+1)}\int_0^{2\pi} \int_0^{\pi}
\bb{F}(\theta,\phi) \cdot
\left\{\begin{array}{c}
\bb{B}^*_{lm}(\theta,\phi) \\
\bb{C}^*_{lm}(\theta,\phi)
\end{array}\right\}
\sin\theta d\theta d\phi \label{vecanalysis}
\end{equation}
\subsection{Forward Vector Spherical Transform}
The forward vector spherical transform, as in the scalar case, is composed of a forward Fourier transform and forward Legendre transform. Writing out \eqref{vecanalysis}
\begin{eqnarray}
b_{lm} &=& \dfrac{1}{l(l+1)}\int_0^{2\pi} \int_0^{\pi} \dfrac{1}{\sqrt{2\pi}} \left( F_{\theta}(\theta,\phi) \dfrac{\partial \widetilde{P}_l^m(\cos\theta)}{\partial\theta} e^{-im\phi} \right) \sin\theta d\theta d\phi \nonumber \\
\ & \ & + \dfrac{1}{l(l+1)}\int_0^{2\pi} \int_0^{\pi} \dfrac{1}{\sqrt{2\pi}} \left( F_{\phi}(\theta,\phi) \dfrac{(-im)}{\sin\theta} \widetilde{P}_l^m(\cos\theta) e^{-im\phi} \right) \sin\theta d\theta d\phi
\end{eqnarray}
\begin{eqnarray}
c_{lm} &=& \dfrac{1}{l(l+1)}\int_0^{2\pi} \int_0^{\pi} \dfrac{1}{\sqrt{2\pi}} \left( F_{\theta}(\theta,\phi) \dfrac{(-im)}{\sin\theta} \widetilde{P}_l^m(\cos\theta) e^{-im\phi} \right) \sin\theta d\theta d\phi \nonumber \\
\ & \ & - \dfrac{1}{l(l+1)}\int_0^{2\pi} \int_0^{\pi} \dfrac{1}{\sqrt{2\pi}} \left( F_{\phi}(\theta,\phi)\dfrac{\partial \widetilde{P}_l^m(\cos\theta)}{\partial\theta} e^{-im\phi} \right) \sin\theta d\theta d\phi
\end{eqnarray}
The integrals over latitude and longitude can be separated. Performing the $\phi$ integral first we have
\begin{eqnarray}
\left\{
\begin{array}{c}
f_{\theta,m}(\theta) \\
f_{\phi,m}(\theta) \\
\end{array}
\right\}
&=&
\dfrac{1}{\sqrt{2\pi}}
\int_0^{2\pi}
\left\{
\begin{array}{c}
F_{\theta}(\theta,\phi) \\
F_{\phi}(\theta,\phi)
\end{array}\right\}
e^{-im\phi} d\phi \\
\ &=&
\dfrac{\sqrt{2\pi}}{I}
\sum_{i=1}^I
\left\{
\begin{array}{c}
F_{\theta}(\theta,\phi_i) \\
F_{\phi}(\theta,\phi_i)
\end{array}\right\}
e^{-im\phi_i}
\end{eqnarray}
\noindent where the grid points are $\phi_i = (i-1)2\pi/I$ for $i = 1,...,I$. These are evaluated with a fast Fourier transform. The coefficients are then written in terms of $f_{\theta,m}(\theta)$ and $f_{\phi,m}(\theta)$ as
\begin{eqnarray}
b_{lm} &=& \dfrac{1}{l(l+1)}\int_0^{\pi} \left( \dfrac{\partial \widetilde{P}_l^m(\cos\theta)}{\partial\theta}f_{\theta,m}(\theta) + \dfrac{(-im)}{\sin\theta} \widetilde{P}_l^m(\cos\theta) f_{\phi,m}(\theta) \right) \sin\theta d\theta \label{blmftheta}
\end{eqnarray}
\begin{eqnarray}
c_{lm} &=& \dfrac{1}{l(l+1)}\int_0^{\pi} \left(\dfrac{(-im)}{\sin\theta} \widetilde{P}_l^m(\cos\theta) f_{\theta,m}(\theta) - \dfrac{\partial \widetilde{P}_l^m(\cos\theta)}{\partial\theta}f_{\phi,m}(\theta) \right) \sin\theta d\theta \label{clmftheta}
\end{eqnarray}
The integrations are performed exactly with Gaussian quadrature after a change of variables. The first change of variables is of the type
\begin{eqnarray}
\int_{0}^{\pi} \dfrac{1}{\sin\theta}f_m(\theta) \widetilde{P}_l^m(\cos \theta) \sin \theta d \theta & = & -\int_{0}^{\pi} \dfrac{1}{\sin\theta} f_m(\theta) \widetilde{P}_l^m(\cos \theta) d\cos\theta \\
\ &= & \int_{\pi}^{0} \dfrac{1}{\sqrt{1-\cos^2\theta}}f_m(\theta) \widetilde{P}_l^m(\cos \theta) d\cos\theta \\
\ & = & \int_{-1}^{1} \dfrac{1}{\sqrt{1-\mu^2}}f_m(\theta( \mu)) \widetilde{P}_l^m(\mu) d\mu, \quad \mu = \cos\theta
\end{eqnarray}
The second change of variables is of the type
\begin{eqnarray}
\int_{0}^{\pi} f_m(\theta) \dfrac{\partial \widetilde{P}_l^m(\cos\theta)}{\partial\theta} \sin \theta d \theta & = & -\int_{0}^{\pi} f_m(\theta) \dfrac{\partial \widetilde{P}_l^m(\cos\theta)}{\partial\theta} d\cos\theta \\
\ &= & \int_{\pi}^{0} f_m(\theta) \dfrac{\partial \widetilde{P}_l^m(\cos\theta)}{\partial\theta} d\cos\theta \\
\ &= & -\int_{-1}^{1} \sqrt{1-\mu^2}f_m(\theta(\mu)) \dfrac{\partial \widetilde{P}_l^m(\mu)}{\partial\mu}d\mu, \quad \mu = \cos\theta
\end{eqnarray}
where we have used the chain rule
\[
\dfrac{\partial \widetilde{P}_l^m(\cos\theta)}{\partial\theta} = \dfrac{\partial \widetilde{P}_l^m(\cos\theta)}{\partial\mu}\dfrac{\partial\mu}{\partial\theta} = \dfrac{\partial \widetilde{P}_l^m(\cos\theta)}{\partial\mu} (-\sin\theta) = -\sqrt{1-\mu^2}\dfrac{\partial \widetilde{P}_l^m(\mu)}{\partial\mu} \]
Note that in \cite{yucel2008helmholtz} the equations are given in terms of $\partial \widetilde{P}_l^m(\mu_j)/\partial\theta$ and the chain rule is not applied. The chain rule is required for the derivatives to be compatible with our computation of the Legendre derivatives.
%\ & = & \int_{-1}^{1} \dfrac{1}{\sqrt{1-\mu^2}}f_m(\theta( \mu)) \widetilde{P}_l^m(\mu) d\mu, \quad \mu = \cos\theta
%\end{eqnarray}
Equations \eqref{blmftheta} and \eqref{clmftheta} can now be evaluated via Gaussian quadrature on the interval $\mu = [-1, 1]$ as
\begin{eqnarray}
b_{lm} &=& \dfrac{1}{l(l+1)}\sum_{j=1}^J \left( \left(-\sqrt{1-\mu_j^2}\right)\dfrac{\partial \widetilde{P}_l^m(\mu_j)}{\partial\mu}f_{\theta,m}(\theta_j) + \dfrac{(-im)}{\sqrt{1-\mu_j^2}} \widetilde{P}_l^m(\mu_j) f_{\phi,m}(\theta_j) \right) w_j \nonumber \\
\ & \ & \label{eqblm1}
\end{eqnarray}
\begin{eqnarray}
c_{lm} &=& \dfrac{1}{l(l+1)}\sum_{j=1}^J \left( \dfrac{(-im)}{\sqrt{1-\mu_j^2}} \widetilde{P}_l^m(\mu_j) f_{\theta,m}(\theta_j) - \left(-\sqrt{1-\mu_j^2}\right)\dfrac{\partial \widetilde{P}_l^m(\mu_j)}{\partial\mu} f_{\phi,m}(\theta_j) \right) w_j \nonumber \\
\ & \ & \label{eqclm1}
\end{eqnarray}
\noindent where $J$ is the number of integration points in latitude with weights $w_j$ and Gaussian nodes $\mu_j = \cos\theta_j$.
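As an illustration, the two forward steps can be written out directly for a single $(l,m)$ pair. This is only a minimal sketch under assumed variable names; the actual routine is \texttt{vst} below.
{\footnotesize
\begin{verbatim}
% Forward Fourier step: Fth, Fph are I x J samples of F_theta, F_phi with
% phi_i = 2*pi*(0:I-1)/I down the rows, so each column is a length-I DFT.
I       = size(Fth,1);
fth_all = sqrt(2*pi)/I * fft(Fth,[],1);   % rows ordered m = [0:L, -L:-1]
fph_all = sqrt(2*pi)/I * fft(Fph,[],1);

% Forward Legendre (quadrature) step for one (l,m) pair: mu, w are the
% Gaussian nodes and weights (J x 1); Plm, dPlm hold the normalized Legendre
% polynomial and its mu-derivative at mu; row im indexes order m in fft order.
fth_m = fth_all(im,:).';   fph_m = fph_all(im,:).';
s     = sqrt(1 - mu.^2);                  % sin(theta_j) at the nodes
blm   = sum(( -s.*dPlm.*fth_m + (-1i*m)./s.*Plm.*fph_m ).*w) / (l*(l+1));
clm   = sum(( (-1i*m)./s.*Plm.*fth_m + s.*dPlm.*fph_m ).*w) / (l*(l+1));
\end{verbatim}
}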
%In the computation, we need an extra factor of $(-1)^m$ to be consistent with our definitions of $ \bb{B}_{lm}(\theta,\phi)$ and $\bb{C}_{lm}(\theta,\phi)$, which we didn't show in the derivations above.
The routine \texttt{vst} takes as input the scalar functions $F_{\theta}(\theta,\phi)$ and $F_{\phi}(\theta,\phi)$ sampled at the points of quadrature, with $I = 2L+1$ rows and $J = L+1$ columns. It returns the expansion coefficients $b_{lm}$ and $c_{lm}$ linearly indexed. It is otherwise similar in form to the routine for the scalar spherical transform, \texttt{sst}, except that there is no monopole component. The routine defaults to the partially normalized vector spherical harmonics derived above. For fully normalized harmonics, use the optional string switch \texttt{norm}, which applies a factor of $1/\sqrt{l (l+1)}$; use \texttt{none} for no factors of $l$. The Legendre polynomials can be optionally precomputed for repeated application over fields of the same sampling.
{\footnotesize
\VerbatimInput{\code/FastMultipoleMethod/SphericalFilters/vst.m}
}
\subsection{Inverse Vector Spherical Transform}
Given coefficients $b_{lm}$ and $c_{lm}$, the inverse vector spherical transform is computed by first applying the inverse Legendre transform and then an inverse Fourier transform. Note that the factor of $l(l+1)$ is not needed for the partially normalized vector spherical harmonics.
\begin{eqnarray}
f_{\theta,m}(\theta_j) &=& \sum_{l=\vert m \vert }^L b_{lm} \left(-\sqrt{1-\mu_j^2}\right)\dfrac{\partial \widetilde{P}_l^m(\mu_j)}{\partial\mu} + c_{lm} \dfrac{im}{\sqrt{1-\mu_j^2}} \widetilde{P}_l^m(\mu_j) \label{eqfthj}\\
f_{\phi,m}(\theta_j) &=& \sum_{l=\vert m \vert }^L b_{lm} \dfrac{im}{\sqrt{1-\mu_j^2}} \widetilde{P}_l^m(\mu_j) - c_{lm} \left(-\sqrt{1-\mu_j^2}\right)\dfrac{\partial \widetilde{P}_l^m(\mu_j)}{\partial\mu}
\label{eqphij}
\end{eqnarray}
Again, the Gaussian nodes are $\mu_j = \cos\theta_j$. The inverse Fourier transform of $f_{\theta,m}(\theta_j)$ and $f_{\phi,m}(\theta_j)$ over $m$ then gives
\begin{equation}
\left\{
\begin{array}{c}
F_{\theta}(\theta_j,\phi_i)\\
F_{\phi}(\theta_j,\phi_i) \\
\end{array}
\right\}
=
\dfrac{1}{\sqrt{2\pi}}
\sum_{m=-L}^L
\left\{\begin{array}{c}
f_{\theta,m}(\theta_j) \\
f_{\phi,m}(\theta_j)
\end{array}\right\}
e^{im\phi_i}
\end{equation}
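For illustration, a condensed sketch of the two inverse steps for a single order $m$, under assumed variable names (this is not the \texttt{ivst} routine itself, which follows below):
{\footnotesize
\begin{verbatim}
% Inverse Legendre step for one order m: blm(l), clm(l) hold the coefficients,
% Plm(:,l), dPlm(:,l) the normalized Legendre polynomial and its mu-derivative
% at the Gaussian nodes mu (J x 1).
s     = sqrt(1 - mu.^2);
fth_m = zeros(size(mu));  fph_m = zeros(size(mu));
for l = max(abs(m),1):L                   % no monopole: degrees start at 1
    fth_m = fth_m + blm(l)*(-s).*dPlm(:,l) + clm(l)*(1i*m./s).*Plm(:,l);
    fph_m = fph_m + blm(l)*(1i*m./s).*Plm(:,l) + clm(l)*s.*dPlm(:,l);
end
% Inverse Fourier step: stack the rows for all m in fft order [0:L, -L:-1]
% into fth_all (I x J), then  Fth = I/sqrt(2*pi) * ifft(fth_all,[],1);
\end{verbatim}
}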
The routine \texttt{ivst} computes the inverse vector spherical transform given coefficients $b_{lm}$ and $c_{lm}$. It returns the vector field components $F_{\theta}(\theta,\phi)$ and $F_{\phi}(\theta,\phi)$. The coefficient vectors contain all harmonics up through degree $L$ and all orders $m$, and must have length $L^2 + 2L$. There must be at least $I = 2L+1$ sampling points in $\phi$ and at least $J = L+1$ nodes of quadrature in $\theta$, which is determined from the length of the input $\mu_j$. Like \texttt{isst}, this allows the routine to perform interpolation automatically onto a grid sampled for a harmonic degree larger than $L$. The routine defaults to the partially normalized vector spherical harmonics. For fully normalized harmonics, use the optional string switch \texttt{norm} to include a factor of $1/\sqrt{l (l+1)}$. The Legendre polynomials can be optionally precomputed for repeated application over fields of the same sampling.
{\footnotesize
\VerbatimInput{\code/FastMultipoleMethod/SphericalFilters/ivst.m}
}
\subsection{Vector Spherical Filter}
The routine \texttt{vsfilt} is a straightforward implementation of the vector spherical filter. It works like the scalar spherical filter, \texttt{ssfilt}, accomplishing vector spherical interpolation or filtering by zero padding or truncating the expansion coefficients. It takes as input the maximum degrees of the harmonic content $L$ and $K$, where $L \le K$, on either side of the transforms. However, the sampling can be greater than the requested harmonic degree when interpolating or filtering, as long as $L\le L'$ and $K\le K'$. For interpolation, the input functions are $F_{\theta}(\theta,\phi)$ and $F_{\phi}(\theta,\phi)$, which are both sized $I \times J = 2L'+1 \times L' + 1$ on a meshgrid. It returns $F_{\theta}(\theta',\phi')$ and $F_{\phi}(\theta',\phi')$, which are both sized $P \times Q = 2K'+1 \times K'+1$. Vice versa for filtering. The routine decides whether to interpolate or filter based on the size of the input functions and the lengths of $\mu_j$ and $\mu_k$. It calls \texttt{vst} and \texttt{ivst} sequentially, which means that the spherical harmonic normalization does not matter, so the default partially normalized vector spherical harmonics are used.
{\footnotesize
\VerbatimInput{\code/FastMultipoleMethod/SphericalFilters/vsfilt.m}
}
\subsection{Fast Vector Spherical Filter}
Like the scalar spherical filter, the vector spherical transforms above are dominated by the cost of the Legendre transforms. There are two methods for accelerating the computation.
The first method is similar to the fast scalar spherical filter, where the forward vector transform is substituted into the inverse vector transform. This results in sums of mixed products of Legendre polynomials and Legendre polynomial derivatives that look like they should be simplified with Christoffel-Darboux formulas, but, to the best of our knowledge, expressions for simplifying the mixed terms have not been found. This means that the 1D FMM speedup is not available. However, the sums can be precomputed and the computation carried out with matrix-vector multiplication, which is easy to implement and reasonably fast.
The second method is the one recommended throughout the literature. Interpolation/filtering is accomplished by applying the fast scalar filter to each scalar field component of the vector field. The complication comes from the fact that the vector spherical harmonics contain derivatives of the Legendre polynomials. As a result, correction terms are needed for the harmonics at the edge of the spectrum of the field being interpolated or filtered. We find that method 1 and method 2 both agree with the previous routines to machine precision.
\subsubsection{Method 1 - Precomputed Matrix-Vector Multiply}
Similar to the fast scalar spherical filter, we can derive a fast vector interpolation and filter procedure by substituting the forward vector transform into the inverse vector transform. Defining the following terms
\begin{eqnarray}
a_j &=& -\sqrt{1-\mu_j^2} \\
b_j &=& \dfrac{-i}{\sqrt{1-\mu_j^2}} = \dfrac{i}{a_j}
\end{eqnarray}
then \eqref{eqblm1} and \eqref{eqclm1} can be written
\begin{eqnarray}
b_{lm} &=& \dfrac{1}{l(l+1)}\sum_{j=1}^J \left( a_j\dfrac{\partial \widetilde{P}_l^m(\mu_j)}{\partial\mu}f_{\theta,m}(\theta_j) + m b_j \widetilde{P}_l^m(\mu_j) f_{\phi,m}(\theta_j) \right) w_j \label{meth11} \\
c_{lm} &=& \dfrac{1}{l(l+1)}\sum_{j=1}^J \left( m b_j\widetilde{P}_l^m(\mu_j) f_{\theta,m}(\theta_j) + (-a_j)\dfrac{\partial \widetilde{P}_l^m(\mu_j)}{\partial\mu} f_{\phi,m}(\theta_j) \right) w_j \label{meth12}
\end{eqnarray}
Similar to the fast scalar operation, the sum over $l$ in the inverse transform only goes up to a maximum harmonic degree $L$. If we are interpolating, this is the maximum harmonic degree of the coarsely sampled field (all coefficients $b_{lm}$ and $c_{lm}$ with degree greater than $L$ are zero). When filtering, the harmonic coefficients are truncated to degree $L$. In both cases the limit of the sum is the same; all that changes is the coarse or fine sampling of either field. Letting the $\theta$ samples of the resultant field be indexed by $k$, equations \eqref{eqfthj} and \eqref{eqphij} are first written more compactly as
\begin{eqnarray}
f_{\theta,m}(\theta_k) &=& \sum_{l=\vert m \vert }^L b_{lm} a_k \dfrac{\partial \widetilde{P}_l^m(\mu_k)}{\partial\mu} + c_{lm} m(-b_k) \widetilde{P}_l^m(\mu_k) \label{fvsfiltf1} \\
f_{\phi,m}(\theta_k) &=& \sum_{l=\vert m \vert }^L b_{lm} m(-b_k) \widetilde{P}_l^m(\mu_k) + c_{lm}(-a_k)\dfrac{\partial \widetilde{P}_l^m(\mu_k)}{\partial\mu} \label{fvsfiltf2}
\end{eqnarray}
After substituting \eqref{meth11} and \eqref{meth12} into \eqref{fvsfiltf1} and \eqref{fvsfiltf2}, exchanging the order of summation, and collecting terms we can write the combined Legendre transforms as
\begin{eqnarray}
f_{\theta,m}(\theta_k) &=& \sum_{j=1}^J f_{\theta,m}(\theta_j) A_m(\mu_j,\mu_k) + f_{\phi,m}(\theta_j) B_m(\mu_j,\mu_k) \label{ftmkmat} \\
f_{\phi,m}(\theta_k) &=& \sum_{j=1}^J - f_{\theta,m}(\theta_j) B_m(\mu_j,\mu_k) + f_{\phi,m}(\theta_j)A_m(\mu_j,\mu_k) \label{fpmkmat}
\end{eqnarray}
where
\begin{eqnarray}
A_m(\mu_j,\mu_k) &=& w_j a_j a_k M_{1,m}(\mu_j,\mu_k) - w_j b_jb_k m^2 M_{2,m}(\mu_j,\mu_k) \\
B_m(\mu_j,\mu_k) &=& w_j b_j a_k m M_{3,m}(\mu_j,\mu_k) + w_j a_jb_k m M_{4,m}(\mu_j,\mu_k)
%C_m(\mu_j,\mu_k) &=& w_j a_j (-b_k)m M_{4,m}(\mu_j,\mu_k) + w_j b_j(-a_k) m M_{3,m}(\mu_j,\mu_k)\nonumber
%D_m(\mu_j,\mu_k) &=& w_j b_j (-b_k) m^2 M_{2,m}(\mu_j,\mu_k) + w_j (-a_j)(-a_k) M_{1,m}(\mu_j,\mu_k)\nonumber
\end{eqnarray}
and
\begin{eqnarray}
M_{1,m}(\mu_j,\mu_k) &=& \sum_{l=\vert m \vert }^L \dfrac{1}{l(l+1)} \dfrac{\partial \widetilde{P}_l^m(\mu_j)}{\partial\mu}\dfrac{\partial \widetilde{P}_l^m(\mu_k)}{\partial\mu} \\
M_{2,m}(\mu_j,\mu_k) &=& \sum_{l=\vert m \vert }^L \dfrac{1}{l(l+1)} \widetilde{P}_l^m(\mu_j)\widetilde{P}_l^m(\mu_k) \\
M_{3,m}(\mu_j,\mu_k) &=& \sum_{l=\vert m \vert }^L \dfrac{1}{l(l+1)} \widetilde{P}_l^m(\mu_j)\dfrac{\partial \widetilde{P}_l^m(\mu_k)}{\partial\mu} \\
M_{4,m}(\mu_j,\mu_k) &=& \sum_{l=\vert m \vert }^L \dfrac{1}{l(l+1)} \dfrac{\partial \widetilde{P}_l^m(\mu_j)}{\partial\mu} \widetilde{P}_l^m(\mu_k)
\end{eqnarray}
In the fast scalar operator, the Christoffel-Darboux formula was used to simplify the sums over $l$ and yield an expression that can be accelerated with the 1D FMM. Similar formulas for the above expressions have not been found. Regardless, the matrices $A_m(\mu_j,\mu_k)$ and $B_m(\mu_j,\mu_k)$ can be precomputed, and \eqref{ftmkmat} and \eqref{fpmkmat} can then be evaluated as matrix-vector multiplications. Once $f_{\theta,m}(\theta_k)$ and $f_{\phi,m}(\theta_k)$ are computed, the inverse Fourier transform over $m$ completes the filter.
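For a single order $m$, the core operation reduces to assembling $A_m$ and $B_m$ and applying them as matrices. The following minimal sketch, under assumed variable names and with $\mu_j$, $\mu_k$ stored as column vectors, illustrates the idea; the actual implementation is in \texttt{fvsfilt1} and \texttt{fvsfilt1AmBm} below.
{\footnotesize
\begin{verbatim}
% Assemble A_m, B_m for one order m.  mu_j, w (J x 1) are the source nodes and
% weights, mu_k (K x 1) the target nodes; Plm_j, dPlm_j, Plm_k, dPlm_k hold the
% normalized Legendre polynomials and mu-derivatives, one column per degree.
aj = -sqrt(1-mu_j.^2);   bj = -1i./sqrt(1-mu_j.^2);
ak = -sqrt(1-mu_k.^2);   bk = -1i./sqrt(1-mu_k.^2);
Am = zeros(numel(mu_k),numel(mu_j));   Bm = Am;
for l = max(abs(m),1):L
    Am = Am + ( (ak*aj.').*(dPlm_k(:,l)*dPlm_j(:,l).') ...
              - m^2*(bk*bj.').*(Plm_k(:,l)*Plm_j(:,l).') )/(l*(l+1));
    Bm = Bm + m*( (ak*bj.').*(dPlm_k(:,l)*Plm_j(:,l).') ...
              +   (bk*aj.').*(Plm_k(:,l)*dPlm_j(:,l).') )/(l*(l+1));
end
Am = Am.*w(:).';   Bm = Bm.*w(:).';    % fold in the quadrature weights w_j
% Combined Legendre transforms for this order m:
fth_k =  Am*fth_j + Bm*fph_j;
fph_k = -Bm*fth_j + Am*fph_j;
\end{verbatim}
}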
The routine \texttt{fvsfilt1} implements the fast vector spherical filter using the matrix-multiplication method above. It detects whether to interpolate or filter based on the size of the input fields, which need to be sampled at the nodes of quadrature consistent with $L$ and $K$. It uses \texttt{fvsfilt1AmBm} to precompute the matrices $A_m(\mu_j,\mu_k)$ and $B_m(\mu_j,\mu_k)$, which takes as input just $L$ and $K$, where one is either interpolating from $L$ harmonics up to $K$ or filtering from $K$ harmonics down to $L$. Use the string switch \texttt{'interp'} or \texttt{'filter'} for interpolation or filtering. A handy trick is that the same basic computation of the matrices applies whether one is interpolating or filtering; one simply swaps the sample points with the nodes and weights of quadrature and swaps the indexing accordingly. The intermediate sums always go only up to $L$, which is again the equivalent of zero padding the spherical harmonic expansion coefficients when interpolating or truncating them when filtering. The routine returns the same result as \texttt{vsfilt} to machine precision. With precomputation, it is faster than \texttt{vsfilt} and becomes progressively faster as the number of harmonics increases.
{\footnotesize
\VerbatimInput{\code/FastMultipoleMethod/SphericalFilters/fvsfilt1.m}
}
{\footnotesize
\VerbatimInput{\code/FastMultipoleMethod/SphericalFilters/fvsfilt1AmBm.m}
}
\subsubsection{Method 2 - Fast Scalar Filter with Correction Terms}
The fast scalar filter cannot simply be applied to each scalar component of the vector fields. This is because the vector spherical harmonics contain derivatives of the Legendre polynomials, which are themselves composed of Legendre polynomials at harmonic degrees one above and one below the harmonic degree of the derivative. The fast scalar filter meanwhile a) only operates on spherical harmonics that contain non-differentiated Legendre polynomials, and b) only operates up to the highest degree in the spectrum and no more. If we want to filter the scalar components of the vector field to degree $L$, the fast scalar filter will only be accurate for harmonic degrees less than or equal to $L-1$. There is still a way to use the fast scalar filter up to degree $L$, but correction terms are needed to account for the Legendre derivatives that straddle the harmonic cutoff.
The approach is to rewrite the expressions for the vector spherical harmonic expansions in terms of purely scalar spherical harmonics. This results in a handful of leftover terms which are collected to create the needed correction terms. The correction terms in \cite{yucel2008helmholtz} appear to have errors, and those given in \cite{shanker2003fast} are not for normalized Legendre polynomials, so we rederive the correction terms here.
In a few places in the literature it is stated that the correction terms are only needed when filtering. The reasoning goes that when a field is interpolated there is no harmonic content above degree $L$, so the correction terms are not needed. However, when the Legendre derivatives are split into pure Legendre polynomials, the harmonics straddle the band edge regardless of whether a field is interpolated or filtered. We found that the correction terms are still required when interpolating in order to give the same results as our previous vector spherical filter routines.
\paragraph{Legendre Derivative Relations:}
The first step is to express the derivative of the Legendre polynomial (the $d/d\theta$ version) as a linear combination of Legendre polynomials. Start with the following two identities for unnormalized Legendre polynomials:
\begin{eqnarray}
(2l+1)\cos\theta P_l^m(\cos\theta) &=& (l+m)P_{l-1}^m(\cos\theta) + (l-m+1)P_{l+1}^m(\cos\theta) \label{legrelfmm1} \\
\sin\theta \dfrac{dP_l^m(\cos\theta)}{d\theta} &=& l\cos\theta P_l^m(\cos\theta) - (l+m)P_{l-1}^m(\cos\theta) \label{legrelfmm2}
\end{eqnarray}
Substituting \eqref{legrelfmm1} into \eqref{legrelfmm2} it can be shown that
\begin{equation}
\sin\theta \dfrac{dP_l^m(\cos\theta)}{d\theta} = \dfrac{l(l-m+1)}{2l+1}P_{l+1}^m(\cos\theta) - \dfrac{(l+m)(l+1)}{2l+1}P_{l-1}^m(\cos\theta)
\end{equation}
Multiply both sides by the normalization factor of the normalized Legendre polynomials
\begin{eqnarray}
\sqrt{(l + 1/2)\dfrac{(l-m)!}{(l+m)!}}\sin\theta \dfrac{dP_l^m(\cos\theta)}{d\theta} & =& \sqrt{(l + 1/2)\dfrac{(l-m)!}{(l+m)!}}\dfrac{l(l-m+1)}{2l+1}P_{l+1}^m(\cos\theta) \nonumber \\
\ & \ & - \sqrt{(l + 1/2)\dfrac{(l-m)!}{(l+m)!}}\dfrac{(l+m)(l+1)}{2l+1}P_{l-1}^m(\cos\theta) \nonumber \\
\end{eqnarray}
Finally, multiply the $l+1$ and $l-1$ polynomials by appropriate factors in order to apply the definition of the normalized Legendre polynomials, then simplify to get
\begin{eqnarray}
\sin\theta \dfrac{d \widetilde P_l^m(\cos\theta)}{d\theta} & =& \sqrt{\dfrac{(l + 1/2)(l+1+m)}{(l+3/2)(l+1-m)}}\dfrac{l(l-m+1)}{2l+1}\widetilde P_{l+1}^m(\cos\theta) \nonumber \\
\ & \ & - \sqrt{\dfrac{(l + 1/2)(l-m)}{(l-1/2)(l+m)}}\dfrac{(l+m)(l+1)}{2l+1}\widetilde P_{l-1}^m(\cos\theta) \label{sinthetadPlmtheta}
\end{eqnarray}
\paragraph{$F_{\theta}(\theta,\phi)$ Component:}
Consider the $\theta$ component, \eqref{fmmFtheta}, after multiplication by $\sin\theta$.
\begin{equation}
\sin\theta F_{\theta}(\theta,\phi) = \sum_{l=1}^{L} \sum_{m = -l}^{l} b_{lm} \sin\theta\dfrac{d}{d\theta} Y_{lm}(\theta,\phi) + c_{lm} (im) Y_{lm}(\theta,\phi)
\end{equation}
Substituting \eqref{sinthetadPlmtheta}, this can be written in terms of pure spherical harmonics as
\begin{eqnarray}
\sin\theta F_{\theta}(\theta,\phi) &=& \sum_{l=1}^{L} \sum_{m = -l}^{l} b_{lm} h_1(l,m) Y_{l+1,m}(\theta,\phi) \nonumber \\
\ & \ & - \sum_{l=1}^{L} \sum_{m = -l}^{l} b_{lm} h_2(l,m) Y_{l-1,m} (\theta,\phi) \nonumber \\
\ & \ & + \sum_{l=1}^{L} \sum_{m = -l}^{l} c_{lm} h_3(l,m)Y_{l,m}(\theta,\phi)
\end{eqnarray}
\noindent where
\begin{eqnarray}
h_1(l,m) &=& \sqrt{\dfrac{(l + 1/2)(l+1+m)}{(l+3/2)(l+1-m)}}\dfrac{l(l-m+1)}{2l+1} \\
h_2(l,m) &=& \sqrt{\dfrac{(l + 1/2)(l-m)}{(l-1/2)(l+m)}}\dfrac{(l+m)(l+1)}{2l+1} \\
h_3(l,m) &=& im
\end{eqnarray}
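These factors are simple enough to transcribe directly, for instance as anonymous functions (a sketch; the handle names are ours):
{\footnotesize
\begin{verbatim}
% h1, h2, h3 as defined above; l is the degree, m the order (vectorized in m).
h1 = @(l,m) sqrt((l+1/2).*(l+1+m)./((l+3/2).*(l+1-m))) .* l.*(l-m+1)./(2*l+1);
h2 = @(l,m) sqrt((l+1/2).*(l-m)./((l-1/2).*(l+m)))     .* (l+m).*(l+1)./(2*l+1);
h3 = @(l,m) 1i*m;
\end{verbatim}
}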
Next, let $L$ be the highest harmonic we are interpolating from (up to $K$) or the highest harmonic we are filtering to (down from $K$) to create the scalar function $\widetilde F_{\theta}(\theta',\phi')$ at the new sampling. Then for each sum in turn, make the following substitutions respectively: $l+1 \rightarrow l $, $l-1 \rightarrow l $, and $l \rightarrow l$ so that the spherical harmonics have the same indices
\begin{eqnarray}
\sin\theta' \widetilde F_{\theta}(\theta',\phi') &=& \sum_{l=2}^{L+1} \sum_{m = -(l-1)}^{(l-1)} \widetilde b_{l-1,m} h_1(l-1,m) Y_{lm}(\theta',\phi') \nonumber \\
\ & \ & - \sum_{l=0}^{L-1} \sum_{m = -(l+1)}^{(l+1)} \widetilde b_{l+1,m} h_2(l+1,m) Y_{lm} (\theta',\phi') \nonumber \\
\ & \ & + \sum_{l=1}^{L} \sum_{m = -l}^{l} \widetilde c_{lm} h_3(l,m) Y_{lm}(\theta',\phi')
\end{eqnarray}
%\begin{eqnarray}
%\sin\theta' \widetilde F_{\theta}(\theta',\phi') &=& \sum_{k=2}^{K+1} \sum_{m = -(k-1)}^{(k-1)} \widetilde b_{k-1,m} h_1(k-1,m) Y_{km}(\theta',\phi') \nonumber \\
%\ & \ & - \sum_{k=0}^{K-1} \sum_{m = -(k+1)}^{(k+1)} \widetilde b_{k+1,m} h_2(k+1,m) Y_{km} (\theta',\phi') \nonumber \\
%\ & \ & + \sum_{k=1}^{K} \sum_{m = -k}^{k} \widetilde c_{km} h_3(k,m) Y_{km}(\theta',\phi')
%\end{eqnarray}
Separate out the terms at $L$ and $L+1$ and collect the remaining sums over $l$ (the corresponding expression in \cite{yucel2008helmholtz} does not appear to be correct, while the one in \cite{shanker2003fast} does)
\begin{eqnarray}
\sin\theta' \widetilde F_{\theta}(\theta',\phi') &=& \sum_{l=0}^{L-1} \sum_{m = -l}^l \widetilde d_{l,m} Y_{lm}(\theta',\phi') \nonumber \\
\ & \ & + \sum_{m = -L}^{L} \widetilde e_{L,m} Y_{Lm} (\theta',\phi') \nonumber \\
\ & \ & + \sum_{m = -L}^{L} \widetilde e_{L+1,m} Y_{L+1,m}(\theta',\phi')
\end{eqnarray}
%\begin{eqnarray}
%\sin\theta' \widetilde F_{\theta}(\theta',\phi') &=& \sum_{k=0}^{K-1} \sum_{m = -k}^k \widetilde d_{k,m} Y_{km}(\theta',\phi') \nonumber \\
%\ & \ & + \sum_{m = -K}^{K} \widetilde e_{K,m} Y_{Km} (\theta',\phi') \nonumber \\
%\ & \ & + \sum_{m = -K}^{K} \widetilde e_{K+1,m} Y_{K+1,m}(\theta',\phi')
%\end{eqnarray}
where
\begin{eqnarray}
\widetilde d_{l,m} & = & \widetilde b_{l-1,m}h_1(l-1,m) - \widetilde b_{l+1,m}h_2(l+1,m) + \widetilde c_{l,m}h_3(l,m) \\
\widetilde e_{L,m} & = & \widetilde b_{L-1,m}h_1(L-1,m) + \widetilde c_{L,m} h_3(L,m) \label{correct1} \\
\widetilde e_{L+1,m} & = & \widetilde b_{L,m}h_1(L,m) \label{correct2}
\end{eqnarray}
%\begin{eqnarray}
%\widetilde d_{k,m} & = & \widetilde b_{k-1,m}h_1(k-1,m) - \widetilde b_{k+1,m}h_2(k+1,m) + \widetilde c_{k,m}h_3(k,m) \\
%\widetilde e_{K,m} & = & \widetilde b_{K-1,m}h_1(K-1,m) + \widetilde c_{K,m} h_3(K,m) \label{correct1} \\
%\widetilde e_{K+1,m} & = & \widetilde b_{K,m}h_1(K,m) \label{correct2}
%\end{eqnarray}
\paragraph{$F_{\phi}(\theta,\phi)$ Component:}
The $\phi$ component, \eqref{fmmFphi}, after multiplication by $\sin\theta$, is
\begin{equation}
\sin\theta F_{\phi}(\theta,\phi) = \sum_{l=1}^{L} \sum_{m = -l}^{l} b_{lm} im Y_{lm}(\theta,\phi) -c_{lm} \sin\theta\dfrac{d}{d\theta} Y_{lm}(\theta,\phi)
\end{equation}
This is structurally similar to the $\theta$ component. Making the change $b_{lm} \rightarrow -c_{lm}$ and $c_{lm} \rightarrow b_{lm}$ in the above derivation we can immediately write
\begin{eqnarray}
\sin\theta' \widetilde F_{\phi}(\theta',\phi') &=& \sum_{l=0}^{L-1} \sum_{m = -l}^l \widetilde f_{l,m} Y_{lm}(\theta',\phi') \nonumber \\
\ & \ & + \sum_{m = -L}^{L} \widetilde g_{L,m} Y_{Lm} (\theta',\phi') \nonumber \\
\ & \ & + \sum_{m = -L}^{L} \widetilde g_{L+1,m} Y_{L+1,m}(\theta',\phi')
\end{eqnarray}
%\begin{eqnarray}
%\sin\theta' \widetilde F_{\phi}(\theta',\phi') &=& \sum_{k=0}^{K-1} \sum_{m = -k}^k \widetilde f_{k,m} Y_{km}(\theta',\phi') \nonumber \\
%\ & \ & + \sum_{m = -K}^{K} \widetilde g_{K,m} Y_{Km} (\theta',\phi') \nonumber \\
%\ & \ & + \sum_{m = -K}^{K} \widetilde g_{K+1,m} Y_{K+1,m}(\theta',\phi')
%\end{eqnarray}
where
\begin{eqnarray}
\widetilde f_{l,m} & = & -\widetilde c_{l-1,m}h_1(l-1,m) + \widetilde c_{l+1,m}h_2(l+1,m) + \widetilde b_{l,m}h_3(l,m) \\
\widetilde g_{L,m} & = & -\widetilde c_{L-1,m}h_1(L-1,m) + \widetilde b_{L,m} h_3(L,m) \label{correct3} \\
\widetilde g_{L+1,m} & = & -\widetilde c_{L,m}h_1(L,m) \label{correct4}
\end{eqnarray}
%\begin{eqnarray}
%\widetilde f_{k,m} & = & -\widetilde c_{k-1,m}h_1(k-1,m) + \widetilde c_{k+1,m}h_2(k+1,m) + \widetilde b_{k,m}h_3(k,m) \\
%\widetilde g_{K,m} & = & -\widetilde c_{K-1,m}h_1(K-1,m) + \widetilde b_{K,m} h_3(K,m) \label{correct3} \\
%\widetilde g_{K+1,m} & = & -\widetilde c_{K,m}h_1(K,m) \label{correct4}
%\end{eqnarray}
Note that a minus sign is missing in the corresponding expression in \cite{yucel2008helmholtz}.
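As an illustration, the correction coefficients \eqref{correct1}--\eqref{correct4} can be assembled for all orders at once. This is only a sketch under assumed variable names (the degree-$(L-1)$ coefficients are assumed zero padded to length $2L+1$), using the \texttt{h1} and \texttt{h3} helpers sketched earlier:
{\footnotesize
\begin{verbatim}
% Correction coefficients e_{L,m}, e_{L+1,m}, g_{L,m}, g_{L+1,m}.
% bLm1, cLm1 hold b_{L-1,m}, c_{L-1,m}; bL, cL hold b_{L,m}, c_{L,m},
% each a (2L+1) x 1 vector over m = -L:L.
m    = (-L:L).';
eL   = cL.*h3(L,m);    gL = bL.*h3(L,m);
ok   = abs(m) <= L-1;                    % degree L-1 carries no |m| = L terms
eL(ok) = eL(ok) + bLm1(ok).*h1(L-1,m(ok));
gL(ok) = gL(ok) - cLm1(ok).*h1(L-1,m(ok));
eLp1 =  bL.*h1(L,m);
gLp1 = -cL.*h1(L,m);
\end{verbatim}
}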
\paragraph{Summary:}
Despite the complications, the manipulations so far have been exact. This implies that the first summation is the result obtained by applying the scalar filter directly to the vector field component up to degree $L-1$. The second and third sums correct for the effects of the Legendre polynomial derivatives at the highest harmonic of the truncation. Thus the fast scalar filter can be applied to obtain the field contribution from harmonics $l = 1,\dots,L-1$, while the correction terms at $L$ and $L+1$ are summed directly. The coefficients $\widetilde d_{l,m}$ and $\widetilde f_{l,m}$ are never actually computed, and neither is $h_2(l,m)$.
In \cite{yucel2008helmholtz} it is stated that the signal being filtered must be sampled on a grid one degree higher, because the field actually contains information at $L+1$, so that the number of $(\theta,\phi)$ evaluation points needs to correspond to degree $L+1$. However, we found this sampling requirement not to be the case. Rather, the scalar filter can be applied up to degree $L-1$ on a grid sampled for $L$. The scalar filter could also be applied up to degree $L$, in which case the correction terms occur at $L+1$ and $L+2$.
Note that the scalar field components are first multiplied by $\sin\theta$ before applying the fast scalar filter, then the filtered result is divided by $\sin\theta'$. The correction terms are simply divided by $\sin\theta'$ before being summed. We never divide by zero, because the Gaussian nodes never sample the poles.
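Schematically, the scaling around the scalar filter looks like the following sketch, where \texttt{scalarFilterLm1} is a hypothetical handle standing in for the fast scalar filter applied up to degree $L-1$, and \texttt{Fth\_corr}, \texttt{Fph\_corr} are the summed correction fields already evaluated on the output grid (all names are ours, and implicit expansion is assumed for the row scaling):
{\footnotesize
\begin{verbatim}
% sin(theta) scaling around the fast scalar filter (schematic only).
sth   = sqrt(1 - mu_j(:).^2).';        % 1 x J row of sin(theta_j), input grid
sthp  = sqrt(1 - mu_k(:).^2).';        % 1 x Q row of sin(theta_k'), output grid
Fth_f = scalarFilterLm1(Fth.*sth)./sthp + Fth_corr./sthp;
Fph_f = scalarFilterLm1(Fph.*sth)./sthp + Fph_corr./sthp;
\end{verbatim}
}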
\paragraph{Routine:}
The routine \texttt{fvsfilt2} is a non-optimized implementation of the algorithm above, written only to show that these equations work. The inputs and outputs are the same as \texttt{vsfilt}. The routine calls the fast scalar filter routine \texttt{fssfilt} to interpolate from, or filter to, harmonics at $L-1$ (at the time of this writing, that routine was not optimized). Next, the routine \texttt{vst} is used to compute all the vector spherical harmonic expansion coefficients of the input fields up to $L$, even though only degrees $L-1$ and $L$ are needed. It then computes and applies the correction terms, and directly sums the spherical harmonics at $L$ and $L+1$, which are computed with \texttt{sphericalY}. The results match \texttt{vsfilt} and \texttt{fvsfilt1} with an accuracy slightly less than machine precision.
This implementation is inefficient because each of the subroutines computes all of the Legendre polynomials anew at each call. However, the fast scalar filter and the correction terms only need Legendre polynomials at degrees $L-2$, $L-1$, $L$, and $L+1$. The proper way to implement this is to precompute the Legendre polynomials and derivatives for these harmonics, which are then used for in-line implementations of the fast scalar filter and the combined forward and inverse Legendre transforms.
{\footnotesize
\VerbatimInput{\code/FastMultipoleMethod/SphericalFilters/fvsfilt2.m}
}
%A slow version of these equations are implemented in \texttt{vstfilterbasic}.
%
%{\footnotesize
%\VerbatimInput{/Users/mshaynes/Desktop/Work/Database/vstfilterbasic.m}
%}
%
%{\footnotesize
%\VerbatimInput{/Users/mshaynes/Desktop/Work/Database/vecfiltcorr.m}
%}
%
%\subsection{Local Field Interpolation}
%
%In general, there are two approaches to the interpolation/filter step: global methods, and local methods. We have so far used a global method for interpolation and filtering. Global methods are exact to machine precession and can be computed at best with $O(L^2 \log L)$ speed. Local methods, on the other hand, interpolate the field locally to accomplish both the interpolation and filtering step and cost $O(L)$.
%
%In \cite{}, Legrange interpolation is used to interpolate and filter the far field pattern in two dimensions. Legrange interpolation is exact for polynomials less than a certain degree. On a 2D grid with $2p \times 2p$ stencil, this is given by
%
%\begin{equation}
%f(\theta,\phi) \approx \sum_{j=s+1-p}^{s+p} w_j(\phi) \sum_{i=t+1-p}^{t+p} v_i(\theta)f(\theta_i,\phi_j)
%\end{equation}
%
%\noindent where $w_j(\phi)$ and $v_i(\theta)$ are the interpolation weights given by
%
%\begin{equation}
%w_j(\phi) = \prod_{\substack{m=s+1-p \\ m \neq j}}^{s+p} \dfrac{\phi-\phi_m}{\phi_j - \phi_m}
%\end{equation}
%
%\begin{equation}
%v_i(\theta) = \prod_{\substack{n=t+1-p \\ n \neq i}}^{t+p} \dfrac{\theta-\theta_n}{\theta_i - \theta_n}
%\end{equation}
%
%It was reported in \cite{} that the local interpolation method was accurate to three digits. We tried this, and found that Legrange interpolation is exact for scalar field harmonics when $m$ is even, while harmonics when $m$ is odd could only be interpolated to three digits. The reason for this is because the associated Legendre polynomials with $m$ even are simple polynomials, while $m$ odd contain a factor of $\sqrt{1-x^2}$, which is not a polynomial (or is with an infinite number of terms). Therefore the error when interpolating fields composed of spherical harmonics comes not from the Legrange interpolation but the fact that $m$ odd have a non-polynomial factor. This cannot be changed or improved. Thus, we stick with the global interpolation methods.
%
%
%\newpage
%\section{Storage}
%
%The extended bandwidth formula of the minimum degree $L$ given the translation operator precision and group dimension is
%
%\begin{eqnarray}
%L &\approx& kd + 1.8 \alpha^{2/3}\left(kd\right)^{1/3} \\
%\alpha &=& \log_{10}(1/\epsilon)
%\end{eqnarray}
%
%The number of elements required to store a scalar field is
%
%\begin{equation}
%N = (2L+1)(L+1) = 2L^2 + 3L + 1
%\end{equation}
%
%Assuming the field is composed of 16 byte complex values the number bytes required to store a scalar field is $B = 16N$. The following figures show $L$ and $B$ versus group dimension and translation precision.
%
% \begin{figure}[h]
% \centering
% \includegraphics[width=3.5in]{FastMultipoleMethod/digvsL}
% \caption{}
% \label{fig7}
%\end{figure}
%
% \begin{figure}[h]
% \centering
% \includegraphics[width=3.5in]{FastMultipoleMethod/storgvsL}
% \caption{}
% \label{fig8}
%\end{figure}
%
%\newpage
%
%\section{FMM Structure}
%
%The box hierarchy is structured as regular octree with $N_{levs}$ levels. The top is level 1, the lowest is $N_{levs}$. The number of harmonics at each level, and thus the sampling, are determine by the dimension of the boxes at that level, the bandwidth formula, and the precision of translation between boxes at that level. Interpolation and filtering operations do not depend on the location of the boxes, so the Legendre polynomials only have to be computed and stored per level transition and are good for the entire domain. However, each unique translation matrix must be precomputed.
%We require $L$ to be odd to have an even number of latitude points. This ensures that latitudes points between levels do not coincide, which avoids the singularity in the core filter operation.
%
%
%
%
%\subsection{Level Properties}
%
%The properties at each level are
%
%\begin{table}[H]
%\caption{Properties of each level}
%\begin{center}
%\begin{tabular}{|c|c|}
%\hline
%Box edge dimension & $d$ \\
%\hline
%Maximum degree harmonic & $L$\\
%\hline
%Number of $\phi$ samples & $I = 2L + 1$ \\
%\hline
%Number $\theta$ samples & $J = L + 1$ \\
%\hline
%$\phi$ samples & $\phi_i = 2\pi i/I$, $i = 0,...,I-1$ \\
%\hline
%$\theta$ samples & $\theta_j = \arccos(\mu_j)$ \\
%\hline
%$J$ Gaussian quadrature nodes on $[-1,1]$ & $\mu_j$ \\
%\hline
%$J$ Gaussian quadrature weights & $w_j$ \\
%\hline
%\end{tabular}
%\end{center}
%\label{tab4}
%\end{table}%
%
%
%The routine \texttt{fmmL} computes the maximum degree harmonics and box dimensions at each level. It takes the side length of the box at the top most level, the number of levels, and the precision of the translation and uses the extended bandwidth formula. It forces $L$ to be odd.
%{\footnotesize
%\VerbatimInput{/Users/mshaynes/Desktop/Work/Database/fmmL.m}
%}
%
%The routine \texttt{fmmLevel} takes the harmonic degrees at each level computed in \texttt{fmmL} and computes the properties in Table \ref{tab4} for each level, stored in a structure array. The nodes and weights of Gaussian quadrature are precomputed quickly and accurately to any degree $L$ using \texttt{legpts} from the package Chebfun.
%
%
%{\footnotesize
%\VerbatimInput{/Users/mshaynes/Desktop/Work/Database/fmmLevel.m}
%}
%
%
%\subsection{Interpolation/Filter}
%
%There are $N_{levs}-1$ transitions that exist between $N_{levs}$ levels. The sums in the interpolator or filter are limited by the maximum harmonic degree of the \textit{smaller} of the two levels. Therefore, we index the transitions relative to the level that is being interpolated from, or filtered to. Let the small of the two levels have maximum degree $L$, sampled in latitude at $\mu_j$, while the larger of the two levels has maximum degree $K$ sampled in latitude at $\mu_k$. The only quantities that must be precomputed to compute a filter or interpolation between levels $l$ to $l+1$ are given in Table \ref{tab5}. Once computed, the quantities in the table are good for any transition in the entire computational domain assuming the hierarchy of boxes is a regular octree.
%
%
%\begin{table}[htbp]
%\caption{Precomputed quantities required to interpolate up from, or filter down to, level $l+1$.}
%\begin{center}
%\begin{tabular}{|c|c|c|}
%\hline
%Quantity & Interpolation & Filter \\
%\hline
%$\widetilde P_{L-1}^m (\mu_j)$ & \ & x \\
%\hline
%$\widetilde P_{L}^m (\mu_j)$ & x & x \\
%\hline
%$\widetilde P_{L+1}^m (\mu_j)$ & x & x \\
%\hline
%$\widetilde P_{L-1}^m (\mu_k)$ & \ & x \\
%\hline
%$\widetilde P_{L}^m (\mu_k)$ & x & x \\
%\hline
%$\widetilde P_{L+1}^m (\mu_k)$ & x & \ \\
%\hline
%$\dfrac{d}{d\mu}\widetilde P_{L-1}^m (\mu_k)$, $\dfrac{d}{d\mu}\widetilde P_{L}^m (\mu_k)$& \ & x \\
%\hline
%1D FMM, $\mu_j$ source, $\mu_k$ observation & x & \ \\
%\hline
%1D FMM, $\mu_k$ source, $\mu_j$ observation & \ & x \\
%\hline
%\end{tabular}
%\end{center}
%\label{tab5}
%\end{table}
%
%Due to the nature of the sums in the filter or interpolation, all required Legendre polynomials only need to be computed for $m=-L,...,L$ on grids size $(2L+1)\times(L+1)$ or $(2L+1)\times(K+1)$. For polynomials of degree $L-1$, values at $m=L$ are set to zero. For polynomials of degree $L+1$, the sums that use these polynomials only reach $L$.
%
%\subsubsection{Precomputed Structure Array}
%
%The routine \texttt{fmmIntFilt} returns a structure array containing the quantities in Table \ref{tab5} precomputed to interpolate or filter the far-field patterns between levels. In Matlab, \texttt{fft} returns one-sided frequencies that correspond to harmonics $[0:M, -M:-1]$. Our computation of Legendre polynomials returns the order $[-M:M]$. We rearrange the polynomial harmonics to correspond to the FFT harmonics to avoid additional indexing.
%
%{\footnotesize
%\VerbatimInput{/Users/mshaynes/Desktop/Work/Database/fmmIntFilt.m}
%}
%
%\subsubsection{Fast Interpolation/Filter for FMM}
%
%We provide two working versions of the fast vector interpolation and filter operations based on the two methods described above. To summarize, method 1 relies on precomputed matrices for the core operation, is easier to implement, but is limited to $O(L^3)$ operations. Method 2 uses the fast scalar filter at its core, requires correction terms, is more complicated, but can be accelerated with the 1D FMM to $O(L^2 log L)$ operations.
%
%\subsubsection{Method 1}
%
%The routine \texttt{fmmvecinterp1} interpolates a vector field from level $l+1$ to $l$, while the routine and \texttt{fmmvecfilter1} filters a vector field from level $l$ to $l+1$. They are designed to work with the precomputed matrices $A$ and $B$ stored in the structure array that is the output of \texttt{fmmIntFilt}. The matrices are precomputed using the routine \texttt{fmmComputeInterpFilter}.
%
%{\footnotesize
%\VerbatimInput{/Users/mshaynes/Desktop/Work/Database/fmmvecinterp1.m}
%}
%
%{\footnotesize
%\VerbatimInput{/Users/mshaynes/Desktop/Work/Database/fmmvecfilter1.m}
%}
%
%{\footnotesize
%\VerbatimInput{/Users/mshaynes/Desktop/Work/Database/fmmComputeInterpFilter.m}
%}
%
%
%\subsubsection{Method 2}
%
%The routine \texttt{fmmvecinterp2} interpolates a vector field from level $l+1$ to $l$, while the routine and \texttt{fmmvecfilter2} filters a vector field from level $l$ to $l+1$. They are designed to work with the precomputed quantities stored in the structure array that is the output of \texttt{fmmIntFilt}. Each are implemented similarly as follows:
%
%\begin{enumerate}
%\item Application of the fast scalar interpolation from harmonic $L-1$ or fast scalar filter to harmonic $L-1$. These are provided by the routines \texttt{fmminterpLm1} and \texttt{fmmfilterLm1}. The field components are multiplied by $\sin\theta_j$ (interpolation) or $\sin\theta_k$ (filter) on input. The routines assume that there is no singularity in the core operation (I.e., even number of latitude samples).
%Two implementations of the core computation are available: straight matrix-vector multiplication of precomputed $1/(x_j-x-k)$ matrix, or 1D FMM. At the moment, Matlab's matrix-vector multiplication is faster than our implementation of the 1D FMM. This may change in a different language. The 1D FMM version is commented but fully functional
%
%\item Direct computation of vector harmonic coefficients at $L$ and $L+1$. This is done with \eqref{eqblm1} and \eqref{eqclm1}. All multiplying factors in those equations are included during pre-computation in \texttt{fmmIntFilt}.
%
%\item Computation of the correction terms in equations \eqref{correct1}, \eqref{correct2}, \eqref{correct3}, \eqref{correct4} by the function \texttt{fmmCorrectionTerms}.
%
%\item Computation of the scalar field corrections via \eqref{eqist1} and \eqref{eqist2}. Factors of $2\pi$ that appear in the original filters have again been cancelled.
%
%\item Sum the fields from the fast filter and corrected field to give the final filtered vector field components. Divide the sum by $\sin\theta_j$.
%\end{enumerate}
%
%
%{\footnotesize
%\VerbatimInput{/Users/mshaynes/Desktop/Work/Database/fmmvecinterp2.m}
%}
%
%{\footnotesize
%\VerbatimInput{/Users/mshaynes/Desktop/Work/Database/fmminterpLm1.m}
%}
%
%{\footnotesize
%\VerbatimInput{/Users/mshaynes/Desktop/Work/Database/fmmvecfilter2.m}
%}
%
%{\footnotesize
%\VerbatimInput{/Users/mshaynes/Desktop/Work/Database/fmmfilterLm1.m}
%}
%
%{\footnotesize
%\VerbatimInput{/Users/mshaynes/Desktop/Work/Database/fmmCorrectionTerms.m}
%}
%
%\subsection{Octree}
%
%An octree is a recursive division of a cube into octants. Each cube at a given level is called a group, which has at most 8 occupied children. We use the publicly available Matlab routine \texttt{BuildOctree} to construct the octree. It takes the $(x,y,z)$ coordinates of scatterer points and minimum group size. It returns a structure array of group relations. It prunes unoccupied groups and determines the near-neighbors. We modify it in order to specific the edges of the bounding box (We will later replace the \texttt{neargrouptouch} array with the interlayer near neighbor list and add the interlayer interaction list.) The routine \texttt{fmmInitializeTree} adds structure elements for $F_{\theta}$ and $F_{\phi}$ at each group.
%
%%\begin{table}[htbp]
%%\caption{Properties of tree structure array}
%%\begin{center}
%%\begin{tabular}{|c|c|}
%%\hline
%%Level & $l$\\
%%\hline
%%Box index at this level & $i$ \\
%%\hline
%%Level of parent & $l+1$ \\
%%\hline
%%Global index of parent at level $l+1$ & $p$ \\
%%\hline
%%Octant in parent's box & $1,...,8$ \\
%%\hline
%%Level of children & $l-1$ \\
%%\hline
%%Global indexes of children at level $l-1$ & $c_1$, $c_2$, ..., $c_n$ \\
%%\hline
%%Coordinates of box center & \bb{x} \\
%%\hline
%%Field components size $(2L+1)\times(L+1)$ & $F_{\theta}(\theta,\phi)$, $F_{\phi}(\theta,\phi)$ \\
%%\hline
%%\end{tabular}
%%\end{center}
%%\label{default}
%%\end{table}%
%
%
%\begin{table}[htbp]
%\caption{Properties of Tree structure array.}
%\begin{center}
%\begin{tabular}{|c|c|}
%\hline
%List of children per group per level & \texttt{Tree(l).group(g).child(c)} \\
%\hline
%Coordinates of group center & \texttt{Tree(l).group(g).groupcenter} \\
%\hline
%Length of box edge & \texttt{Tree(l).group(g).cubelength} \\
%\hline
%List of near-neighbors & \texttt{Tree(l).group(g).neargrouptouch(nn)} \\
%\hline
%$F_{\theta}(\theta,\phi)$, size $(2L+1)\times(L+1)$ & \texttt{Tree(l).group(g).Fth} \\
%\hline
%$F_{\phi}(\theta,\phi)$, size $(2L+1)\times(L+1)$ & \texttt{Tree(l).group(g).Fphi} \\
%\hline
%\end{tabular}
%\end{center}
%\label{default}
%\end{table}%
%
%
%{\footnotesize
%\VerbatimInput{/Users/mshaynes/Desktop/Work/Database/fmmInitializeTree.m}
%}
%
%
%
%
% \begin{figure}[htbp]
% \centering
% \includegraphics[width=4in]{FastMultipoleMethod/octree}
% \caption{Octree structure of random points. $N_{levs}$ = 5, maximum group size 10$\lambda$, minimum group size 0.625$\lambda$.}
% \label{fig9}
%\end{figure}
%
%
%\subsection{Traditional Interaction List}
%
%The tradition FMM interaction list is constructed recursively starting at the top most level and sweeping the entire octree in order to build a near-neighbor list and interaction list for each group at each level. The interaction list for a group consists of groups that are in its so-called neighborhood, i.e., children of the near neighbors of its parent, that are not near neighbors to the group itself. Neighborhood boxes are well removed, but the outgoing fields are not accounted for one level up because the parents are near neighbors. [Adapted from Darve 2000]
%
%\begin{figure}[htbp]
%\begin{algorithmic}[1]
%\footnotesize
%\Function{Main}{}
%\State CurrentGroup = TopGroup
%\State Store CurrentGroup in NearNeighbor list of CurrentGroup
%\State \Call{BuildInteractionList}{CurrentGroup,Nlevs,level = 1}
%\EndFunction
%\\
%\Function{BuildInteractionList}{Parent,Nlevs,level}
%\If {level == Nlevs} \State {\Return}
%\Else{
%\For {Parent's NearNeighbors}
%\For{Neighborhood = Children of Parent's NearNeighbors}
%\For{Child = Children of Parent}
%\If {Neighborhood == Child's NearNeighbor}
%\State{Add Neighborhood to Child's NearNeighbor list}
%\Else
%\State{Add Neighborhood to Child's Interaction list}
%\EndIf
%\EndFor
%\EndFor
%\EndFor
%\For {Child = Children of Parent}
%\State \Call{BuildInteractionList}{Parent,Nlevs,level+1}
%\EndFor
%}
%\EndIf
%\EndFunction
%\end{algorithmic}
%\caption{Pseudocode for building a traditional FMM near-neighbor and interaction lists}\label{}
%\end{figure}
%
%
%\newpage
%
%\section{Multi-layer FMM Algorithm}
%
%The aim of the multi-layer FMM algorithm is to capture first-order scattering through dielectric layer interfaces in 3D domains that are many thousands of wavelengths. The goal of the implementation is to capture enough of the relevant scattering physics while making the problem computationally tractable. The scattering physics we need to capture are
%
%\begin{enumerate}
%\item Reflection, refraction, specularity, and rough surface effects at an interface.
%\item First-order interactions between many facets between layers over Fresnel-zone sized regions.
%\item Two-way propagation and reflection at each interface.
%\end{enumerate}
%
%Item 1 is accomplished with scattering matrices that capture scattering from facets that are used to discitize the interfaces. Item 2 is accomplished via a user defined interaction rule in combination with FMM acceleration through aggregation/dissaggregation. Item 3 is accomplished by sweeping the fields from the top interface to the bottom interface and back again. On the return pass, upward transmission fields are combined with reflected fields from the same interface.
%
%This algorithm drives two key elements: 1) a method of computation for scattering matrices of facets, 2) the construction of an different type of interaction list from the traditional one. Scattering matrices transform all incoming plane waves into all outgoing plane waves, and, while large, fit naturally into the plane wave formulation of the FMM.
%
%Diagrams
%
%
%
%
%\subsection{Multi-layer Interaction List}
%
%For a large multi-layer problem, we construct the interaction action list as follows:
%
%\begin{enumerate}
%\item For scattering between layers, we only need a near-neighbor list and interaction list between layers, not within a layers.
%\item We only need to bookkeep near-neighbors and interactions from the interface of layer $n$ to $n+1$ (downward). Reverse interactions are necessity captured in this list.
%\item If there are $N$ interfaces, there are $N-1$ interaction lists.
%\item For extremely large problems, we will likely not aggregate to the top most FMM level. This is because the number of harmonics required is too large, or the storage for the groups will not be parallelizable. We defined a maximum FMM level for translations. When groups in two adjacent layers at the maximum level are well separated, the interaction list will be constructed with a rule we define (e.g., radiation cones). When groups in two adjacent layers are neighbors (at the max level or below) the traditional interaction rules apply.
%\item We will enforce the rule that the minimum spacing between two interfaces must greater than one box at the lowest FMM level, so that there are no near-neighbors at the lowest level.
%\end{enumerate}
%
%First define the points for each layer. An FMM octree is constructed for the points of each layer. Each layer octree is a subset of a global octree, such that boxes between layers are on the same global grid. Next, create a layer structure array that contains the octree for all layers (in large problems, for example, this might be a link across files, one file per layer) and their dielectric properties. Then, we identify groups at the max level in layer $n$ that have near neighbors in layer $n+1$ (including self terms) and construct the tradition interaction lists for them between the two layers.
%
%
%\begin{figure}[hbtp]
%\begin{algorithmic}[1]
%\footnotesize
%\Function{Main}{}
%\For {layer = 1 to Nlayers}
%\State tree = \Call{BuildOctree}{layer}
%\State Add tree to Layer structure array
%\EndFor
%\State Initialize the trees in the Layer structure array with empty near neighbor and interaction lists that will point to groups indices in layer+1 (up to layer N-1)
%\For {layer = 1 to Nlayers-1}
%\For {CurrentGroup = Groups at max level}
%\State Find CurrentGroup near neighbors in layer+1 and if a self group.
%\EndFor
%\EndFor
%\For {layer =1 to Nlayers-1}
%\For {CurrentGroup = Groups at max level }
%\If {CurrentGroup has near neighbors in layer+1}
%\State \Call{BuildNNInteractionList}{CurrentGroup,layer,Nlevs,level = maxlevel}
%\EndIf
%\If {CurrentGroup has non-near neighbors in layer+1}
%\State \Call{BuildMaxLevelInteractionList}{CurrentGroup,layer,level = maxlevel}
%\EndIf
%\EndFor
%\EndFor
%\EndFunction
%\\
%\Function{BuildNNInteractionList}{Parent,layer,Nlevs,level}
%\If {level == Nlevs} \State {\Return}
%\Else{
%\For {Parent's NearNeighbors in layer+1}
%\For{Neighborhood = Children of Parent's NearNeighbors in layer+1}
%\For{Child = Children of Parent}
%\If {Neighborhood == Child's NearNeighbor in layer+1}
%\State{Add Neighborhood to Child's NearNeighbor list in layer+1}
%\Else
%\State{Add Neighborhood to Child's Interaction list in layer+1}
%\EndIf
%\EndFor
%\EndFor
%\EndFor
%\For {Child = Children of Parent}
%\State \Call{BuildNNInteractionList}{Child,layer,Nlevs,level+1}
%\EndFor
%}
%\EndIf
%\EndFunction
%\end{algorithmic}
%\caption{Pseudocode for building a multi-layer near-neighbor and interaction lists}\label{}
%\end{figure}
%
%% we have a maximum upper limit on level, due to size
%% the interactions at the max lev is decided by a different rule, (cone
%% size, interaction angle, parallelizable maximum level L < 600)
%% upper layer to lower, will capture all lower to upper as well
%% includes a self term
%% minimum spacing between layers determined so they are well separated at
%% the lowest level, because we have no direct/self terms.
%
%\newpage
%
%The function \texttt{fmmLayerInteractionList} takes the layer structure array, maximum FMM level and returns the same structure array with near neighbor and interactions for $N-1$ layers. It initializes the lists and finds the near neighbors at the maximum level. It then calls \texttt{fmmInteractionList} which recursively down-traverses a group with any near-neighbors in the next level to form the lower level interaction lists. The routine \texttt{fmmMaxLevelInteractionList} creates the interactions list between layers at the maximum level. The rule here finds the groups at the maximum level in the next layer who's centers fall within a downward scattering cone.
%
%{\footnotesize
%\VerbatimInput{/Users/mshaynes/Desktop/Work/Database/fmmLayerInteractionList.m}
%}
%
%{\footnotesize
%\VerbatimInput{/Users/mshaynes/Desktop/Work/Database/fmmNNInteractionList.m}
%}
%
%{\footnotesize
%\VerbatimInput{/Users/mshaynes/Desktop/Work/Database/fmmMaxLevelInteractionList.m}
%}
%
% \begin{figure}[htbp]
% \centering
% \includegraphics[width=4in]{FastMultipoleMethod/layerNN}
% \caption{Near neighbors between a group in layer 1 and groups in layer 2.}
% \label{fig10}
%\end{figure}
%
% \begin{figure}[htbp]
% \centering
% \includegraphics[width=4in]{FastMultipoleMethod/layerInt}
% \caption{Interactions between a group in layer 1 groups in layer 2, who's parents are near neighbors but are themselves well separated.}
% \label{fig1}
%\end{figure}
%
%
%
% \begin{figure}[htbp]
% \centering
% \includegraphics[width=4in]{FastMultipoleMethod/cone}
% \caption{Interactions between a group in layer 1 those in layer 2 at the maximum level defined by a downward scattering cone. Here the cone is has 30 degree half angle and pitch in the $-y$ direction of 12 degrees.}
% \label{fig1}
%\end{figure}
%
%
%\newpage
%
%
%\subsection{Translation}
%
%The translation operators can be computed two ways: 1) precompute all unique translation operators for all interaction groups in their entirety and store them: 2) precompute the sampled translation operators and use the fast interpolated scheme before.
%
%On a regular grid, the number of unique translations is greatly reduced. At levels less than the maximum interaction level (not including the lowest level), a group has at most $3^3-1 = 26$ nearest neighbors, and at most $7^3-3^3-1 = 315$ possible interaction directions. Given a maximum group level, when we transfer between layers, we may have many fewer or many more than 315 unique interactions.
%
%\begin{table}[htbp]
%\caption{Precomputed translation operators}
%\begin{center}
%\begin{tabular}{|c|c|}
%\hline
%Precomputed translation operators, for unique $kr$ & $T_L(\bb{k},\bb{X})$ \\
%\hline
%\end{tabular}
%\end{center}
%\label{default}
%\end{table}
%
%For this Matlab implementation, we choose option 1) to precompute the unique translation operators in the forward and reverse directions and adds them to the layer structure. We do this because the interpolation scheme is, at the moment, fairly slow. This is done with the routine \texttt{fmmTranslationOperators}. It finds the unique translations between interacting groups between each layer for each level and computed them. It adds a list of interaction indices that index the translation operator for that group and the group in the next layer at the same position in the next-layer interaction list. It then computes reverse interaction lists that point from a lower level to the next higher level.
%
%{\footnotesize
%\VerbatimInput{/Users/mshaynes/Desktop/Work/Database/fmmTranslationOperators.m}
%}
%
%
%\subsection{Layers Field Storage}
%
%Only outgoing fields from interacting groups must be stored. Incoming radiation patterns are computed during disaggregation on the fly. An interface has one set of downward fields after transmission, and one set of upward fields after reflection and transmission from the lower layer on the return pass. The fields are sampled on the usual $2L+1$ x $L+1$ phi/theta. There are two exceptions: 1) at the top interface only transmitted fields are stored, 2) at the bottom interface only reflected fields are stored. Reflection from the upper surface is computed separately between the source and the each facet directly, this will have benefits later.
%
%The routine \texttt{fmmInitializeLayerTree} creates this storage based on the interaction lists.
%
%{\footnotesize
%\VerbatimInput{/Users/mshaynes/Desktop/Work/Database/fmmInitializeLayerTree.m}
%}
%
%\newpage
%\subsection{Multi-layer Scattering Algorithm Psuedo-Code}
%
%\begin{figure}[htbp]
%\begin{algorithmic}[1]
%\footnotesize
%\State Create the layer interfaces (facet centers, facet normals, dielectric properties)
%\State Call \Call{BuildOctreeMod}{} to create FMM trees for each layer interface
%\State Create an $N\times1$ Layers struct array
%\State Add FMM layer interface trees to Layers struct
%\State Call \Call{fmmLayerInteractionList}{} to create the interaction lists and add them to Layers struct
%\State Call \Call{fmmLevel}{} to create level properties struct
%\State Call \Call{fmmIntFilt}{} to create interpolation and filter struct
%\State Call \Call{fmmTranslationOperators}{} to compute the unique translation operations in the interaction list and add them to the Layers struct
%\State Call \Call{fmmInitializeLayerTree}{} to create storage for all outgoing fields in Layers struct
%\State Compute $S_{11}$ for each facet directly to source \Comment{\textit{Surface return}}
%\For{Layer = top to bottom}\Comment{\textit{Downward pass}}
%\For{CurrentGroup = Groups at max level}
%%\State Copy tree struct for CurrentGroup, create temporary storage at each level.
%\If {Layer = top}
%\State Compute $S_{21}$ for each facet directly from source field
%\State Aggregate $S_{21}$ to max level and store as transmitted field
%\Else
%\State Translate and disaggregate incoming fields from layer above to facet level
%\State Compute $S_{11}$ for each facet
%\State Aggregate $S_{11}$ to max level and store as reflected field
%\If {Layer != bottom}
%\State Compute $S_{21}$ for each facet
%\State Aggregate $S_{21}$ to max level and store as transmitted field
%\EndIf
%\EndIf
%\EndFor{}
%\EndFor{}
%
%\For{Layer = next-to-last to top}\Comment{\textit{Upward pass}}
%\For{CurrentGroup = Groups at max level}
%\State Translate and disaggregate reflected fields from lower layer to facet level
%\If {Layer = top}
%\State Compute $S_{12}$ for each facet directly to source
%\State Compute $S_{11}$ for each facet directly to source
%\State Sum $S_{12}$ and $S_{11}$ contributions to source received field
%\Else
%\State Compute $S_{12}$ for each facet
%\State Aggregate $S_{12}$ transmission to max level
%\State Add aggregated transmission to previously stored reflected fields
%\EndIf
%\EndFor{}
%\EndFor{}
%
%%\\
%%\\
%% \Function{Main}{}
%%\State CurrentGroup = TopGroup
%%\State Store CurrentGroup in NearNeighbor list of CurrentGroup
%%\State \Call{BuildInteractionList}{CurrentGroup,Nlevs,level = 1}
%%\EndFunction
%%\\
%%\Function{BuildInteractionList}{Parent,Nlevs,level}
%%\If {level == Nlevs} \State {\Return}
%%\Else{
%%\For {Parent's NearNeighbors}
%%\For{Neighborhood = Children of Parent's NearNeighbors}
%%\For{Child = Children of Parent}
%%\If {Neighborhood == Child's NearNeighbor}
%%\State{Add Neighborhood to Child's NearNeighbor list}
%%\Else
%%\State{Add Neighborhood to Child's Interaction list}
%%\EndIf
%%\EndFor
%%\EndFor
%%\EndFor
%%\For {Child = Children of Parent}
%%\State \Call{BuildInteractionList}{Parent,Nlevs,level+1}
%%\EndFor
%%}
%%\EndIf
%%\EndFunction
%\end{algorithmic}
%\caption{Pseudocode for the multi-layer FMM algorithm}\label{}
%\end{figure}
%
%\newpage
%
%\subsection{Subtree Recursion Template}
%
%The routine \texttt{treeTraverse} is a template for recursing through a subtree given the starting level and group index.
%
%{\footnotesize
%\VerbatimInput{/Users/mshaynes/Desktop/Work/Database/treeTraverse.m}
%}
%
%\subsection{Create Subtree}
%
%Subtrees will be used to provide scratch space for aggregation and disaggregation of max level groups. All max level groups that are well separated between layers only need to store the outgoing fields at the max level, not the underlying fields at each level of aggregation or disaggregation. Therefore, we create scratch space in a subtree that contains field storage at every group. We will transfer only the fields required for interacting groups to the global tree, then destroy the subtree. This also makes aggregation/disaggregation easy, because we simply loop over all existing elements of the tree.
%
%The routine \texttt{fmmMakeSubTree} will extract a subtree from the global tree for a given layer with a local parent/child indexing. The subtree structure is initialized, then the routine calls \texttt{fmmTraverseNewTree} to determine the local parent/child indexing and group index in the global tree, and calls \texttt{fmmInitializeTree} to create field storage for every group in the subtree. We give it another structure called \texttt{Pts}, which is a structure array containing the locations of the scatterers. It will return a tree with storage down to the level of the scatterers.
%
%
%{\footnotesize
%\VerbatimInput{/Users/mshaynes/Desktop/Work/Database/fmmMakeSubTree.m}
%}
%
%{\footnotesize
%\VerbatimInput{/Users/mshaynes/Desktop/Work/Database/fmmTraverseNewTree.m}
%}
%
%\subsection{Aggregate Subtree}
%
%The routine \texttt{fmmAggregateSubTree} will aggregate the fields up a subtree produced by \texttt{fmmMakeSubTree}, starting at the scatterer level and going up to the max level. The outgoing fields from the scatterers must first be loaded at the lowest level. The wave number is a variable because we will aggregate the same tree in two different media (one for up-going waves, one for down-going waves).
%
%{\footnotesize
%\VerbatimInput{/Users/mshaynes/Desktop/Work/Database/fmmAggregateSubTree.m}
%}
%
%\subsection{Layer-to-layer Translation}
%
%Translations will be computed per group at the maximum level and loaded into the current subtree. The direction of the translation, and therefore the operator and the medium in which it is computed, will depend on whether we are in the downward or upward pass over the layers.
%
%The routine \texttt{fmmTranslateToSubTree} computes the translated fields at all levels of the subtree that need them (i.e., those with a nonzero interaction list). It attaches a flag (called 'add') to the subtree to indicate whether the disaggregation routine should add the filtered field to the stored field or not.
%
%{\footnotesize
%\VerbatimInput{/Users/mshaynes/Desktop/Work/Database/fmmTranslateToSubTree.m}
%}
%
%
%\subsection{Disaggregate Subtree}
%
%The routine \texttt{fmmDisaggregateSubTree} will disaggregate the fields in a subtree produced by \texttt{fmmMakeSubTree}, starting at the maximum level and going down to the scatterer level. The incoming fields at all levels must be loaded. Filtered fields are added to whatever fields already exist at that level based on the \texttt{add} switch. The wave number is again a variable.
%
%{\footnotesize
%\VerbatimInput{/Users/mshaynes/Desktop/Work/Database/fmmDisaggregateSubTree.m}
%}
%
%
%\subsection{Store Subtree}
%
%After a subtree has been aggregated to the max level, only the outgoing fields for groups with non-zero interaction lists need to be stored in the Layers structure.
%
%The routine \texttt{fmmStoreSubTree} takes an aggregated subtree and identifies from the global indices which fields must be stored. The string switch \texttt{TRstr} indicates whether the fields are stored as reflected or transmitted fields. The string switch \texttt{writestr} indicates whether to overwrite or add to existing fields.
%
%{\footnotesize
%\VerbatimInput{/Users/mshaynes/Desktop/Work/Database/fmmStoreSubTree.m}
%}
\clearpage
\newpage
\section{Scattering Matrices for the FMM}
\subsection{S-matrix and Field Multiplication using Quadrature}
The scattering matrix (S-matrix) (or scattering function matrix, \cite{tsang2000scattering}) embeds the scattering behavior of an object as a mapping between incident and scattered plane waves of different incident and scattered directions and polarizations. In the FMM, fields are treated as expansions of plane waves where many plane waves are incident on a local region at once. The outgoing scattered field in any particular direction is the sum of scattering contributions from all incident waves. In the limit, this sum can be computed as an integral of incident directions over the unit sphere. Casting the S-matrix this way allows it to be used in the structures of the FMM.
For the FMM, we choose the orthonormal basis for the S-matrix formed by $\hat{k}$, $\hat{\theta}$, and $\hat{\phi}$, such that the incident and scattered fields are described by the two far-field patterns $\bb{G}(\theta_i,\phi_i)$ and $\bb{F}(\theta_s,\phi_s)$. \begin{equation}
\bb{E}_i(\hat{k}_i) = \left(G_{\theta}(\hat{k}_i)\hat{\theta} +G_{\phi}(\hat{k}_i) \hat{\phi} \right) e^{i\bb{k}_i \cdot \br}
\end{equation}
\begin{equation}
\bb{E}_s(\hat{k}_s) = \left( F_{\theta}(\hat{k}_s)\hat{\theta} + F_{\phi}(\hat{k}_s)\hat{\phi}\right) \dfrac{ e^{i k r}}{r}
\end{equation}
\begin{equation}
\twobyone{F_{\theta}(\hat{k}_s)}{F_{\phi}(\hat{k}_s)} =
\twobytwo
{S_{\theta\theta}(\hat{k}_s,\hat{k}_i) }
{S_{\theta\phi}(\hat{k}_s,\hat{k}_i) }
{S_{\phi\theta}(\hat{k}_s,\hat{k}_i) }
{S_{\phi\phi}(\hat{k}_s,\hat{k}_i) }
\twobyone{G_{\theta}(\hat{k}_i)}{G_{\phi}(\hat{k}_i)}
\end{equation}
% Here, $\hat{\theta}$, and $\hat{\phi}$ are the same as the $\hat{h}$ and $\hat{v}$ polarizations in the 'wave-oriented' or 'forward scattering alignment' (FSA) polarization convention \cite{ulaby2014microwave}. %In addition, $\hat{\theta}$, and $\hat{\phi}$ are really the same polarization components relative to the frame of the scatterer, where $\hat{k}_i$ and $\hat{k}_s$ are just radial vectors in the direction of propagation.
The matrix above maps any incident direction/polarization pair to any scattered direction/polarization pair. Define the operation that transforms an incoming field pattern, $\bb{G}(\hat{\bb{k}})$, into an outgoing field pattern, $\bb{F}(\hat{\bb{k}})$, as the integral of the S-matrix over the unit sphere of incident directions
\begin{equation}
\bb{F}(\hat{\bb{k}}_s) = \int \overline{\bb{S}}(\hat{\bb{k}}_s,\hat{\bb{k}}_i)\cdot \bb{G}(\hat{\bb{k}}_i) d\Omega_{k_i}
\end{equation}
or
\begin{equation}
\twobyone{F_{\theta}(\hat{\bb{k}}_s)}{F_{\phi}(\hat{\bb{k}}_s)} =
\int \twobytwo
{S_{\theta\theta}(\hat{\bb{k}}_s,\hat{\bb{k}}_i)}
{S_{\theta\phi}(\hat{\bb{k}}_s,\hat{\bb{k}}_i)}
{S_{\phi\theta}(\hat{\bb{k}}_s,\hat{\bb{k}}_i)}
{S_{\phi\phi}(\hat{\bb{k}}_s,\hat{\bb{k}}_i)}
\cdot \twobyone{G_{\theta}(\hat{\bb{k}}_i)}{G_{\phi}(\hat{\bb{k}}_i)} d\Omega_{k_i} \label{smatintegral}
\end{equation}
To compute this exactly, the field pattern and the S-matrix are sampled at the nodes of Gaussian quadrature on a grid that is $(2L+1) \times (L+1)$ for maximum harmonic degree $L$. Then \eqref{smatintegral} can be discretized as
\begin{equation}
\twobyone{F_{\theta}(\theta_{\mu},\phi_{\nu})}{F_{\phi}(\theta_{\mu},\phi_{\nu})} =
\dfrac{2\pi}{2L+1} \sum_{i=1}^{2L+1} \sum_{j=1}^{L+1} w_j \twobytwo
{S_{\theta\theta}(\theta_{\mu},\phi_{\nu};\theta_j,\phi_i)}
{S_{\theta\phi}(\theta_{\mu},\phi_{\nu};\theta_j,\phi_i)}
{S_{\phi\theta}(\theta_{\mu},\phi_{\nu};\theta_j,\phi_i)}
{S_{\phi\phi}(\theta_{\mu},\phi_{\nu};\theta_j,\phi_i)} \cdot \twobyone{G_{\theta}(\theta_j,\phi_i)}{G_{\phi}(\theta_j,\phi_i)}
\end{equation}
\noindent where the spherical angles $(\theta_j,\phi_i)$ and $(\theta_{\mu},\phi_{\nu})$ are the quadrature samples. Technically, only the incident directions need to be sampled by quadrature. As always with quadrature, the poles are never sampled, so the polarization ambiguity at the poles never arises. Writing this in matrix form, where the 2D spherical sum is taken over the columns of the matrix and the weights and multiplying constants are placed in a diagonal matrix,\begin{equation}
\twobyone{\bb{F}_{\theta}}{\bb{F}_{\phi}} =
\twobytwo{\overline{\bb{S}}_{\theta\theta}}{\overline{\bb{S}}_{\theta\phi}}{\overline{\bb{S}}_{\phi\theta}}{\overline{\bb{S}}_{\phi\phi}} \twobytwo{\bb{W}}{0}{0}{\bb{W}}\twobyone{\bb{G}_{\theta}}{\bb{G}_{\phi}} \label{FSWG}
\end{equation}
\noindent where the elements of $\bb{W}$ contain copies of the weights $w_j$ as they apply to $\theta_j$.
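
As an illustration, the following is a minimal MATLAB sketch of the discretized sum above applied to a single polarization block. The grid ordering, the Golub-Welsch construction of the quadrature nodes, and the random test data are assumptions made for this sketch only; it is not the library routine \texttt{compute\char`_Smatrix\char`_quad} described next.
{\footnotesize
\begin{verbatim}
% Schematic quadrature application of one S-matrix block (illustrative only).
% Assumed conventions: fields are I x J with I = 2L+1 azimuthal (phi) samples
% and J = L+1 polar (theta) samples; S is I x J x I x J with scattered
% directions in dims 1-2 and incident directions in dims 3-4.
L = 10;  I = 2*L + 1;  J = L + 1;
% Gauss-Legendre nodes/weights on [-1,1] via the Golub-Welsch eigenvalue method
b = 0.5./sqrt(1 - (2*(1:J-1)).^(-2));      % Jacobi matrix off-diagonal terms
[V,D] = eig(diag(b,1) + diag(b,-1));
[x,ix] = sort(diag(D));                    % x_j = cos(theta_j)
w = 2*(V(1,ix)').^2;                       % quadrature weights w_j (J x 1)
theta = acos(x);                           % theta_j; the poles are never sampled
phi = 2*pi*(0:I-1)'/I;                     % uniform azimuthal samples
% Example data standing in for one S-matrix block and an incoming pattern
S = randn(I,J,I,J);
G = randn(I,J);
% F(mu,nu) = (2*pi/(2L+1)) * sum_{i,j} w_j * S(mu,nu,i,j) * G(i,j)
F = (2*pi/I) * reshape(reshape(S,I*J,I*J) * (G(:).*kron(w,ones(I,1))), I, J);
\end{verbatim}
}
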
The routine \texttt{compute\char`_Smatrix\char`_quad} applies the S-matrix to an incoming field pattern when both are sampled on the points of Gaussian quadrature. It returns the scattered field sampled the same way. The fields are sized $I \times J$ where $I = 2L + 1$ and $J = L+1$ for maximum harmonic degree $L$ (as written, the routine can take any sampling $I$ and $J$). The S-matrix block components are $I \times J \times I \times J$ with scattered directions in the first two dimensions and incident directions in the last two dimensions. The results match the same computation when it is performed by starting with an S-matrix, converting it to a T-matrix, computing the scattering via harmonic expansions, and then converting it back to an S-matrix.
{\footnotesize
\VerbatimInput{\code/FastMultipoleMethod/Smatrix/compute_Smatrix_quad.m}
}
%
%
%At a layer interface (e.g., surface facet), we can further define the scattering matrix with four components that account for two-way reflection and transmission at the interface such that
%
%
%\begin{equation}
%\twobyone{\bb{F}_1(\hat{\bb{k}}) }{\bb{F}_2(\hat{\bb{k}}) } = \twobytwo{\overline{\bb{S}}_{11} }{\overline{\bb{S}}_{12} }{\overline{\bb{S}}_{21} }{\overline{\bb{S}}_{22} }\twobyone{\bb{G}_1(\hat{\bb{k}}) }{\bb{G}_2(\hat{\bb{k}}) }
%\end{equation}
%
%\noindent where $\bb{G}_1$ and $\bb{F}_1$ are incoming/outgoing fields in the upper region and $\bb{G}_2$ and $\bb{F}_2$ are in the lower region. For facets, we can automatically enforce 'shadowing' by zeroing non-physical propagation combinations. For example, $\overline{\bb{S}}_{11} $ should only transform downward incident plane waves into upward plane waves, relative to the facet normal; all other combinations are zero. Likewise for the other components.
%
%
% \begin{figure}[htbp]
% \centering
% \includegraphics[width=3.5in]{FastMultipoleMethod/diagramsSmatrix}
% \caption{Components of a two-layer scattering matrix.}
% \label{figxx}
%\end{figure}
%
%
% \begin{figure}[htbp]
% \centering
% \includegraphics[width=5in]{FastMultipoleMethod/diagramsSmatrixk}
% \caption{Sampling scheme of plane wave directions or, equivalently, spherical angles. Incident directions are along columns, scattered directions are along rows. Quadrants represent pairs of $\pm k_{i,z}$ and $\pm k_{s,z}$.}
% \label{figyy}
%\end{figure}
%
%
% \begin{figure}[htbp]
% \centering
% \includegraphics[width=4in]{FastMultipoleMethod/diagramsSmatrix2}
% \caption{Coordinate and vector conventions for $S_{11}$ and $S_{21}$ in the FMM.}
% \label{figyy}
%\end{figure}
%
%
% \begin{figure}[htbp]
% \centering
% \includegraphics[width=4in]{FastMultipoleMethod/diagramsSmatrix3}
% \caption{Coordinate and vector conventions for $S_{12}$ and $S_{22}$ in the FMM.}
% \label{figyy}
%\end{figure}
%
%
%\subsection{Scattering Matrix of a Kirchhoff Facet}
%
%We use the results of the Kirchhoff approximation to derive a scattering matrix for Kirchhoff facets in the context of the FMM. Given an incident field
%
%\[\bb{E}_i = \bb{e}_i E_o e^{i\bb{k}_i \cdot \bb{r}} \]
%
%the equations for the reflected and transmitted field from an entire facetized interface under the Kirchhoff approximation are
%
%\begin{eqnarray}
%\bb{E}_r(\br) & \approx & \dfrac{ik_1e^{ik_1r}}{4\pi r} E_o \left( \overline{\bb{I}} - \hat{k}_r \hat{k}_r \right) \cdot \sum_n \bb{F}(\br_n) \int_{S_n} dS e^{i (\bb{k}_i - \bb{k}_r) \cdot \br} \nonumber \\
%\bb{E}_t(\br) & \approx & -\dfrac{ik_2 e^{ik_2r}}{4\pi r} E_o \left( \overline{\bb{I}} - \hat{k}_t \hat{k}_t \right) \cdot \sum_n \bb{N}(\br_n) \int_{S_n} dS e^{i (\bb{k}_i - \bb{k}_t) \cdot \br} \nonumber
%\end{eqnarray}
%
%where
%
%\begin{eqnarray}
%\bb{F}(\br_n) &=& - (\hat{e}_i \cdot \hat{q} )(\hat{n} \cdot \hat{k}_i) \hat{q} (1 - R^{\textrm{TE}}) \nonumber \\
%\ & \ & + (\hat{e}_i \cdot \hat{p} )(\hat{n} \times \hat{q}) (1 + R^{\textrm{TM}}) \nonumber \\
%\ & \ &+ (\hat{e}_i \cdot \hat{q} )(\hat{k}_r \times (\hat{n} \times \hat{q})) (1 + R^{\textrm{TE}}) \nonumber \\
%\ & \ & + (\hat{e}_i \cdot \hat{p} )(\hat{n} \cdot \hat{k}_i) (\hat{k}_r \times \hat{q}) (1 - R^{\textrm{TM}}) \\
%\bb{N}(\br_n) &=& - \dfrac{\eta_2}{\eta_1}(\hat{e}_i \cdot \hat{q} )(\hat{n} \cdot \hat{k}_i) \hat{q} (1 - R^{\textrm{TE}})\nonumber \\
%\ & \ & + \dfrac{\eta_2}{\eta_1}(\hat{e}_i \cdot \hat{p} )(\hat{n} \times \hat{q}) (1 + R^{\textrm{TM}}) \nonumber \\
%\ & \ &+ (\hat{e}_i \cdot \hat{q} )(\hat{k}_t \times (\hat{n} \times \hat{q})) (1 + R^{\textrm{TE}}) \nonumber \\
%\ & \ & + (\hat{e}_i \cdot \hat{p} )(\hat{n} \cdot \hat{k}_i) (\hat{k}_t \times \hat{q}) (1 - R^{\textrm{TM}})
%\end{eqnarray}
%
%\noindent and $\bb{F}(\br')$ and $\bb{N}(\br')$ are treated as constant over the surface of each facet. By definition, reflected directions $\hat{k}_r$ are in the upward direction above the facet in the first medium, while transmitted directions $\hat{k}_t$ are downward in the second medium.
%
%To derive the scattering matrix of a single facet, we take one facet centered at the origin. The surface sum reduces to
%
%\begin{eqnarray}
%\bb{E}_r(\br) & \approx & \dfrac{ik_1 e^{ik_1 r}}{4\pi r} E_o \left( \overline{\bb{I}} - \hat{k}_r \hat{k}_r \right) \cdot \bb{F}(\hat{k}_i,\hat{k}_r) I(\bb{k}_i,\bb{k}_r) \\
%\bb{E}_t(\br) & \approx & -\dfrac{ik_2e^{ik_2 r}}{4\pi r} E_o \left( \overline{\bb{I}} - \hat{k}_t \hat{k}_t \right) \cdot \bb{N}(\hat{k}_i,\hat{k}_t) I(\bb{k}_i,\bb{k}_t)
%\end{eqnarray}
%
%\noindent where $\bb{F}(\hat{k}_i,\hat{k}_r)$ and $\bb{N}(\hat{k}_i,\hat{k}_t)$ depend on incident and scattered directions, surface normal, and material type. The phase integral is one of
%
%\begin{eqnarray}
%I(\bb{k}_i,\bb{k}_r) &=& \int_{S} dS e^{i (\bb{k}_i - \bb{k}_r) \cdot \br} \\
%I(\bb{k}_i,\bb{k}_t) &=& \int_{S} dS e^{i (\bb{k}_i - \bb{k}_t)\cdot \br }
%\end{eqnarray}
%
%Using the identity $ \overline{\bb{I}} - \hat{k} \hat{k} = \hat{\theta} \hat{\theta} + \hat{\phi} \hat{\phi}$ and separating the far-field phase and decay
%
%\begin{eqnarray}
%\bb{E}_r(\br) & \approx & \left( E_{\theta,s} \hat{\theta} + E_{\phi,s} \hat{\phi} \right) \dfrac{e^{ik_1 r}}{r} \\
%\bb{E}_t(\br) & \approx & \left( E_{\theta,t} \hat{\theta} + E_{\phi,t} \hat{\phi} \right) \dfrac{e^{ik_2 r}}{r}
%\end{eqnarray}
%
%\begin{eqnarray}
%E_{\theta,s} &=& E_o\dfrac{ik_1 }{4\pi}\left(\hat{\theta} \cdot \bb{F}(\hat{k}_i,\hat{k}_r) \right)I(\bb{k}_i,\bb{k}_r) \\
%E_{\phi,s} &=& E_o\dfrac{ik_1 }{4\pi}\left(\hat{\phi} \cdot \bb{F}(\hat{k}_i,\hat{k}_r) \right)I(\bb{k}_i,\bb{k}_r) \\
%E_{\theta,t} &=& E_o\dfrac{ik_2 }{4\pi}\left(\hat{\theta} \cdot \bb{N}(\hat{k}_i,\hat{k}_t) \right)I(\bb{k}_i,\bb{k}_t) \\
%E_{\phi,t} &=& E_o\dfrac{ik_2 }{4\pi}\left(\hat{\phi} \cdot \bb{N}(\hat{k}_i,\hat{k}_t) \right)I(\bb{k}_i,\bb{k}_t)
%\end{eqnarray}
%
%Noticing that $E_o$ is common with the incident field defined above, the vector components of the scattering matrix can be constructed by taking $\bb{e}_i = [\hat{\theta}, \hat{\phi}]$ in turn.
%
%For $S_{11}$ and $S_{21}$, the equations above are unchanged. The incident directions come from above the facet, reflected waves propagate above the facet, and transmitted directions propagate below the facet. For $S_{12}$ and $S_{22}$, we reverse the equations. In both cases, the field components are projected onto the same local $\hat{q}$, $\hat{p}$ basis, so that the polarization transforms consistently between all four scattering matrices. The reflection coefficients are determined from the incident direction relative to the local normal. We zero incident/scattering angle combinations that are inconsistent with the definition of each scattering matrix.
%
%For the FMM, we can either derive a scattering matrix in the frame of the facet, which requires FMM fields to be rotated to that frame and back during computation, or we can create scattering matrices with the facet rotated into the global FMM frame. The latter also fits more naturally into the structure of the FMM, where plane wave directions are sampled and locked in the global frame, but ultimately it is a trade between computation and storage. In either case, the FMM plane wave directions become reflected or transmitted depending on which side of the facet they originate from.
%
%
%
%
%\subsubsection{Kirchhoff Disk}
%
%The routine \texttt{sMatrixDisk} returns one of the four S-matrices for a Kirchhoff disk. It takes as input the enumerated type ($S_{11}$, $S_{21}$, $S_{12}$, $S_{22}$), incident and scattering field directions, the dielectrics and wave numbers of the upper and lower media, and the disk area and surface normal in the global FMM frame. The reflection coefficients can be computed with complex $\epsilon_r$, but the phase integral will use the real part of the wave numbers for \texttt{intKirchhoffDisk}. The incident and scattered directions are forced to 1D arrays and, using the $\theta$, $\phi$ grids defined in the FMM Level structure, the routine will return wave vector directions consistent with the ordering in the figure above.
%
%
%{\footnotesize
%\VerbatimInput{/Users/mshaynes/Desktop/Work/Database/sMatrixDisk.m}
%}
%
%
%
%
% \begin{figure}[htbp]
% \centering
% \includegraphics[width=4in]{FastMultipoleMethod/S_{11}Disk}
% \caption{$S_{11}$ for disk $\hat{n} = [0,0,1]$, $a = 1/4 \lambda$, $\epsilon_{r1} = 1$, $\epsilon_{r2} = 3$}
% \label{figyy}
%\end{figure}
%
% \begin{figure}[htbp]
% \centering
% \includegraphics[width=4in]{FastMultipoleMethod/S_{21}Disk}
% \caption{$S_{21}$ for disk $\hat{n} = [0,0,1]$, $a = 1/4 \lambda$,$\epsilon_{r1} = 1$, $\epsilon_{r2} = 3$. The fading stripe in the amplitude is due to the Brewster angle. }
% \label{figyy}
%\end{figure}
%
% \begin{figure}[htbp]
% \centering
% \includegraphics[width=4in]{FastMultipoleMethod/S_{12}Disk}
% \caption{$S_{12}$ for disk $\hat{n} = [0,0,1]$, $a = 1/4 \lambda$,$\epsilon_{r1} = 1$, $\epsilon_{r2} = 3$}
% \label{figyy}
%\end{figure}
%
% \begin{figure}[htbp]
% \centering
% \includegraphics[width=4in]{FastMultipoleMethod/S_{22}Disk}
% \caption{$S_{22}$ for disk $\hat{n} = [0,0,1]$, $a = 1/4 \lambda$, $\epsilon_{r1} = 1$, $\epsilon_{r2} = 3$. The fading patterns are due to the partial effects of total internal reflection (not complete total internal reflection because the disk is finite)}
% \label{figyy}
%\end{figure}
%
%%%
%
% \begin{figure}[htbp]
% \centering
% \includegraphics[width=4in]{FastMultipoleMethod/S_{11}Disk2}
% \caption{$S_{11}$ for disk $\hat{n} = [0,1/\sqrt{2},1/\sqrt{2}]$, $a = 1/4 \lambda$, $\epsilon_{r1} = 1$, $\epsilon_{r2} = 3$}
% \label{figyy}
%\end{figure}
%
% \begin{figure}[htbp]
% \centering
% \includegraphics[width=4in]{FastMultipoleMethod/S_{21}Disk2}
% \caption{$S_{21}$ for disk $\hat{n} = [0,1/\sqrt{2},1/\sqrt{2}]$, $a = 1/4 \lambda$, $\epsilon_{r1} = 1$, $\epsilon_{r2} = 3$}
% \label{figyy}
%\end{figure}
%
% \begin{figure}[htbp]
% \centering
% \includegraphics[width=4in]{FastMultipoleMethod/S_{12}Disk2}
% \caption{$S_{12}$ for disk $\hat{n} = [0,1/\sqrt{2},1/\sqrt{2}]$, $a = 1/4 \lambda$, $\epsilon_{r1} = 1$, $\epsilon_{r2} = 3$}
% \label{figyy}
%\end{figure}
%
% \begin{figure}[htbp]
% \centering
% \includegraphics[width=4in]{FastMultipoleMethod/S_{22}Disk2}
% \caption{$S_{22}$ for disk $\hat{n} = [0,1/\sqrt{2},1/\sqrt{2}]$, $a = 1/4 \lambda$, $\epsilon_{r1} = 1$, $\epsilon_{r2} = 3$}
% \label{figyy}
%\end{figure}
\subsection{S-matrix to T-matrix Transformation using Quadrature}
\label{fastStoT}
While the S-matrix is a useful 4D structure for storing the scattering properties of a target, it 1) can be difficult and inaccurate to interpolate if the propagation directions are not highly oversampled, and 2) can be difficult to rotate between two reference frames because both the wave directions and the vector components need to be transformed. On the other hand, the transition matrix (T-matrix), which relates the coefficients of the incident and scattered spherical harmonic expansions, is very easy to rotate, and enables exact interpolation at arbitrary propagation directions through field expansions.
Recall the S-matrix to T-matrix transformation \eqref{StoTBC}
\eq{\tbt{\overline{\bb{T}}^{MM}}{\overline{\bb{T}}^{MN} }{\overline{\bb{T}}^{NM}}{\overline{\bb{T}}^{NN}}
=
\dfrac{k}{4\pi}
\twobytwo{\overline{\bb{L}}_1^{-1}}{0}{0}{\overline{\bb{L}}_2^{-1}}
\twobytwo{\overline{\bb{C}}_{\theta}^*}{\overline{\bb{C}}_{\phi}^*}{\overline{\bb{B}}_{\theta}^* } {\overline{\bb{B}}_{\phi}^*}
\twobytwo{\bb{W}}{0}{0}{\bb{W}}
\twobytwo
{\overline{\bb{S}}_{\theta\theta} }
{\overline{\bb{S}}_{\theta\phi}}
{\overline{\bb{S}}_{\phi\theta} }
{\overline{\bb{S}}_{\phi\phi} }
\twobytwo{\bb{W}}{0}{0}{\bb{W}}
\twobytwo{\overline{\bb{C}}_{\theta}}{\overline{\bb{B}}_{\theta}}{\overline{\bb{C}}_{\phi}}{\overline{\bb{B}}_{\phi}}
\twobytwo{\overline{\bb{L}}_2}{0}{0}{\overline{\bb{L}}_1} \label{StoTvst}}
Our routine \texttt{vst} computes exactly the discretized integral of the vector spherical harmonics over the unit sphere when applied from the left to the columns of $\bb{S}$, provided the scattered directions of $\bb{S}$ are sampled at the nodes of Gaussian quadrature. It can also be used again to compute the integral over incident directions as a left operation on $\bb{S}^*$.
The routine \texttt{convert\char`_S\char`_to\char`_T} computes the S-matrix to T-matrix transformation \eqref{StoTvst}. It takes as input the four S-matrix components and returns the four components of the T-matrix. Each S-matrix component is stored on a 4D grid that is $I \times J \times I \times J$, where $I = 2L+1$ and $J = L+1$ are sampled according to quadrature for $L$ harmonics. The scattered directions are dimensions 1 and 2 and the incident directions are dimensions 3 and 4. The routine returns the four components of the T-matrix up to harmonic degree $L$, all $m$, linearly indexed, each an $N \times N$ matrix where $N = L^2 + 2L$. The routine calls \texttt{vst} with precomputed Legendre polynomials and carries out the block matrix multiplication as a sequence of operations on the columns of the S-matrix or its transpose. The columns of the S-matrix are in fact the $I \times J$ subfields that are converted to columns of the T-matrix, which are pairs of coefficients of length $N$. This uses the fully normalized vector spherical harmonics.
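
As a small check on the bookkeeping, the count $N = L^2 + 2L$ follows from linearly indexing the $(l,m)$ harmonic pairs for $l = 1,\ldots,L$ and $m = -l,\ldots,l$. The index formula below is a common convention and is only an assumption here; the routines may order the harmonics differently.
{\footnotesize
\begin{verbatim}
% Assumed linear index for an (l,m) harmonic pair: n = l*(l+1) + m,
% which runs from 1 (l=1, m=-1) up to L^2 + 2L (l=L, m=L).
L = 10;
n = @(l,m) l.*(l+1) + m;
N = n(L,L);               % N = L^2 + 2L = 120 harmonics for L = 10
\end{verbatim}
}
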
\clearpage
{\footnotesize
\VerbatimInput{\code/FastMultipoleMethod/Smatrix/convert_S_to_T.m}
}
\subsection{T-matrix to S-matrix Transformation using Quadrature}
\label{fastTtoS}
Recall the T-matrix to S-matrix transformation \eqref{TtoSBC}
\eq{\twobytwo
{\overline{\bb{S}}_{\theta\theta} }
{\overline{\bb{S}}_{\theta\phi}}
{\overline{\bb{S}}_{\phi\theta} }
{\overline{\bb{S}}_{\phi\phi} }
=
\dfrac{4\pi}{k} \twobytwo{\overline{\bb{C}}_{\theta}}{\overline{\bb{B}}_{\theta}}{\overline{\bb{C}}_{\phi}}{\overline{\bb{B}}_{\phi}}\twobytwo{\overline{\bb{L}}_1}{0}{0}{\overline{\bb{L}}_2} \tbt{\overline{\bb{T}}^{MM}}{\overline{\bb{T}}^{MN} }{\overline{\bb{T}}^{NM}}{\overline{\bb{T}}^{NN}} \twobytwo{\overline{\bb{L}}_2^{-1}}{0}{0}{\overline{\bb{L}}_1^{-1}} \twobytwo{\overline{\bb{C}}_{\theta}^*}{\overline{\bb{C}}_{\phi}^*}{\overline{\bb{B}}_{\theta}^* } {\overline{\bb{B}}_{\phi}^*} \label{Sivst}}
Our routine \texttt{ivst} will compute exactly the matrix multiplication over the block vector spherical harmonics when applied as a left operation to the columns of the T-matrix. We can use it again to compute the right multiplication of the conjugate operation as a left multiplication of the conjugate T-matrix, $\bb{T}^*$.
The routine \texttt{convert\char`_T\char`_to\char`_S} computes the four components of the S-matrix given the four components of the T-matrix, which contain harmonics up to degree $L$, all $m$, linearly indexed. It calls \texttt{ivst} to compute \eqref{Sivst} as a sequence of operations over the columns of the T-matrix or its transpose. The columns of the T-matrix are treated as pairs of expansion coefficients having $N = L^2 + 2L$ harmonics each when input to \texttt{ivst}, which returns field quantities sized $I \times J$ that become the new columns. When done, the S-matrix is $I \times J \times I \times J$ with scattered directions in the first two dimensions and incident directions in the last two dimensions. Applied in sequence with \texttt{convert\char`_S\char`_to\char`_T}, the routines return identical results. Note, these two transformations do not require physically realistic T- or S-matrices, but an unrealistic S-matrix will be filtered into a band-limited T-matrix. %Use string switch \texttt{'norm'} to transform from the normalized T-matrix.
{\footnotesize
\VerbatimInput{\code/FastMultipoleMethod/Smatrix/convert_T_to_S.m}
}
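
The round-trip consistency noted above can be checked numerically. The call sequence below is only a sketch: the argument lists and variable names are placeholders and should be matched against the actual headers of \texttt{convert\char`_S\char`_to\char`_T.m} and \texttt{convert\char`_T\char`_to\char`_S.m}.
{\footnotesize
\begin{verbatim}
% Placeholder round-trip check (argument lists assumed, not verified).
% Stt, Stp, Spt, Spp are the four S-matrix blocks sampled on the
% quadrature grid for maximum harmonic degree L.
[Tmm,Tmn,Tnm,Tnn] = convert_S_to_T(Stt,Stp,Spt,Spp);   % S -> T
[Qtt,Qtp,Qpt,Qpp] = convert_T_to_S(Tmm,Tmn,Tnm,Tnn);   % T -> S
max(abs(Qtt(:)-Stt(:)))   % near machine precision when the starting
                          % S-matrix is band-limited to degree L
\end{verbatim}
}
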
%
%\newpage
%
%\section{FMM Formulation}
%
%The dyadic Green's function is given by
%
%\begin{equation}
% \overline{\bb{G}}(\br,\br') = \left[\overline{\bb{I}} + \dfrac{1}{k^2} \nabla\nabla \right] g(\br,\br')
% \end{equation}
%
% where
%
% \[ g(\br,\br') = \dfrac{e^{ik\vert \br - \br' \vert}}{4\pi \vert \br - \br' \vert} \]
%
%The far-field dyadic Green's function is
%
%\begin{equation}
% \overline{\bb{G}}_f(\br,\br') \approx \left[\overline{\bb{I}} - \hat{\bb{k}} \hat{\bb{k}} \right] g(\br,\br')
% \end{equation}
%
%
%The curl of the far expression can be derived as
%
%\[ \nabla \times \overline{\bb{G}}_f(\br,\br') = \nabla \times \left( \left[\overline{\bb{I}} - \hat{\bb{k}} \hat{\bb{k}} \right] g(\br,\br')\right) \]
%
%This has the form
%
%\[ \nabla \times ( \phi \overline{\bb{F}} ) = \nabla \phi \times \overline{\bb{F}} + \phi \nabla \times \overline{\bb{F}} \]
%
%where $ \overline{\bb{F}} = \overline{\bb{I}} - \hat{\bb{k}} \hat{\bb{k}} $. This reduces to
%
%\[ \nabla \times \overline{\bb{G}}_f(\br,\br') = \nabla g(\br,\br') \times \left[\overline{\bb{I}} - \hat{\bb{k}} \hat{\bb{k}} \right] \]
%
%Applied to a vector $\bb{v}$,
%
%\begin{eqnarray}
%\left( \nabla \times \overline{\bb{G}}_f(\br,\br')\right) \cdot \bb{v} & = & -\bb{v} \cdot \left( \nabla \times \overline{\bb{G}}_f(\br,\br')\right) \\
%\ & = & - \bb{v} \cdot \left(\nabla g(\br,\br') \times \left[\overline{\bb{I}} - \hat{\bb{k}} \hat{\bb{k}} \right] \right) \\
% \ & = & - \left( \bb{v} \times \nabla g(\br,\br') \right) \cdot \left[\overline{\bb{I}} - \hat{\bb{k}} \hat{\bb{k}} \right] \\
% \ & = & -\left[\overline{\bb{I}} - \hat{\bb{k}} \hat{\bb{k}} \right] \cdot \left( \bb{v} \times \nabla g(\br,\br') \right) \\
% \ & = & \left[\overline{\bb{I}} - \hat{\bb{k}} \hat{\bb{k}} \right] \cdot \left( \nabla g(\br,\br') \times \bb{v} \right) \\
% \ & \approx & \left[\overline{\bb{I}} - \hat{\bb{k}} \hat{\bb{k}} \right] \cdot \left( i k g(\br,\br')\right) \left( \hat{\bb{k}} \times \bb{v} \right)
% \end{eqnarray}
%
%The first equation holds because the curl of the dyadic Green's function is anti-symmetric. The third equation uses the relation $\bb{a}\cdot(\bb{b} \times \overline{\bb{c}}) = (\bb{a} \times \bb{b})\cdot \overline{\bb{c}} $. The fourth holds because $ \left[\overline{\bb{I}} - \hat{\bb{k}} \hat{\bb{k}} \right] $ is a symmetric dyad. The fifth equation is cross product commutation. Finally, the gradient is applied and the far field is taken, since
%
%\[\nabla g(\br,\br') = \left(ik - \dfrac{1}{r}\right) g(\br,\br') \hat{\bb{k}} \]
%
%\subsection{Spectral Representation}
%
%The spectral representation of the exponent is
%
%\[ \dfrac{e^{ik\vert \br - \br' \vert}}{\vert \br - \br' \vert} \approx \dfrac{ik}{4\pi} \int e^{i\bb{k}\cdot(\br - \br_o)} T_L(\bb{k},\bb{X}) e^{i\bb{k}\cdot(\br' - \br_s) }d\Omega \]
%
%The far field scalar Green's function is then
%
% \[ g(\br,\br') \approx \dfrac{ik}{16\pi^2} \int e^{i\bb{k}\cdot(\br - \br_o)} T_L(\bb{k},\bb{X}) e^{i\bb{k}\cdot(\br' - \br_s) }d\Omega \]
%
%And the dyadic Green's function is
%
%
%\begin{equation}
% \overline{\bb{G}}_f(\br,\br') \approx \dfrac{ik}{16\pi^2} \int \left[\overline{\bb{I}} - \hat{\bb{k}} \hat{\bb{k}} \right] e^{i\bb{k}\cdot(\br - \br_o)} T_L(\bb{k},\bb{X}) e^{i\bb{k}\cdot(\br' - \br_s) }d\Omega
% \end{equation}
%
%\subsubsection{Source Volume Integral}
%
%The electric field due to a source is
%
%\[ \bb{E}(\br) = i \omega \mu \int \overline{\bb{G}}_f(\br,\br') \cdot \bb{J}(\br') dV \]
%
%Substituting the dyadic Green's function we can write this
%
%\[ \bb{E}(\br) = \dfrac{i k}{4 \pi} \int \bb{F}(\hat{\bb{k}}) e^{i\bb{k}\cdot(\br - \br_o)} T_L(\bb{k},\bb{X}) d\Omega \]
%
%where
%
%\[\bb{F}(\hat{\bb{k}}) = \dfrac{1}{4 \pi} (i \omega \mu) \left[\overline{\bb{I}} - \hat{\bb{k}} \hat{\bb{k}} \right] \cdot \int e^{i\bb{k}\cdot(\br' - \br_s) }\bb{J}(\br') dV \]
%
%where $\bb{F}(\hat{\bb{k}}) $ is the far pattern of the source.
%
%\subsubsection{Surface Integrals}
%
%The reflected and transmitted fields above and below the boundary, respectively, are given by
%
%\begin{eqnarray}
%\bb{E}_r(\br) & = & \int_S dS' \left\{ i\omega \mu \G{1} \cdot \hat{n}' \times \bb{H}(\br') + \left[\nabla \times \G{1} \right] \cdot \hat{n}' \times \bb{E}(\br')\right\} \nonumber \\
%\bb{E}_t(\br) & = & \int_S dS' \left\{ i\omega \mu \G{2} \cdot \hat{n}_d' \times \bb{H}(\br') + \left[\nabla \times \G{2} \right] \cdot \hat{n}_d' \times \bb{E}(\br')\right\} \nonumber
%\end{eqnarray}
%
%where $\hat{n}$ and $\hat{n}_d$ are the outward and inward pointing surface normals, and $\bb{E}$ and $\bb{H}$ are the fields on the boundary.
%
%Generically, these are
%
%\begin{eqnarray}
%\bb{E}(\br) & = & \int_S dS' \left\{ i\omega \mu \G{} \cdot \hat{n}' \times \bb{H}(\br') + \left[\nabla \times \G{} \right] \cdot \hat{n}' \times \bb{E}(\br')\right\} \nonumber \\
%\end{eqnarray}
%
%Substituting the far field dyadic Green's function
%
%\begin{eqnarray}
%\bb{E}(\br) & = & \int_S dS' \left\{ i\omega \mu \left[\overline{\bb{I}} - \hat{\bb{k}} \hat{\bb{k}} \right] g(\br,\br') \cdot \hat{n}' \times \bb{H}(\br') \right. \nonumber \\
%\ & \ & \left. + \left[\overline{\bb{I}} - \hat{\bb{k}} \hat{\bb{k}} \right] \cdot \left( i k g(\br,\br')\right) \left( \hat{\bb{k}} \times \left( \hat{n}' \times \bb{E}(\br')\right)\right) \right\} \nonumber
%\end{eqnarray}
%
%which is
%
%\begin{eqnarray}
%\bb{E}(\br) & = & \int_S dS' \left[\overline{\bb{I}} - \hat{\bb{k}} \hat{\bb{k}} \right] \cdot \left( i\omega \mu \left(\hat{n}' \times \bb{H}(\br')\right) + i k \hat{\bb{k}} \times \left( \hat{n}' \times \bb{E}(\br')\right) \right) g(\br,\br') \nonumber
%\end{eqnarray}
%
%As before, substituting the spectral representation of the scalar Green's function, we can write the fields as
%
%\[ \bb{E}(\br) = \dfrac{i k}{4 \pi} \int \bb{F}(\hat{\bb{k}}) e^{i\bb{k}\cdot(\br - \br_o)} T_L(\bb{k},\bb{X}) d\Omega \]
%
%\[\bb{F}(\hat{\bb{k}}) = \dfrac{1}{4 \pi} \int \left[\overline{\bb{I}} - \hat{\bb{k}} \hat{\bb{k}} \right] \cdot \left( i\omega \mu \left(\hat{n}' \times \bb{H}(\br')\right) + i k \hat{\bb{k}} \times \left( \hat{n}' \times \bb{E}(\br')\right) \right) e^{i\bb{k}\cdot(\br' - \br_s) } dS' \]
%
%Next assume the incident field has the form
%
%\[ \bb{E}_{inc}(\br) = \dfrac{i k}{4 \pi} \int \bb{F}_{inc} (\hat{\bb{k}}') e^{i\bb{k}'\cdot(\br - \br_o)} T_L(\bb{k}',\bb{X}) d\Omega' \]
%
%For plane waves, the magnetic field is
%
%\[ \bb{H}_{inc}(\br) = \dfrac{1}{\eta} \hat{\bb{k}} \times \bb{E}_{inc}(\br) \]
%
%The goal is to write $\bb{F}(\hat{\bb{k}})$ as
%
%\[ \bb{F}(\hat{\bb{k}}) = \int \overline{\bb{S}}(\hat{\bb{k}},\hat{\bb{k}}') \cdot \bb{F}_{inc} (\hat{\bb{k}}') T_L(\bb{k}',\bb{X}) d\Omega' \]
%
%where $\overline{\bb{S}}(\hat{\bb{k}},\hat{\bb{k}}')$ is the scattering matrix.
%
%
%%
%%\section{Dyadic Green's Function Representations}
%%
%%\subsection{Forms of the Dyadic Green's Function}
%%
%%\subsubsection{Form 1}
%%
%%The explicit expression for the dyadic Green's function is given by
%%
%%\begin{equation}
%% \overline{\bb{G}}(\br,\br') = \left[\overline{\bb{I}} + \dfrac{1}{k^2} \nabla\nabla \right] \dfrac{e^{ik\vert \br - \br' \vert}}{4\pi \vert \br - \br' \vert}
%% \end{equation}
%%
%%See Chapter 1 for details.
%%
%%\subsubsection{Form 2}
%%
%%The dyadic Green's function written as an expansion of vector wave functions is
%%
%%\begin{equation}
%%\overline{\mathbf{G}}(\br,\br') =
%%ik\displaystyle\sum\limits_{lm} \dfrac{1}{l(l+1)}\left[\M{\br}\Re\Mhat{\br'} + \N{\br}\Re\Nhat{\br'}\right]
%%\end{equation}
%%
%%for $\vert \br' \vert < \vert \br \vert $.
%%
%%
%%
%%\subsubsection{Form 3}
%%
%%The far-field approximation of the dyadic Green's function used in the FMM is given by
%%
%%\begin{equation}
%%\overline{\mathbf{G}}(\br,\br') \approx \dfrac{ik}{4\pi} \int_S \left(\overline{\bb{I}} - \hat{\bb{k}}\hat{\bb{k}}'\right)
%%e^{i \bb{k}\cdot(\bb{r}-\bb{r}_o) } T_L(\bb{k},\bb{X}) e^{-i \bb{k}\cdot(\bb{r}'-\bb{r}_s) } d^2\hat{\bb{k}}
%%\end{equation}
%%
%%where
%%
%%\begin{eqnarray}
%%\bb{r} &=& \textrm{Observation point} \nonumber \\
%%\bb{r}_o &=& \textrm{Observation frame origin} \nonumber \\
%%\bb{r}' &=& \textrm{Source point} \nonumber \\
%%\bb{r}_s &=& \textrm{Source frame origin} \nonumber \\
%%\bb{X} = \bb{r}_o - \bb{r}_s &=& \textrm{Vector between origins} \nonumber \\
%%\bb{k} = k \hat{\bb{k}} &=& \textrm{Wave vectors of the expansion} \nonumber \\
%%\hat{\bb{k}} &=& \textrm{Plane wave directions on the unit sphere}\nonumber
%%\end{eqnarray}
%%
%%
%%\subsection{Equivalency}
%%
%%We want to show the equivalence among all three forms of the dyadic Green's function. This is to validate the computation of $T_L(\bb{k},\bb{X})$. The electric field radiated from a current density $\bb{J}(\bb{r})$ is given by
%%
%%\begin{equation}
%%\bb{E}(\br) = i\omega\mu\int \overline{\bb{G}}(\br,\br')\cdot \bb{J}(\bb{r}') dV'
%%\end{equation}
%%
%%Let the current density be a Hertzian dipole at the origin
%%
%%\[ \bb{J}(\bb{r}) = I \hat{z} \delta(\br) \]
%%
%%\subsubsection{Form 1}
%%
%%Substituting the current density above into the integral above and using the Cartesian form of the dyadic Green's function the electric field everywhere is
%%
%%\begin{eqnarray}
%%\bb{E}(\br) &=& i\omega\mu\int \overline{\bb{G}}(\br,\br')\cdot I \hat{z} \delta(\br') dV' \\
%%\ &=& i\omega\mu I \thrcol{G_{xz}(\br,0) }{G_{yz}(\br,0) }{G_{zz}(\br,0)}
%%\end{eqnarray}
%%
%%\subsubsection{Form 2}
%%
%%\begin{eqnarray}
%%\bb{E}(\br) &=& i\omega\mu\int \overline{\bb{G}}(\br,\br')\cdot I \hat{z} \delta(\br') dV' \\
%%\ &=& i\omega\mu \int ik\displaystyle\sum\limits_{lm} \dfrac{1}{l(l+1)}\left[\M{\br}\Re\Mhat{\br'} + \N{\br}\Re\Nhat{\br'}\right] \cdot I \hat{z} \delta(\br') dV' \\
%%\ &=& \displaystyle\sum\limits_{lm} a_{lm} \M{\br} + b_{lm} \N{\br}
%%\end{eqnarray}
%%
%%where
%%
%%\begin{eqnarray}
%%a_{lm} &=& \dfrac{1}{l(l+1)}i\omega\mu (ik) I \Re\Mhat{0} \cdot \hat{z} \\
%%b_{lm} &=& \dfrac{1}{l(l+1)}i\omega\mu (ik) I \Re\Nhat{0} \cdot \hat{z}
%%\end{eqnarray}
%%
%%The $z$ unit vector can be written $\hat{z} = \cos\theta \hat{r} - \sin\theta \hat{\theta}$.
%%
%%The Bessel function expressions in the regular wave functions have the following limits at the origin
%%
%%\begin{eqnarray}
%%j_l(kr) &=& 0, \quad l \ge 1 \\
%%j_l(kr)/kr &=& 1/3, \quad l = 1 , \quad 0, \quad \textrm{o.w.}\\
%%\dfrac{[krj_l(kr)]'}{kr} &=& 2/3, \quad l = 1, \quad 0, \quad \textrm{o.w.}
%%\end{eqnarray}
%%
%%
%%
%%
%%\begin{eqnarray}
%%a_{lm} &=& 0 \\
%%b_{lm} &=& i\omega\mu (ik) I \Re\Nhat{0} \cdot \hat{z}
%%\end{eqnarray}
%%
%%
%%\subsubsection{Form 3}
%%
%%
%
%